--- a/internal/wc/wc.go
+++ b/internal/wc/wc.go
@@ -1,1016 +1,1030 @@
package wc
import (
"encoding/json"
"fmt"
"io/fs"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"arche/internal/merge"
"arche/internal/object"
"arche/internal/repo"
"arche/internal/store"
"arche/internal/watcher"
)
// dirtySet returns the set of paths the watcher has flagged dirty, or
// nil when no watcher is active for this repository. A nil result means
// "unknown — treat every path as potentially dirty".
func dirtySet(r *repo.Repo) (map[string]bool, error) {
	if !watcher.IsActive(r.ArcheDir()) {
		return nil, nil
	}
	entries, err := r.Store.ListDirtyWCacheEntries()
	if err != nil {
		return nil, err
	}
	set := make(map[string]bool, len(entries))
	for _, entry := range entries {
		set[entry.Path] = true
	}
	return set, nil
}
// FileStatus describes one working-copy path and how it differs from
// the head commit: 'A' added, 'M' modified, 'D' deleted.
type FileStatus struct {
// Path is the repo-relative, slash-separated file path.
Path string
// Status is one of 'A', 'M', or 'D'.
Status rune
}
// WC provides working-copy operations (snapshot, status, materialize)
// on top of a repository.
type WC struct {
// Repo is the repository this working copy belongs to.
Repo *repo.Repo
// SignKey, when non-empty, is used to sign commits created through
// this handle (see maybeSign).
SignKey string
}
// New returns a WC bound to the given repository. SignKey is left
// empty, so commits are unsigned until it is set.
func New(r *repo.Repo) *WC {
	return &WC{Repo: r}
}
// maybeSign signs the commit body with wc.SignKey when a key is
// configured; with no key it is a no-op. On success the signature is
// stored in c.CommitSig.
func (wc *WC) maybeSign(c *object.Commit) error {
	key := wc.SignKey
	if key == "" {
		return nil
	}
	sig, _, err := object.SignCommitBody(object.CommitBodyForSigning(c), key)
	if err != nil {
		return fmt.Errorf("commit signing: %w", err)
	}
	c.CommitSig = sig
	return nil
}
// snapshotIntoTx writes, inside tx, a commit whose tree captures the
// files in paths, reusing headCommit's parents, change ID, author, and
// phase. It also rebuilds the wcache to match what was snapshotted.
//
// cacheMap is the previous wcache keyed by path. dirty, when non-nil,
// is the watcher's dirty-path set (nil means "unknown; verify every
// path by stat"). Returns the new commit and its ID; on error the
// caller is expected to roll back tx.
func (wc *WC) snapshotIntoTx(tx *store.Tx, headCommit *object.Commit, paths []string, cacheMap map[string]store.WCacheEntry, dirty map[string]bool, message string, now time.Time) (*object.Commit, [32]byte, error) {
r := wc.Repo
var entries []fileEntry
// The wcache is rebuilt from scratch as paths are processed below.
if err := r.Store.ClearWCache(tx); err != nil {
return nil, object.ZeroID, fmt.Errorf("clear wcache: %w", err)
}
for _, rel := range paths {
// Fast path: the watcher says this path is clean, so the cached
// blob ID can be reused without even stat'ing the file.
if dirty != nil && !dirty[rel] {
if cached, ok := cacheMap[rel]; ok {
entries = append(entries, fileEntry{
path: rel,
blobID: cached.BlobID,
mode: object.EntryMode(cached.Mode),
})
if err := r.Store.SetWCacheEntry(tx, cached); err != nil {
return nil, object.ZeroID, fmt.Errorf("set wcache: %w", err)
}
continue
}
}
abs := filepath.Join(r.Root, rel)
info, err := os.Lstat(abs)
if err != nil {
// File vanished between listing and stat: drop it from the snapshot.
continue
}
var blobID [32]byte
mode := fileMode(info)
// Second fast path: trust the cached blob ID when the file's
// inode, mtime, and size are all unchanged since it was cached.
if cached, ok := cacheMap[rel]; ok {
st := info.Sys().(*syscall.Stat_t)
inode := st.Ino
mtime := info.ModTime().UnixNano()
size := info.Size()
if cached.Inode == inode && cached.MtimeNs == mtime && cached.Size == size {
blobID = cached.BlobID
}
}
// Slow path: read the file and write a fresh blob.
if blobID == object.ZeroID {
content, err := readFileContent(abs, info)
if err != nil {
return nil, object.ZeroID, err
}
id, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: content})
if err != nil {
return nil, object.ZeroID, err
}
blobID = id
}
st := info.Sys().(*syscall.Stat_t)
if err := r.Store.SetWCacheEntry(tx, store.WCacheEntry{
Path: rel,
Inode: st.Ino,
MtimeNs: info.ModTime().UnixNano(),
Size: info.Size(),
BlobID: blobID,
Mode: uint8(mode),
}); err != nil {
return nil, object.ZeroID, fmt.Errorf("set wcache: %w", err)
}
entries = append(entries, fileEntry{path: rel, blobID: blobID, mode: mode})
}
tree, err := buildTree(r, tx, entries)
if err != nil {
return nil, object.ZeroID, err
}
sig := object.Signature{
Name: r.Cfg.User.Name,
Email: r.Cfg.User.Email,
Timestamp: now,
}
c := &object.Commit{
TreeID: tree,
Parents: headCommit.Parents,
ChangeID: headCommit.ChangeID,
Author: headCommit.Author,
Committer: sig,
Message: message,
Phase: headCommit.Phase,
}
// A zero author timestamp means headCommit never had a real author
// (fresh draft); adopt the current signature as author too.
if headCommit.Author.Timestamp.IsZero() {
c.Author = sig
}
if err := wc.maybeSign(c); err != nil {
return nil, object.ZeroID, err
}
commitID, err := repo.WriteCommitTx(r.Store, tx, c)
if err != nil {
return nil, object.ZeroID, err
}
// Point the change at its new commit.
if err := r.Store.SetChangeCommit(tx, c.ChangeID, commitID); err != nil {
return nil, object.ZeroID, err
}
return c, commitID, nil
}
// snapshotInput gathers everything a snapshot needs: candidate file
// paths, the current wcache keyed by path, and (when a watcher is
// active) the dirty-path set. With an active watcher the path list is
// the union of cached and dirty paths, avoiding a full tree walk;
// otherwise the working copy is walked.
func (wc *WC) snapshotInput() (paths []string, cacheMap map[string]store.WCacheEntry, dirty map[string]bool, err error) {
	r := wc.Repo
	cacheEntries, err := r.Store.ListWCacheEntries()
	if err != nil {
		return nil, nil, nil, err
	}
	cacheMap = make(map[string]store.WCacheEntry, len(cacheEntries))
	for _, entry := range cacheEntries {
		cacheMap[entry.Path] = entry
	}
	// Best effort: a dirtySet error degrades to a full scan (dirty == nil).
	dirty, _ = dirtySet(r)
	if dirty == nil {
		paths, err = wc.trackedPaths()
		if err != nil {
			return nil, nil, nil, err
		}
		return paths, cacheMap, nil, nil
	}
	for p := range cacheMap {
		paths = append(paths, p)
	}
	for p := range dirty {
		if _, inCache := cacheMap[p]; !inCache {
			paths = append(paths, p)
		}
	}
	return paths, cacheMap, dirty, nil
}
// Snapshot amends the head commit with the current working-copy
// contents — same parents and change ID, new tree — inside a single
// store transaction. It returns the new commit and its ID.
func (wc *WC) Snapshot(message string) (*object.Commit, [32]byte, error) {
	rp := wc.Repo
	now := time.Now()
	headCommit, _, err := rp.HeadCommit()
	if err != nil {
		return nil, object.ZeroID, err
	}
	paths, cache, dirty, err := wc.snapshotInput()
	if err != nil {
		return nil, object.ZeroID, err
	}
	txn, err := rp.Store.Begin()
	if err != nil {
		return nil, object.ZeroID, err
	}
	snap, snapID, err := wc.snapshotIntoTx(txn, headCommit, paths, cache, dirty, message, now)
	if err != nil {
		rp.Store.Rollback(txn)
		return nil, object.ZeroID, err
	}
	if err := rp.Store.Commit(txn); err != nil {
		return nil, object.ZeroID, err
	}
	return snap, snapID, nil
}
func (wc *WC) Snap(message string) (*object.Commit, [32]byte, error) {
r := wc.Repo
now := time.Now()
before, err := r.CaptureRefState()
if err != nil {
return nil, object.ZeroID, err
}
statusBefore, err := wc.Status()
if err != nil {
return nil, object.ZeroID, err
}
diffPaths := make(map[string]bool, len(statusBefore))
for _, fsEntry := range statusBefore {
diffPaths[fsEntry.Path] = true
}
useRestrictedPaths := len(r.Cfg.Hooks.PreSnap) > 0
if useRestrictedPaths {
if err := RunHooksSequential(r.Root, "pre-snap", r.Cfg.Hooks.PreSnap); err != nil {
return nil, object.ZeroID, fmt.Errorf("pre-snap hook failed: %w", err)
}
}
head,
-_
+oldHeadID
, err := r.HeadCommit()
if err != nil {
return nil, object.ZeroID, err
}
type snapshotFn func(tx *store.Tx) (*object.Commit, [32]byte, error)
var doSnapshot snapshotFn
if useRestrictedPaths {
headBlobs := make(map[string][32]byte)
headModes := make(map[string]object.EntryMode)
if err := flattenTree(r, head.TreeID, "", headBlobs); err != nil {
return nil, object.ZeroID, err
}
if err := flattenTreeModes(r, head.TreeID, "", headModes); err != nil {
return nil, object.ZeroID, err
}
doSnapshot = func(tx *store.Tx) (*object.Commit, [32]byte, error) {
return wc.snapshotRestrictedPathsIntoTx(tx, head, headBlobs, headModes, diffPaths, message, now)
}
} else {
paths, cacheMap, dirty, err := wc.snapshotInput()
if err != nil {
return nil, object.ZeroID, err
}
doSnapshot = func(tx *store.Tx) (*object.Commit, [32]byte, error) {
return wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
}
}
+ existingBookmarks, _ := r.Store.ListBookmarks()
+
tx, err := r.Store.Begin()
if err != nil {
return nil, object.ZeroID, err
}
snapped, snappedID, err := doSnapshot(tx)
if err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
+ if snappedID != oldHeadID {
+ for _, bm := range existingBookmarks {
+ if bm.CommitID == oldHeadID {
+ _ = r.Store.SetBookmark(tx, store.Bookmark{
+ Name: bm.Name,
+ CommitID: snappedID,
+ Remote: bm.Remote,
+ })
+ }
+ }
+ }
+
newChangeID, err := r.Store.AllocChangeID(tx)
if err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
newDraft := &object.Commit{
TreeID: snapped.TreeID,
Parents: [][32]byte{snappedID},
ChangeID: newChangeID,
Author: sig,
Committer: sig,
Message: "",
Phase: object.PhaseDraft,
}
newDraftID, err := repo.WriteCommitTx(r.Store, tx, newDraft)
if err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
if err := r.Store.SetChangeCommit(tx, newChangeID, newDraftID); err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
after := buildRefState(snappedID, object.FormatChangeID(newChangeID))
op := store.Operation{
Kind: "snap",
Timestamp: now.Unix(),
Before: before,
After: after,
Metadata: "'" + firstLine(snapped.Message) + "'",
}
if _, err := r.Store.InsertOperation(tx, op); err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
if err := r.Store.Commit(tx); err != nil {
return nil, object.ZeroID, err
}
if err := r.WriteHead(object.FormatChangeID(newChangeID)); err != nil {
return nil, object.ZeroID, err
}
if len(r.Cfg.Hooks.PostSnap) > 0 {
if err := RunHooksSequential(r.Root, "post-snap", r.Cfg.Hooks.PostSnap); err != nil {
fmt.Fprintf(os.Stderr, "arche snap: post-snap hook: %v\n", err)
}
}
return snapped, snappedID, nil
}
// Status compares the working copy against the head commit's tree and
// returns one FileStatus per differing path: 'D' for files in head but
// missing on disk, 'M' for files whose content differs, and 'A' for
// files on disk that head does not track. Results are sorted by path.
func (wc *WC) Status() ([]FileStatus, error) {
r := wc.Repo
head, _, err := r.HeadCommit()
if err != nil {
return nil, err
}
// Flatten head's tree into path -> blob ID.
headFiles := make(map[string][32]byte)
if err := flattenTree(r, head.TreeID, "", headFiles); err != nil {
return nil, err
}
wcPaths, err := wc.trackedPaths()
if err != nil {
return nil, err
}
wcSet := make(map[string]bool, len(wcPaths))
for _, p := range wcPaths {
wcSet[p] = true
}
// Best-effort caches: an error just means blob IDs get recomputed.
cacheEntries, _ := r.Store.ListWCacheEntries()
cacheMap := make(map[string]store.WCacheEntry, len(cacheEntries))
for _, e := range cacheEntries {
cacheMap[e.Path] = e
}
dirty, _ := dirtySet(r)
var out []FileStatus
for path, headBlobID := range headFiles {
if !wcSet[path] {
out = append(out, FileStatus{Path: path, Status: 'D'})
continue
}
// Watcher says clean: compare the cached blob ID without reading
// the file from disk.
if dirty != nil && !dirty[path] {
if cached, ok := cacheMap[path]; ok {
if cached.BlobID != headBlobID {
out = append(out, FileStatus{Path: path, Status: 'M'})
}
continue
}
}
curBlobID, err := wc.blobIDForPath(path)
if err != nil {
// Unreadable file: skip it rather than fail the whole status.
continue
}
if curBlobID != headBlobID {
out = append(out, FileStatus{Path: path, Status: 'M'})
}
}
// Files present on disk but absent from head are additions, unless
// they match an ignore rule.
ignore, _ := loadIgnore(r.Root)
for _, path := range wcPaths {
if _, inHead := headFiles[path]; !inHead {
if ignore.Match(path) {
continue
}
out = append(out, FileStatus{Path: path, Status: 'A'})
}
}
sort.Slice(out, func(i, j int) bool { return out[i].Path < out[j].Path })
return out, nil
}
// materializeDisk makes the on-disk working copy match treeID: files
// not in the tree are removed (the .arche dir and ignored paths are
// left alone) and every tree entry is written out. It returns the
// path->blob and path->mode maps of what was materialized; paths whose
// blob turned out to be a conflict object are rendered with conflict
// markers and deleted from the returned blob map so they are not later
// cached as clean.
func (wc *WC) materializeDisk(treeID [32]byte) (map[string][32]byte, map[string]object.EntryMode, error) {
r := wc.Repo
wantFiles := make(map[string][32]byte)
wantMode := make(map[string]object.EntryMode)
if err := flattenTree(r, treeID, "", wantFiles); err != nil {
return nil, nil, err
}
if err := flattenTreeModes(r, treeID, "", wantMode); err != nil {
return nil, nil, err
}
ignore, _ := loadIgnore(r.Root)
// Pass 1: delete files the target tree does not contain.
err := filepath.WalkDir(r.Root, func(path string, d fs.DirEntry, err error) error {
if err != nil {
// Unreadable entries are skipped rather than aborting the walk.
return nil
}
rel, _ := filepath.Rel(r.Root, path)
if rel == "." {
return nil
}
if d.IsDir() {
// Never descend into the repository's own metadata directory.
if rel == archeDirName || strings.HasPrefix(rel, archeDirName+string(os.PathSeparator)) {
return filepath.SkipDir
}
return nil
}
if ignore.Match(rel) {
return nil
}
if _, ok := wantFiles[rel]; !ok {
return os.Remove(path)
}
return nil
})
if err != nil {
return nil, nil, err
}
// Pass 2: write every wanted file.
var conflictPaths []string
for relPath, blobID := range wantFiles {
abs := filepath.Join(r.Root, relPath)
if err := os.MkdirAll(filepath.Dir(abs), 0o755); err != nil {
return nil, nil, err
}
content, err := r.ReadBlob(blobID)
if err != nil {
// Not a plain blob: it may be a conflict object, which is written
// out as git-style marker text instead.
if conf, cErr := r.ReadConflict(blobID); cErr == nil {
content = renderConflictMarkers(r, conf)
conflictPaths = append(conflictPaths, relPath)
err = nil
}
}
if err != nil {
return nil, nil, err
}
perm := fs.FileMode(0o644)
if wantMode[relPath] == object.ModeExec {
perm = 0o755
}
if err := os.WriteFile(abs, content, perm); err != nil {
return nil, nil, err
}
}
// Conflicted paths must not be reported as cleanly materialized.
for _, p := range conflictPaths {
delete(wantFiles, p)
}
return wantFiles, wantMode, nil
}
// renderConflictMarkers renders a conflict object as git-style conflict
// text ("<<<<<<< ours ... ======= ... >>>>>>> theirs"). A side whose
// blob ID is zero is shown as "(deleted)". Blob reads are best-effort;
// an unreadable side renders as empty.
func renderConflictMarkers(r *repo.Repo, conf *object.Conflict) []byte {
	side := func(id [32]byte) string {
		if id == object.ZeroID {
			return ""
		}
		content, _ := r.ReadBlob(id)
		s := string(content)
		// Guarantee a trailing newline so markers stay on their own lines.
		if s != "" && !strings.HasSuffix(s, "\n") {
			s += "\n"
		}
		return s
	}
	ours, theirs := conf.Ours.BlobID, conf.Theirs.BlobID
	switch {
	case ours == object.ZeroID:
		return []byte("<<<<<<< ours\n(deleted)\n=======\n" + side(theirs) + ">>>>>>> theirs\n")
	case theirs == object.ZeroID:
		return []byte("<<<<<<< ours\n" + side(ours) + "=======\n(deleted)\n>>>>>>> theirs\n")
	default:
		return []byte("<<<<<<< ours\n" + side(ours) + "=======\n" + side(theirs) + ">>>>>>> theirs\n")
	}
}
// populateWCacheInTx rebuilds the wcache inside tx from files just
// written to disk: for each wanted path it records the file's
// inode/mtime/size alongside the blob ID it was materialized from.
// Paths that cannot be stat'ed (or whose Sys() is not a
// *syscall.Stat_t) are skipped; individual cache writes are best-effort.
func (wc *WC) populateWCacheInTx(tx *store.Tx, wantFiles map[string][32]byte) error {
	r := wc.Repo
	if err := r.Store.ClearWCache(tx); err != nil {
		return err
	}
	for rel, blob := range wantFiles {
		info, statErr := os.Lstat(filepath.Join(r.Root, rel))
		if statErr != nil {
			continue
		}
		stat, ok := info.Sys().(*syscall.Stat_t)
		if !ok {
			continue
		}
		entry := store.WCacheEntry{
			Path:    rel,
			Inode:   stat.Ino,
			MtimeNs: info.ModTime().UnixNano(),
			Size:    info.Size(),
			BlobID:  blob,
			Mode:    uint8(fileMode(info)),
		}
		_ = r.Store.SetWCacheEntry(tx, entry) // best-effort cache fill
	}
	return nil
}
// MaterializeQuiet brings the working copy in line with treeID and
// refreshes the wcache, without recording an operation in the oplog.
func (wc *WC) MaterializeQuiet(treeID [32]byte) error {
	rp := wc.Repo
	files, _, err := wc.materializeDisk(treeID)
	if err != nil {
		return err
	}
	txn, err := rp.Store.Begin()
	if err != nil {
		return err
	}
	if cacheErr := wc.populateWCacheInTx(txn, files); cacheErr != nil {
		rp.Store.Rollback(txn)
		return cacheErr
	}
	return rp.Store.Commit(txn)
}
// Materialize brings the working copy in line with treeID, rebuilds the
// wcache, and records a "co" (checkout) operation pointing at
// newChangeID in the oplog.
func (wc *WC) Materialize(treeID [32]byte, newChangeID string) error {
	rp := wc.Repo
	before, _ := rp.CaptureRefState() // best-effort: oplog "before" may be empty
	now := time.Now()
	files, _, err := wc.materializeDisk(treeID)
	if err != nil {
		return err
	}
	bareID := object.StripChangeIDPrefix(newChangeID)
	commitID, _ := rp.Store.GetChangeCommit(bareID) // best-effort lookup for the oplog
	after := buildRefState(commitID, newChangeID)
	txn, err := rp.Store.Begin()
	if err != nil {
		return err
	}
	if cacheErr := wc.populateWCacheInTx(txn, files); cacheErr != nil {
		rp.Store.Rollback(txn)
		return cacheErr
	}
	record := store.Operation{
		Kind:      "co",
		Timestamp: now.Unix(),
		Before:    before,
		After:     after,
		Metadata:  "checked out " + newChangeID,
	}
	if _, err := rp.Store.InsertOperation(txn, record); err != nil {
		rp.Store.Rollback(txn)
		return err
	}
	return rp.Store.Commit(txn)
}
// archeDirName is the repository metadata directory, skipped by every
// working-copy walk.
const archeDirName = ".arche"
// trackedPaths walks the working copy and returns every file path
// (repo-relative, slash-separated), skipping the .arche metadata
// directory and anything matched by the ignore rules.
func (wc *WC) trackedPaths() ([]string, error) {
	r := wc.Repo
	ignore, _ := loadIgnore(r.Root) // best-effort: no ignore file means no rules
	var out []string
	walkErr := filepath.WalkDir(r.Root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return nil // unreadable entries are skipped, not fatal
		}
		rel, _ := filepath.Rel(r.Root, p)
		switch {
		case rel == ".":
			return nil
		case d.IsDir():
			if rel == archeDirName || strings.HasPrefix(rel, archeDirName+string(os.PathSeparator)) {
				return filepath.SkipDir
			}
			if ignore.MatchDir(rel) {
				return filepath.SkipDir
			}
			return nil
		case ignore.Match(rel):
			return nil
		}
		out = append(out, filepath.ToSlash(rel))
		return nil
	})
	return out, walkErr
}
// blobIDForPath computes the blob ID the file at rel currently hashes
// to. When the wcache entry's inode/mtime/size still match the file,
// the cached blob ID is returned without reading the file.
func (wc *WC) blobIDForPath(rel string) ([32]byte, error) {
	r := wc.Repo
	abs := filepath.Join(r.Root, rel)
	info, err := os.Lstat(abs)
	if err != nil {
		return object.ZeroID, err
	}
	stat := info.Sys().(*syscall.Stat_t)
	cached, _ := r.Store.GetWCacheEntry(rel) // best-effort cache lookup
	if cached != nil &&
		cached.Inode == stat.Ino &&
		cached.MtimeNs == info.ModTime().UnixNano() &&
		cached.Size == info.Size() {
		return cached.BlobID, nil
	}
	content, err := readFileContent(abs, info)
	if err != nil {
		return object.ZeroID, err
	}
	return object.HashBlob(&object.Blob{Content: content}), nil
}
// flattenTree recursively expands treeID into out, mapping each file's
// slash-joined path (under prefix) to its blob ID. A zero tree ID is
// treated as an empty tree.
func flattenTree(r *repo.Repo, treeID [32]byte, prefix string, out map[string][32]byte) error {
	if treeID == object.ZeroID {
		return nil
	}
	tree, err := r.ReadTree(treeID)
	if err != nil {
		return err
	}
	for _, entry := range tree.Entries {
		full := join(prefix, entry.Name)
		if entry.Mode == object.ModeDir {
			if err := flattenTree(r, entry.ObjectID, full, out); err != nil {
				return err
			}
			continue
		}
		out[full] = entry.ObjectID
	}
	return nil
}
// flattenTreeModes recursively expands treeID into out, mapping each
// file's slash-joined path (under prefix) to its entry mode. A zero
// tree ID is treated as an empty tree.
func flattenTreeModes(r *repo.Repo, treeID [32]byte, prefix string, out map[string]object.EntryMode) error {
	if treeID == object.ZeroID {
		return nil
	}
	tree, err := r.ReadTree(treeID)
	if err != nil {
		return err
	}
	for _, entry := range tree.Entries {
		full := join(prefix, entry.Name)
		if entry.Mode == object.ModeDir {
			if err := flattenTreeModes(r, entry.ObjectID, full, out); err != nil {
				return err
			}
			continue
		}
		out[full] = entry.Mode
	}
	return nil
}
// fileEntry is one file destined for a snapshot tree: its repo-relative
// slash path, the blob it points at, and its entry mode.
type fileEntry struct {
path string
blobID [32]byte
mode object.EntryMode
}
// buildTree assembles a tree-object hierarchy from a flat list of file
// entries (slash-separated paths), writing every subtree into tx and
// returning the root tree ID. Entries within a tree are sorted by name
// so identical content always hashes identically.
func buildTree(r *repo.Repo, tx *store.Tx, entries []fileEntry) ([32]byte, error) {
	type node struct {
		isFile   bool
		blobID   [32]byte
		mode     object.EntryMode
		children map[string]*node
	}
	newDir := func() *node { return &node{children: make(map[string]*node)} }
	root := newDir()
	// Insert every entry into a path trie.
	for _, fe := range entries {
		segs := strings.Split(fe.path, "/")
		dir := root
		for _, seg := range segs[:len(segs)-1] {
			next, ok := dir.children[seg]
			if !ok {
				next = newDir()
				dir.children[seg] = next
			}
			dir = next
		}
		dir.children[segs[len(segs)-1]] = &node{isFile: true, blobID: fe.blobID, mode: fe.mode}
	}
	// Post-order write: children first, then the tree referencing them.
	var write func(n *node) ([32]byte, error)
	write = func(n *node) ([32]byte, error) {
		var out []object.TreeEntry
		for name, child := range n.children {
			te := object.TreeEntry{Name: name}
			if child.isFile {
				te.Mode = child.mode
				te.ObjectID = child.blobID
			} else {
				subID, err := write(child)
				if err != nil {
					return object.ZeroID, err
				}
				te.Mode = object.ModeDir
				te.ObjectID = subID
			}
			out = append(out, te)
		}
		sort.Slice(out, func(i, j int) bool { return out[i].Name < out[j].Name })
		return repo.WriteTreeTx(r.Store, tx, &object.Tree{Entries: out})
	}
	return write(root)
}
// fileMode maps an os.FileInfo (from Lstat) to the object store's entry
// mode.
//
// The symlink test MUST come before the execute-bit test: on most Unix
// systems Lstat reports symlinks with permission bits set (e.g. 0777 on
// Linux), so checking info.Mode()&0o111 first would misclassify every
// symlink as an executable file and make ModeSymlink unreachable.
func fileMode(info os.FileInfo) object.EntryMode {
	m := info.Mode()
	switch {
	case m&os.ModeSymlink != 0:
		return object.ModeSymlink
	case m&0o111 != 0:
		return object.ModeExec
	default:
		return object.ModeFile
	}
}
func readFileContent(abs string, info os.FileInfo) ([]byte, error) {
if info.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(abs)
if err != nil {
return nil, err
}
return []byte(target), nil
}
return os.ReadFile(abs)
}
// join concatenates a path prefix and a name with "/", treating an
// empty prefix as the tree root.
func join(prefix, name string) string {
	if prefix != "" {
		return prefix + "/" + name
	}
	return name
}
// buildRefState encodes the ref state (head change ID plus hex tip
// commit) as a small JSON object for oplog before/after snapshots.
func buildRefState(commitID [32]byte, changeID string) string {
	encoded, _ := json.Marshal(struct {
		Head string `json:"head"`
		Tip  string `json:"tip"`
	}{
		Head: changeID,
		Tip:  fmt.Sprintf("%x", commitID),
	})
	return string(encoded)
}
// firstLine returns s up to (not including) the first newline, or all
// of s when it contains none.
func firstLine(s string) string {
	line, _, _ := strings.Cut(s, "\n")
	return line
}
// Amend rewrites the head commit in place with the current working-copy
// contents and (optionally) a new message; public commits are refused.
// The old head is marked obsolete with the new commit as successor, an
// "amend" operation is recorded, and draft descendants of the old head
// are auto-rebased onto the result (a rebase failure only warns).
func (wc *WC) Amend(message string) (*object.Commit, [32]byte, error) {
r := wc.Repo
now := time.Now()
head, oldHeadID, err := r.HeadCommit()
if err != nil {
return nil, object.ZeroID, err
}
if head.Phase == object.PhasePublic {
return nil, object.ZeroID, fmt.Errorf("cannot amend a public commit; use --force-rewrite if you are sure")
}
before, err := r.CaptureRefState()
if err != nil {
return nil, object.ZeroID, err
}
// An empty message keeps the existing one.
if message == "" {
message = head.Message
}
paths, cacheMap, dirty, err := wc.snapshotInput()
if err != nil {
return nil, object.ZeroID, err
}
tx, err := r.Store.Begin()
if err != nil {
return nil, object.ZeroID, err
}
amended, amendedID, err := wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
if err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
// Only write an obsolete marker when the amend actually produced a
// different commit (a no-op amend hashes identically).
if oldHeadID != amendedID {
obs := &object.ObsoleteMarker{
Predecessor: oldHeadID,
Successors: [][32]byte{amendedID},
Reason: "amend",
Timestamp: now.Unix(),
}
if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
}
after := buildRefState(amendedID, object.FormatChangeID(amended.ChangeID))
op := store.Operation{
Kind: "amend",
Timestamp: now.Unix(),
Before: before,
After: after,
Metadata: "'" + firstLine(amended.Message) + "'",
}
if _, err := r.Store.InsertOperation(tx, op); err != nil {
r.Store.Rollback(tx)
return nil, object.ZeroID, err
}
if err := r.Store.Commit(tx); err != nil {
return nil, object.ZeroID, err
}
// The downstream rebase runs outside the transaction; failure leaves
// the amend committed and only prints a warning.
if oldHeadID != amendedID {
if err := wc.autoRebaseDownstream(oldHeadID, amendedID, head.ChangeID, now); err != nil {
fmt.Fprintf(os.Stderr, "arche: warning: downstream rebase failed: %v\n", err)
}
}
return amended, amendedID, nil
}
// autoRebaseDownstream rebases every draft commit that (transitively)
// had oldParentID as its first parent onto the corresponding rewritten
// parent, after an amend replaced oldParentID with newParentID. Each
// rebased draft gets a new commit, a change-pointer update, and an
// obsolete marker, committed in its own transaction so earlier
// successes survive a later failure. The head change itself is skipped.
func (wc *WC) autoRebaseDownstream(oldParentID, newParentID [32]byte, headChangeID string, now time.Time) error {
r := wc.Repo
allChanges, err := r.Store.ListChanges()
if err != nil {
return err
}
type draftEntry struct {
id [32]byte
changeID string
commit *object.Commit
}
// Index draft commits by their first parent.
children := make(map[[32]byte][]draftEntry)
for _, ch := range allChanges {
if ch.CommitID == object.ZeroID {
continue
}
c, err := r.ReadCommit(ch.CommitID)
if err != nil || c == nil {
// Unreadable changes are skipped rather than failing the rebase.
continue
}
if c.Phase != object.PhaseDraft {
continue
}
if c.ChangeID == headChangeID {
continue
}
if len(c.Parents) == 0 {
continue
}
d := draftEntry{id: ch.CommitID, changeID: ch.Name, commit: c}
children[c.Parents[0]] = append(children[c.Parents[0]], d)
}
type rebaseTask struct {
entry draftEntry
newParent [32]byte
}
// BFS from the amended commit collects tasks in parent-before-child
// order; each task's real new parent is resolved later through
// `remapped`, which is filled in as parents get rewritten.
// NOTE(review): rebaseTask.newParent appears to be written but never
// read below (the remapped lookup supersedes it) — confirm and clean up.
var tasks []rebaseTask
queue := []struct {
oldID [32]byte
newID [32]byte
}{{oldParentID, newParentID}}
for len(queue) > 0 {
cur := queue[0]
queue = queue[1:]
for _, child := range children[cur.oldID] {
tasks = append(tasks, rebaseTask{entry: child, newParent: cur.newID})
queue = append(queue, struct{ oldID, newID [32]byte }{child.id, child.id})
}
}
// remapped tracks old commit ID -> rewritten commit ID as tasks complete.
remapped := map[[32]byte][32]byte{oldParentID: newParentID}
for _, task := range tasks {
oldFirst := task.entry.commit.Parents[0]
newParent, ok := remapped[oldFirst]
if !ok {
newParent = oldFirst
}
// Merge base is the old parent's tree; if that commit cannot be read
// the zero tree is silently used as the base.
var baseTreeID [32]byte
if pc, err2 := r.ReadCommit(oldFirst); err2 == nil {
baseTreeID = pc.TreeID
}
newParentCommit, err := r.ReadCommit(newParent)
if err != nil {
return fmt.Errorf("read new parent for %s: %w", object.FormatChangeID(task.entry.changeID), err)
}
// Three-way merge of the draft's tree with its new parent's tree.
result, err := merge.Trees(r, baseTreeID, task.entry.commit.TreeID, newParentCommit.TreeID)
if err != nil {
return fmt.Errorf("merge for %s: %w", object.FormatChangeID(task.entry.changeID), err)
}
newCommit := &object.Commit{
TreeID: result.TreeID,
Parents: [][32]byte{newParent},
ChangeID: task.entry.changeID,
Author: task.entry.commit.Author,
Committer: object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now},
Message: task.entry.commit.Message,
Phase: task.entry.commit.Phase,
}
// Each rebase is its own transaction.
tx, err := r.Store.Begin()
if err != nil {
return err
}
newCommitID, err := repo.WriteCommitTx(r.Store, tx, newCommit)
if err != nil {
r.Store.Rollback(tx)
return err
}
if err := r.Store.SetChangeCommit(tx, task.entry.changeID, newCommitID); err != nil {
r.Store.Rollback(tx)
return err
}
obs := &object.ObsoleteMarker{
Predecessor: task.entry.id,
Successors: [][32]byte{newCommitID},
Reason: "amend",
Timestamp: now.Unix(),
}
if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
r.Store.Rollback(tx)
return err
}
if err := r.Store.Commit(tx); err != nil {
return err
}
remapped[task.entry.id] = newCommitID
conflictNote := ""
if len(result.Conflicts) > 0 {
conflictNote = fmt.Sprintf(" (%d conflict(s))", len(result.Conflicts))
}
fmt.Printf(" auto-rebased %s%s\n", object.FormatChangeID(task.entry.changeID), conflictNote)
}
return nil
}