arche / commit

commit 154431fddfaac3d06c4f391498bf6691350873e77404f471db4c80e6b19060b9
change yedmhbwv
author dewn <dewn5228@proton.me>
committer dewn <dewn5228@proton.me>
date 2026-03-11 16:49:01
phase public
bookmarks main
parents e3cb2784
signature Unsigned
srv: phase-only log visibility; bookmark filter; fix test URLs
internal/archesrv/handlers_repo.go [M]
--- a/internal/archesrv/handlers_repo.go
+++ b/internal/archesrv/handlers_repo.go
@@ -1,741 +1,779 @@
 package archesrv
 
 import (
 	"bytes"
 	"encoding/hex"
 	"fmt"
 	"html/template"
 	"net/http"
 	"s
+ort"
+	"s
 trings"
 
 	"arche/internal/diff"
 	"arche/internal/object"
 	"arche/internal/repo"
 	"arche/internal/revset"
 	"arche/internal/syncpkg"
 
 	"github.com/alecthomas/chroma/v2"
 	chrhtml "github.com/alecthomas/chroma/v2/formatters/html"
 	"github.com/alecthomas/chroma/v2/lexers"
 	"github.com/alecthomas/chroma/v2/styles"
 	"golang.org/x/crypto/ssh"
 )
 
 func (s *forgeServer) requireRepoAccess(w http.ResponseWriter, r *http.Request) (*repo.Repo, *RepoRecord, bool) {
 	repoName := r.PathValue("repo")
 	rec, err := s.db.GetRepo(repoName)
 	if err != nil || rec == nil {
 		http.NotFound(w, r)
 		return nil, nil, false
 	}
 
 	user := s.db.currentUser(r)
 	if !s.db.CanRead(rec, user) {
 		http.Error(w, "Unauthorized", http.StatusUnauthorized)
 		return nil, nil, false
 	}
 
 	repoObj, err := openRepo(s.dataDir(), repoName)
 	if err != nil {
 		http.Error(w, "open repo: "+err.Error(), http.StatusInternalServerError)
 		return nil, nil, false
 	}
 
 	return repoObj, rec, true
 }
 
 func (s *forgeServer) handleSyncProxy(w http.ResponseWriter, r *http.Request) {
 	repoName := r.PathValue("repo")
 	rec, err := s.db.GetRepo(repoName)
 	if err != nil || rec == nil {
 		http.Error(w, "repo not found", http.StatusNotFound)
 		return
 	}
 
 	user := s.db.currentUser(r)
 
 	if r.Method != http.MethodGet && !s.db.CanWrite(rec, user) {
 		user := s.db.currentUser(r)
 		username := "anonymous"
 		if user != nil {
 			username = user.Username
 		}
 		s.log.Warn("sync write denied", "repo", repoName, "user", username)
 		http.Error(w, "Unauthorized", http.StatusUnauthorized)
 		return
 	}
 	if r.Method == http.MethodGet && !s.db.CanRead(rec, user) {
 		s.log.Warn("sync read denied", "repo", repoName)
 		http.Error(w, "Unauthorized", http.StatusUnauthorized)
 		return
 	}
 
 	repoObj, err := openRepo(s.dataDir(), repoName)
 	if err != nil {
 		http.Error(w, "open repo: "+err.Error(), http.StatusInternalServerError)
 		return
 	}
 	defer repoObj.Close()
 
 	action := strings.TrimPrefix(r.URL.Path, "/"+repoName)
 	r2 := r.Clone(r.Context())
 	r2.URL.Path = action
 
 	user = s.db.currentUser(r)
 	pusher := "anonymous"
 	if user != nil {
 		pusher = user.Username
 	}
 
 	srv := syncpkg.NewServer(repoObj, "")
 
 	repoKey := repoName
 	repoCfg := s.cfg.Repo[repoKey]
 	srv.PreUpdateHook = func(bm, oldHex, newHex string) error {
 		if s.cfg.Hooks.PreReceive != "" || s.cfg.Hooks.Update != "" {
 			if err := runPreReceiveHook(s.cfg.Hooks.PreReceive, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec); err != nil {
 				return err
 			}
 			if err := runPreReceiveHook(s.cfg.Hooks.Update, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec); err != nil {
 				return err
 			}
 		}
 		if repoCfg.RequireSignedCommits && user != nil {
 			for _, id := range collectNewCommitIDs(repoObj, oldHex, newHex) {
 				c, err := repoObj.ReadCommit(id)
 				if err != nil {
 					continue
 				}
 				if len(c.CommitSig) == 0 {
 					return fmt.Errorf("commit %s (ch:%s) is unsigned; this repository requires signed commits",
 						hex.EncodeToString(id[:8]), c.ChangeID)
 				}
 				body := object.CommitBodyForSigning(c)
 				keys, _ := s.db.ListSSHKeys(user.ID)
 				verified := false
 				for _, k := range keys {
 					pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.PublicKey))
 					if err != nil {
 						continue
 					}
 					if object.VerifyCommitSig(body, c.CommitSig, pub) == nil {
 						verified = true
 						break
 					}
 				}
 				if !verified {
 					return fmt.Errorf("commit %s (ch:%s) has an unverifiable signature; this repository requires commits signed by a registered key",
 						hex.EncodeToString(id[:8]), c.ChangeID)
 				}
 			}
 		}
 		return nil
 	}
 
 	srv.OnBookmarkUpdated = func(bm, oldHex, newHex string) {
 		s.db.FirePushWebhooks(repoName, pusher, bm, oldHex, newHex, collectPushCommits(repoObj, oldHex, newHex))
 		runPostReceiveHook(s.cfg.Hooks.PostReceive, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec)
 
 		if user != nil {
 			for _, id := range collectNewCommitIDs(repoObj, oldHex, newHex) {
 				c, err := repoObj.ReadCommit(id)
 				if err != nil {
 					continue
 				}
 				_ = s.db.RecordCommitSignature(repoObj, id, c, user.ID)
 			}
 		}
 
 		if allowed, script, _ := s.db.GetRepoHookConfig(rec.ID); allowed && script != "" {
 			if !s.db.hasWriteCollaborator(rec.ID) {
 				runPostReceiveHook(script, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec)
 			}
 		}
 	}
 	srv.Handler().ServeHTTP(w, r2)
 }
 
 func collectPushCommits(r *repo.Repo, oldHex, newHex string) []CommitRef {
 	if len(newHex) != 64 {
 		return []CommitRef{}
 	}
 	newBytes, err := hex.DecodeString(newHex)
 	if err != nil || len(newBytes) != 32 {
 		return []CommitRef{}
 	}
 	var newID [32]byte
 	copy(newID[:], newBytes)
 
 	var oldID [32]byte
 	if len(oldHex) == 64 {
 		if oldBytes, err2 := hex.DecodeString(oldHex); err2 == nil && len(oldBytes) == 32 {
 			copy(oldID[:], oldBytes)
 		}
 	}
 
 	seen := make(map[[32]byte]bool)
 	queue := [][32]byte{newID}
 	var results []CommitRef
 	const maxCommits = 50
 
 	for len(queue) > 0 && len(results) < maxCommits {
 		id := queue[0]
 		queue = queue[1:]
 		if seen[id] || id == oldID {
 			continue
 		}
 		seen[id] = true
 		c, err := r.ReadCommit(id)
 		if err != nil {
 			break
 		}
 		author := c.Author.Name
 		if c.Author.Email != "" {
 			author += " <" + c.Author.Email + ">"
 		}
 		results = append(results, CommitRef{
 			ID:       hex.EncodeToString(id[:]),
 			ChangeID: "ch:" + c.ChangeID,
 			Message:  c.Message,
 			Author:   author,
 		})
 		for _, p := range c.Parents {
 			if !seen[p] && p != oldID {
 				queue = append(queue, p)
 			}
 		}
 	}
 	return results
 }
 
 func (s *forgeServer) handleRepoHome(w http.ResponseWriter, r *http.Request) {
 	http.Redirect(w, r, "/"+r.PathValue("repo")+"/log", http.StatusFound)
 }
 
 type srvCommitRow struct {
 	HexID      string
 	ShortHex   string
 	ChangeID   string
 	Author     string
 	Date       string
 	Phase      string
 	PhaseClass string
 	Message    string
 	Bookmarks  []string
 	IsHead     bool
 }
 
 type srvLogData struct {
 	Repo      
+     
 string
 	User
+     
       *User
 	Commits   
+     
 []srvCommitRow
 	WhereExpr 
+     
 string
 	WhereErr  
+     string
+	BookmarkFilter string
+	AllBookmarks   []
 string
 }
 
 func (s *forgeServer) handleRepoLog(w http.ResponseWriter, r *http.Request) {
 	repoObj, rec, ok := s.requireRepoAccess(w, r)
 	if !ok {
 		return
 	}
 	defer repoObj.Close()
 
 	const maxCommits = 200
 	where := r.URL.Query().Get("where")
+	bookmarkFilter := r.URL.Query().Get("bookmark")
+
 	var whereFilter revset.Func
 	var whereErr string
 	if where != "" {
 		var err error
 		whereFilter, err = revset.Parse(where)
 		if err != nil {
 			whereErr = err.Error()
 		}
 	}
+
-	
+	
-_, 
 head
+C
 ID, 
-err
+_
  := repoObj.HeadC
-ommit
+hangeID
-()
-	
+()
+	
-if err
+bmMap
- 
+ 
-!
+:
-= 
+= 
-nil {
-		http.Err
+b
-o
+o
+okma
-r
+r
+kMap
-(
+(
-w
+repoObj)
+
+	allBms
-, 
+, 
-"HEAD
+_ 
-:
+:
+=
- 
+ 
-"+
+r
-e
+e
-r
+poObj.Sto
-r
+r
+e
-.
+.
-Err
+ListBo
-o
+o
+kma
-r
+r
+ks
-()
+()
-,
+
+	allBmNames :=
- 
+ 
-http.St
+m
-a
+a
-tu
+ke([]
-s
+s
-I
+tri
-n
+n
-t
+g, 0, l
-e
+e
-r
-n
+n
+(
-al
+al
-ServerError
+lBms)
-)
-	
+)
+	
-	retu
+fo
-r
+r
-n
-	}
-	if
- 
+ 
-def
+_, bm
- := r
+ := r
+ang
-e
+e
+ allBm
-s
+s
-o
+ {
+		al
-l
+l
-v
+BmNam
-e
+e
-D
+s = app
-e
+e
-f
+nd(
-a
+a
-u
-l
+l
-tCo
+lB
 m
+Na
 m
-it(r
 e
-poO
+s, 
 b
-j
+m.Name
 )
-;
+
+	}
+
+	var
  
+can
 d
+idat
 e
-f !=
+IDs
  
-(
+[]
 [32]byte
-{}) {
 
 	
-	he
+if bookm
 a
-dID
+rkFilter
  
+!
 = 
-def
+"" {
 
 	
-}
-
 	
-headCID
+bm
 , 
-_
+err
  := repoObj.
-H
+Stor
 e
-adChang
+.G
 e
-ID()
-	b
+tBook
 m
-M
 a
-p := 
+rk(
 bookmark
-Map(
+Filte
 r
+)
+		if 
 e
-poO
+rr == nil && 
 b
-j)
+m != nil {
 
+		
 	visited := map[[32]byte]bool{}
 	
+		
 queue := [][32]byte{
-headID}
-
-	var rows []srv
+bm.
 Commit
-Row
+ID}
 
+		
 	for len(queue) > 0 && len(
-row
+candidateID
 s) < maxCommits
+*2
  {
 		
+		
 id := queue[0]
+	
 		
+	
 queue = queue[1:]
-
+	
 		
+	
 if visited[id] {
+		
 			continue
 		
+		
 }
+		
 		visited[id] = true
+				candidateIDs = append(candidateIDs, id)
 
+		
 		c, err := repoObj.ReadCommit(id)
+	
 		
+	
 if err != nil {
 			
+		
 continue
 		
+		
 }
+				for _, p := range c.Parents {
 
 		
+			if !visited[
 p
-h
+] {
+						queue = 
 a
+ppend(queue, p)
+					}
+				}
+			}
+		}
+	} el
 se
+ {
+		var err error
+		candidateIDs
 , 
-_
+err
  
-:
 = repoObj.Store.
-Ge
+Lis
 tP
-ha
+ublicCommitID
 s
-e
 (
-id
 )
 		if 
-wh
 er
-eFilte
 r != nil 
-&& !w
+{
+			
 h
-e
+ttp.Er
 r
-eF
+or(w, "list comm
 i
-l
 t
+s: "+
 er
+r.Error
 (
-id, c
+)
 , 
+htt
 p
-h
+.St
 a
+tu
 s
+Int
 e
+rnalServerError
 )
+
+			return
+		}
+	}
+
+	type rowWithTime struct
  {
 		
+row  srvCommitRow
+		time int64
+	}
+	var withTimes []rowWithTime
 	for _, 
-p
+id
  := range c
-.P
 a
-re
 n
+dida
 t
+eID
 s {
 		
+c, err := repoObj.ReadCommit(id)
 		if
+ err
  !
-v
+= n
 i
-sited[p]
+l
  {
 			
+continue
 		
-qu
+}
+
+		phase, _ := repoObj.Store.G
 e
-u
+tPhas
 e
+(id)
+		if
  
+bookmarkFilter !
 = 
+"" && ph
 a
-pp
+se != obj
 e
-nd(qu
+ct.Phas
 e
+P
 u
-e,
+blic
  
-p)
+{
 
 			
+continue
+	
 	}
 		
-	}
+if whereFilter != nil && !whereFilter(id, c, phase) {
 
 			continue
 		}
 		hexID := fullHex(id)
 		msg := c.Message
 		if idx := strings.IndexByte(msg, '\n'); idx >= 0 {
 			msg = msg[:idx]
 		}
-
 		
-ro
 w
+ithTime
 s = append(
+withTimes, 
 row
-s,
+WithTime{
+			row:
  srvCommitRow{
+	
 			HexID:      hexID,
 			
+	
 ShortHex:   shortHex(id),
 			
+	
 ChangeID:   c.ChangeID,
+	
 			Author:     c.Author.Name,
 			
+	
 Date:       c.Author.Timestamp.Format("2006-01-02 15:04"),
 			
+	
 Phase:      phase.String(),
+	
 			PhaseClass: phaseClass(phase),
 			
+	
 Message:    msg,
+	
 			Bookmarks:  bmMap[hexID],
 			
+	
 IsHead:     c.ChangeID == headCID,
+	
 		}
+,
+			time: c.Author.Timestamp.Unix(
 )
+,
 
+		})
 
 	
+}
+
 	
-f
+s
 or
+t.Slice(withTimes,
  
-_
+func(i
 , 
-p
+j
  
-:=
+int) bool
  
+{
+		
 r
-a
+etur
 n
-g
+ withTimes[i].tim
 e 
-c
+> withTimes[j]
 .
-Par
+time
+	})
+	if l
 en
+(withTimes) > maxCommi
 ts {
 		
-	
+w
 i
-f !v
+thT
 i
+me
 s
+ = w
 it
+hTim
 e
-d
+s
 [
-p
+:maxCommits
 ]
- {
 
 	
+}
 	
-		queue
+rows
  
+:

internal/archesrv/server_test.go [M]
--- a/internal/archesrv/server_test.go
+++ b/internal/archesrv/server_test.go
@@ -1,282 +1,282 @@
 package archesrv
 
 import (
 	"io"
 	"log/slog"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
 	"os"
 	"path/filepath"
 	"testing"
 	"time"
 
 	"arche/internal/repo"
 	"arche/internal/store"
 	"arche/internal/syncpkg"
 	"arche/internal/wc"
 
 	_ "github.com/mattn/go-sqlite3"
 )
 
 func newTestServer(t *testing.T) (*forgeServer, *httptest.Server) {
 	t.Helper()
 	dir := t.TempDir()
 	db, err := openDB(filepath.Join(dir, "server.db"))
 	if err != nil {
 		t.Fatalf("openDB: %v", err)
 	}
 	t.Cleanup(func() { db.Close() })
 
 	cfg := DefaultConfig()
 	cfg.Storage.DataDir = dir
 
 	s := &forgeServer{
 		db:  db,
 		cfg: cfg,
 		log: slog.New(slog.NewTextHandler(io.Discard, nil)),
 	}
 	ts := httptest.NewServer(s.routes())
 	t.Cleanup(ts.Close)
 	return s, ts
 }
 
 func makeLocalRepoWithCommit(t *testing.T) *repo.Repo {
 	t.Helper()
 	dir := t.TempDir()
 	r, err := repo.Init(dir)
 	if err != nil {
 		t.Fatalf("repo.Init local: %v", err)
 	}
 	t.Cleanup(func() { r.Close() })
 
 	if err := os.WriteFile(filepath.Join(dir, "hello.txt"), []byte("hello arche\n"), 0o644); err != nil {
 		t.Fatalf("WriteFile: %v", err)
 	}
 
 	w := wc.New(r)
 	_, commitID, err := w.Snap("initial commit")
 	if err != nil {
 		t.Fatalf("Snap: %v", err)
 	}
 
 	tx, err := r.Store.Begin()
 	if err != nil {
 		t.Fatalf("Store.Begin: %v", err)
 	}
 	if err := r.Store.SetBookmark(tx, store.Bookmark{Name: "main", CommitID: commitID}); err != nil {
 		r.Store.Rollback(tx)
 		t.Fatalf("SetBookmark: %v", err)
 	}
 	if err := r.Store.Commit(tx); err != nil {
 		t.Fatalf("Store.Commit: %v", err)
 	}
 	return r
 }
 
 func TestForgeServer_SetupPage(t *testing.T) {
 	_, ts := newTestServer(t)
 	resp, err := http.Get(ts.URL + "/setup")
 	if err != nil {
 		t.Fatalf("GET /setup: %v", err)
 	}
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		t.Errorf("GET /setup: want 200, got %d", resp.StatusCode)
 	}
 }
 
 func TestForgeServer_SyncPushPull(t *testing.T) {
 	s, ts := newTestServer(t)
 
 	admin, err := s.db.CreateUser("admin", "adminpass", true)
 	if err != nil {
 		t.Fatalf("CreateUser: %v", err)
 	}
 	token, err := s.db.CreateAPIToken(admin.ID, "test")
 	if err != nil {
 		t.Fatalf("CreateAPIToken: %v", err)
 	}
 
 	if _, err := s.db.CreateRepo("testrepo", "test repo", "private"); err != nil {
 		t.Fatalf("CreateRepo: %v", err)
 	}
 	if _, err := repo.Init(filepath.Join(s.dataDir(), "testrepo")); err != nil {
 		t.Fatalf("repo.Init server side: %v", err)
 	}
 
 	localRepo := makeLocalRepoWithCommit(t)
 
 	remoteURL := ts.URL + "/
-arche/v1/
 testrepo"
 
 	pushClient := syncpkg.NewClient(localRepo, remoteURL, token)
 	if err := pushClient.Push(); err != nil {
 		t.Fatalf("Push: %v", err)
 	}
 
 	pullDir := t.TempDir()
 	pullRepo, err := repo.Init(pullDir)
 	if err != nil {
 		t.Fatalf("repo.Init pull: %v", err)
 	}
 	defer pullRepo.Close()
 
 	pullClient := syncpkg.NewClient(pullRepo, remoteURL, token)
 	if err := pullClient.Pull(); err != nil {
 		t.Fatalf("Pull: %v", err)
 	}
 
 	localBMs, err := localRepo.Store.ListBookmarks()
 	if err != nil {
 		t.Fatalf("ListBookmarks local: %v", err)
 	}
 	pullBMs, err := pullRepo.Store.ListBookmarks()
 	if err != nil {
 		t.Fatalf("ListBookmarks pulled: %v", err)
 	}
 	if len(pullBMs) == 0 {
 		t.Fatal("pulled repo has no bookmarks after Pull()")
 	}
 	for _, lb := range localBMs {
 		found := false
 		for _, pb := range pullBMs {
 			if lb.Name == pb.Name && lb.CommitID == pb.CommitID {
 				found = true
 				break
 			}
 		}
 		if !found {
 			t.Errorf("bookmark %q (commitID %x) missing from pulled repo", lb.Name, lb.CommitID[:8])
 		}
 	}
 
 	time.Sleep(50 * time.Millisecond)
 }
 
 func TestForgeServer_SyncUnauthorized(t *testing.T) {
 	s, ts := newTestServer(t)
 
 	admin, _ := s.db.CreateUser("admin", "adminpass", true)
 	_, _ = s.db.CreateAPIToken(admin.ID, "test")
 	_, _ = s.db.CreateRepo("privaterepo", "", "private")
 	_, _ = repo.Init(filepath.Join(s.dataDir(), "privaterepo"))
 
 	resp, err := http.Get(ts.URL + "/
-arche/v1/
 privaterepo/arche/v1/info")
 	if err != nil {
 		t.Fatalf("GET info: %v", err)
 	}
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusUnauthorized {
 		t.Errorf("expected 401 for unauthenticated read, got %d", resp.StatusCode)
 	}
 }
 
 func TestForgeServer_SyncWrongToken(t *testing.T) {
 	s, ts := newTestServer(t)
 
 	admin, _ := s.db.CreateUser("admin", "adminpass", true)
 	_, _ = s.db.CreateAPIToken(admin.ID, "test")
 	_, _ = s.db.CreateRepo("privaterepo", "", "private")
 	_, _ = repo.Init(filepath.Join(s.dataDir(), "privaterepo"))
 
 	req, _ := http.NewRequest(http.MethodGet, ts.URL+"/
-arche/v1/
 privaterepo/arche/v1/info", nil)
 	req.Header.Set("Authorization", "Bearer wrongtoken")
 	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		t.Fatalf("GET info: %v", err)
 	}
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusUnauthorized {
 		t.Errorf("expected 401 for wrong token, got %d", resp.StatusCode)
 	}
 }
 
 func TestForgeServer_AdminCRUD(t *testing.T) {
 	s, ts := newTestServer(t)
 
 	_, err := s.db.CreateUser("admin", "adminpass", true)
 	if err != nil {
 		t.Fatalf("CreateUser: %v", err)
 	}
 
 	jar := newCookieJar()
 	client := &http.Client{
 		Jar: jar,
 		CheckRedirect: func(req *http.Request, via []*http.Request) error {
 			return http.ErrUseLastResponse
 		},
 	}
 
 	loginResp, err := client.PostForm(ts.URL+"/login", map[string][]string{
 		"username": {"admin"},
 		"password": {"adminpass"},
 	})
 	if err != nil {
 		t.Fatalf("POST /login: %v", err)
 	}
 	loginResp.Body.Close()
 	if loginResp.StatusCode != http.StatusFound && loginResp.StatusCode != http.StatusSeeOther {
 		t.Fatalf("POST /login: want redirect (302/303), got %d", loginResp.StatusCode)
 	}
 
 	repoResp, err := client.PostForm(ts.URL+"/admin/repos", map[string][]string{
 		"name":        {"myrepo"},
 		"description": {"a test repo"},
 		"visibility":  {"private"},
 	})
 	if err != nil {
 		t.Fatalf("POST /admin/repos: %v", err)
 	}
 	repoResp.Body.Close()
 	if repoResp.StatusCode >= 400 {
 		t.Errorf("POST /admin/repos: got %d", repoResp.StatusCode)
 	}
 
 	rec, err := s.db.GetRepo("myrepo")
 	if err != nil || rec == nil {
 		t.Fatalf("GetRepo: repo not found after create: %v", err)
 	}
 	if rec.Description != "a test repo" {
 		t.Errorf("Description: want %q, got %q", "a test repo", rec.Description)
 	}
 
 	userResp, err := client.PostForm(ts.URL+"/admin/users", map[string][]string{
 		"username": {"bob"},
 		"password": {"bobpass"},
 	})
 	if err != nil {
 		t.Fatalf("POST /admin/users: %v", err)
 	}
 	userResp.Body.Close()
 	if userResp.StatusCode >= 400 {
 		t.Errorf("POST /admin/users: got %d", userResp.StatusCode)
 	}
 
 	users, err := s.db.ListUsers()
 	if err != nil {
 		t.Fatalf("ListUsers: %v", err)
 	}
 	found := false
 	for _, u := range users {
 		if u.Username == "bob" {
 			found = true
 		}
 	}
 	if !found {
 		t.Error("user 'bob' not found after admin create")
 	}
 }
 
 type simpleCookieJar struct {
 	cookies []*http.Cookie
 }
 
 func newCookieJar() *simpleCookieJar { return &simpleCookieJar{} }
 
 func (j *simpleCookieJar) SetCookies(_ *url.URL, cookies []*http.Cookie) {
 	j.cookies = append(j.cookies, cookies...)
 }
 
 func (j *simpleCookieJar) Cookies(_ *url.URL) []*http.Cookie {
 	return j.cookies
 }

internal/archesrv/templates/srv_repo_log.html [M]
--- a/internal/archesrv/templates/srv_repo_log.html
+++ b/internal/archesrv/templates/srv_repo_log.html
@@ -1,62 +1,79 @@
 {{ define "title" }}{{.Repo}} — log{{ end }}
 {{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
 {{ define "srv_repo_log.html" }}{{ template "head" . }}
 <div class="container">
     <h1 class="repo-name">{{.Repo}} / log</h1>
     <form method="get"
           style="display:flex;
                  gap:8px;
                  margin-bottom:16px;
                  align-items:center">
         <input name="where"
                value="{{.WhereExpr}}"
                placeholder='filter: author(alice) or draft()'
                style="flex:1;
                       padding:6px 10px;
                       font-family:monospace;
                       font-size:13px;
                       background:#1e1e1e;
                       color:#ccc;
                       border:1px solid #444;
                       border-radius:4px;
                       outline:none">
         <button type="submit"
                 style="padding:6px 14px;
                        background:#2a6;
                        color:#fff;
                        border:none;
                        border-radius:4px;
                        cursor:pointer;
                        font-size:13px">Filter</button>
         {{ if .WhereExpr }}<a href="?where=" style="color:#888; font-size:12px; white-space:nowrap">✕ clear</a>{{ end }}
     </form>
     {{ if .WhereErr }}
     <p style="color:#e55;
               font-family:monospace;
               font-size:12px;
               margin:-8px 0 12px">parse error: {{.WhereErr}}</p>
     {{ end }}
+    {{ if .AllBookmarks }}
+    <div style="display:flex; flex-wrap:wrap; gap:6px; margin-bottom:14px; align-items:center">
+        <span style="font-size:12px; color:#888">branch:</span>
+        {{ if .BookmarkFilter }}
+        <a href="?where={{.WhereExpr}}" style="font-size:11px; padding:2px 8px; border-radius:3px; border:1px solid #aaa; color:#555; background:transparent; text-decoration:none">all</a>
+        {{ else }}
+        <a href="?where={{.WhereExpr}}" style="font-size:11px; padding:2px 8px; border-radius:3px; border:1px solid #aaa; color:#fff; background:#444; text-decoration:none">all</a>
+        {{ end }}
+        {{ range .AllBookmarks }}
+        {{ if eq . $.BookmarkFilter }}
+        <a href="?bookmark={{.}}&where={{$.WhereExpr}}" style="font-size:11px; padding:2px 8px; border-radius:3px; border:1px solid #b0c0ff; color:#fff; background:#0645ad; text-decoration:none">{{.}}</a>
+        {{ else }}
+        <a href="?bookmark={{.}}&where={{$.WhereExpr}}" style="font-size:11px; padding:2px 8px; border-radius:3px; border:1px solid #b0c0ff; color:#0645ad; background:transparent; text-decoration:none">{{.}}</a>
+        {{ end }}
+        {{ end }}
+    </div>
+    {{ end }}
     {{ if .WhereExpr }}
     <p style="color:#888; font-size:12px; margin:-4px 0 10px">
         Showing {{ len .Commits }} commit(s) matching <code style="color:#adf">{{.WhereExpr}}</code>
     </p>
     {{ end }}
     {{ if .Commits }} {{ range .Commits }}
     <div class="log-row phase-{{.PhaseClass}}">
         <span class="log-sha"><a href="/{{$.Repo}}/commit?id={{.HexID}}">{{.ShortHex}}</a></span>
         <span class="log-msg">
             <a href="/{{$.Repo}}/commit?id={{.HexID}}">{{.Message}}</a>
             {{ if .IsHead }}<span class="head-tag">HEAD</span>{{ end }} {{ range .Bookmarks }}<span class="bookmark-tag">{{.}}</span>{{ end }}
         </span>
         <span class="log-author">{{.Author}}</span>
         <span class="log-date">{{.Date}}</span>
         <span class="badge badge-{{.PhaseClass}}">{{.Phase}}</span>
     </div>
 {{ end }} {{ else }}
     <p style="color: #888; margin-top: 12px">
     {{ if .WhereExpr }}No commits match this filter.{{ else }}No commits yet.{{ end }}
     </p>
     {{ end }}
 </div>
 {{ template "foot" . }}
 {{ end }}

internal/cli/cmd_snap.go [M]
--- a/internal/cli/cmd_snap.go
+++ b/internal/cli/cmd_snap.go
@@ -1,174 +1,180 @@
 package cli
 
 import (
 	"bufio"
 	"fmt"
 	"os"
 	"strings"
 
 	"arche/internal/gitcompat"
 	"arche/internal/tui"
 	"arche/internal/wc"
 
 	"github.com/spf13/cobra"
 )
 
 var (
 	snapInteractive bool
 	snapSign        bool
 	snapSignKey     string
 	snapAmend       bool
+	snapNoAdvance   bool
 )
 
 var snapCmd = &cobra.Command{
 	Use:     "snap [message]",
 	Aliases: []string{"commit"},
 	Short:   "Finalise the working copy draft into a named commit",
 	Long: `Snapshot the current working directory into the draft commit (optionally
 setting a message), finalise it, and create a new empty draft as the next
 HEAD. The snapped commit keeps its change ID; only the content hash changes
 if files were modified since the last snap.
 
 With --interactive (-i) you are shown each hunk and asked whether to include
 it in this snap.  Unselected hunks remain as working-copy changes in the new
 draft.`,
 	RunE: func(cmd *cobra.Command, args []string) error {
 		r := openRepo()
 		defer r.Close()
 
 		msg := strings.Join(args, " ")
 		if msg == "" {
 			msg = promptMessage()
 		}
 		if msg == "" {
 			return fmt.Errorf("aborting snap: empty commit message")
 		}
 
 		w := wc.New(r)
 
+		if snapNoAdvance {
+			w.NoAutoAdvance = true
+		}
+
 		if snapSign || r.Cfg.Sign.Auto {
 			w.SignKey = r.Cfg.Sign.KeyFile
 			if snapSignKey != "" {
 				w.SignKey = snapSignKey
 			}
 		}
 
 		if snapAmend {
 			amended, amendedID, err := w.Amend(msg)
 			if err != nil {
 				return err
 			}
 			signedLabel := ""
 			if amended.CommitSig != nil {
 				signedLabel = " [signed]"
 			}
 			fmt.Printf("Amended %s - %s%s\n", "ch:"+amended.ChangeID, amended.Message, signedLabel)
 			fmt.Printf("  %x\n", amendedID[:8])
 			return nil
 		}
 
 		if snapInteractive {
 			diffs, err := w.ComputeWorkingDiffs()
 			if err != nil {
 				return err
 			}
 			if len(diffs) == 0 {
 				return fmt.Errorf("nothing to snap: working copy is clean")
 			}
 
 			var items []tui.HunkItem
 			for _, fhd := range diffs {
 				for hi, h := range fhd.Hunks {
 					items = append(items, tui.HunkItem{
 						FilePath:         fhd.Path,
 						HunkIdx:          hi,
 						TotalHunksInFile: len(fhd.Hunks),
 						Hunk:             h,
 					})
 				}
 			}
 
 			sel, err := tui.RunHunkSelector(items, "include in snap")
 			if err != nil {
 				return err
 			}
 			if sel.Cancelled {
 				fmt.Fprintln(os.Stderr, "Interactive snap cancelled.")
 				return nil
 			}
 
 			perFile := make(map[string][]bool)
 			idx := 0
 			for _, fhd := range diffs {
 				n := len(fhd.Hunks)
 				perFile[fhd.Path] = sel.Selected[idx : idx+n]
 				idx += n
 			}
 
 			snapped, snappedID, err := w.SnapSelectedHunks(msg, diffs, perFile)
 			if err != nil {
 				return err
 			}
 
 			signedLabelI := ""
 			if snapped.CommitSig != nil {
 				signedLabelI = " [signed]"
 			}
 			fmt.Printf("Snapped %s - %s%s\n", "ch:"+snapped.ChangeID, snapped.Message, signedLabelI)
 			fmt.Printf("  %x\n", snappedID[:8])
 
 			if r.Cfg.Git.Enabled {
 				gitHash, err := gitcompat.MirrorCommit(r.Root, r, snappedID)
 				if err != nil {
 					fmt.Fprintf(os.Stderr, "arche: git mirror failed: %v\n", err)
 				} else if gitHash != "" {
 					fmt.Printf("  git: %s\n", gitHash[:8])
 				}
 			}
 
 			head, _ := r.Head()
 			fmt.Printf("Working copy now at %s (draft)\n", head)
 			return nil
 		}
 
 		snapped, snappedID, err := w.Snap(msg)
 		if err != nil {
 			return err
 		}
 
 		signedLabel := ""
 		if snapped.CommitSig != nil {
 			signedLabel = " [signed]"
 		}
 
 		fmt.Printf("Snapped %s - %s%s\n", "ch:"+snapped.ChangeID, snapped.Message, signedLabel)
 		fmt.Printf("  %x\n", snappedID[:8])
 
 		if r.Cfg.Git.Enabled {
 			gitHash, err := gitcompat.MirrorCommit(r.Root, r, snappedID)
 			if err != nil {
 				fmt.Fprintf(os.Stderr, "arche: git mirror failed: %v\n", err)
 			} else if gitHash != "" {
 				fmt.Printf("  git: %s\n", gitHash[:8])
 			}
 		}
 
 		head, _ := r.Head()
 		fmt.Printf("Working copy now at %s (draft, empty)\n", head)
 		return nil
 	},
 }
 
 func promptMessage() string {
 	fmt.Print("Commit message: ")
 	sc := bufio.NewScanner(os.Stdin)
 	sc.Scan()
 	return strings.TrimSpace(sc.Text())
 }
 
 func init() {
 	snapCmd.Flags().BoolVarP(&snapInteractive, "interactive", "i", false, "interactively select hunks to include in this snap")
 	snapCmd.Flags().BoolVar(&snapSign, "sign", false, "sign the commit with your SSH key")
 	snapCmd.Flags().StringVar(&snapSignKey, "key", "", "path to SSH private key to use for signing (default: auto-detect)")
 	snapCmd.Flags().BoolVar(&snapAmend, "amend", false, "amend the current commit in-place and auto-rebase downstream draft dependents")
+	snapCmd.Flags().BoolVar(&snapNoAdvance, "no-advance", false, "disable bookmark auto-advance for this snap (overrides config)")
 }

internal/repo/config.go [M]
--- a/internal/repo/config.go
+++ b/internal/repo/config.go
@@ -1,117 +1,123 @@
 package repo
 
 import (
 	"fmt"
 	"os"
 
 	"github.com/BurntSushi/toml"
 )
 
 type Config struct {
 	Storage StorageConfig  `toml:"storage"`
 	User    UserConfig     `toml:"user"`
 	UI      UIConfig       `toml:"ui"`
 	Serve   ServeConfig    `toml:"serve"`
 	Hooks   HooksConfig    `toml:"hooks"`
 	Git     GitConfig      `toml:"git"`
 	Sign    SignConfig     `toml:"sign"`
+
+	Snap    SnapConfig     `toml:"snap"`
 
 	Remotes []RemoteConfig `toml:"remote"`
 }
 
 type StorageConfig struct {
 	PackThreshold int    `toml:"pack_threshold"`
 	PackSealSize  int    `toml:"pack_seal_size"`
 	Compression   string `toml:"compression"`
 }
 
 type UserConfig struct {
 	Name  string `toml:"name"`
 	Email string `toml:"email"`
 }
 
 type UIConfig struct {
 	Port int `toml:"port"`
 }
 
 type ServeConfig struct {
 	Port  int    `toml:"port"`
 	Token string `toml:"token"`
 }
 
 type RemoteConfig struct {
 	Name  string `toml:"name"`
 	URL   string `toml:"url"`
 	Token string `toml:"token"`
 }
 
 type HooksConfig struct {
 	PreSnap  []string `toml:"pre-snap"`
 	PostSnap []string `toml:"post-snap"`
 }
 
 type GitConfig struct {
 	Enabled bool   `toml:"enabled"`
 	Remote  string `toml:"remote"`
 }
 
 type SignConfig struct {
 	Auto    bool   `toml:"auto"`
 	KeyFile string `toml:"key"`
 }
 
+type SnapConfig struct {
+	AutoAdvanceBookmarks bool `toml:"auto_advance_bookmarks"`
+}
+
 func DefaultConfig() *Config {
 	return &Config{
 		Storage: StorageConfig{
 			PackThreshold: 128 * 1024,
 			Compression:   "zstd",
 		},
 		User: UserConfig{
 			Name:  gitConfigValue("user.name", "Unknown User"),
 			Email: gitConfigValue("user.email", "unknown@example.com"),
 		},
 		UI:    UIConfig{Port: 7070},
 		Serve: ServeConfig{Port: 8765},
+		Snap:  SnapConfig{AutoAdvanceBookmarks: true},
 	}
 }
 
 func loadConfig(path string) (*Config, error) {
 	cfg := DefaultConfig()
 	if _, err := os.Stat(path); os.IsNotExist(err) {
 		return cfg, nil
 	}
 	_, err := toml.DecodeFile(path, cfg)
 	return cfg, err
 }
 
 func writeConfig(path string, cfg *Config) error {
 	f, err := os.Create(path)
 	if err != nil {
 		return err
 	}
 	defer f.Close()
 	fmt.Fprintln(f, "# Arche repository configuration")
 	return toml.NewEncoder(f).Encode(cfg)
 }
 
 func gitConfigValue(key, fallback string) string {
 	switch key {
 	case "user.name":
 		if v := os.Getenv("GIT_AUTHOR_NAME"); v != "" {
 			return v
 		}
 		if v := os.Getenv("USER"); v != "" {
 			return v
 		}
 	case "user.email":
 		if v := os.Getenv("GIT_AUTHOR_EMAIL"); v != "" {
 			return v
 		}
 		if host, _ := os.Hostname(); host != "" {
 			user := os.Getenv("USER")
 			if user != "" {
 				return user + "@" + host
 			}
 		}
 	}
 	return fallback
 }

internal/store/sqlite.go [M]
--- a/internal/store/sqlite.go
+++ b/internal/store/sqlite.go
@@ -1,733 +1,752 @@
 package store
 
 import (
 	"database/sql"
 	"encoding/hex"
 	"errors"
 	"fmt"
 	"strings"
 	"time"
 
 	"arche/internal/object"
 	"arche/internal/store/migrate"
 
 	"github.com/klauspost/compress/zstd"
 	_ "github.com/mattn/go-sqlite3"
 )
 
 const defaultPackThreshold = 128 * 1024
 
 // SQLiteStore persists objects, refs, and metadata in a single SQLite
 // database, spilling large blobs into append-only pack files.
 type SQLiteStore struct {
 	db              *sql.DB
 	pack            *packManager // pack-file reader/writer for large blobs
 	codec           codec        // compression codec, possibly dictionary-trained
 	packDir         string
 	packThreshold   int    // raw blob size above which data goes to pack files
 	compressionName string // kept so TrainAndSaveDict can rebuild the codec
 }
 
 // OpenSQLiteStore opens (or creates) the store at dbPath, runs migrations,
 // initializes the pack manager under packDir, and builds the compression
 // codec, optionally seeded with the newest trained zstd dictionary.
 // Resources acquired earlier are released on each failure path.
 func OpenSQLiteStore(dbPath, packDir string, packThreshold, packSealSize int, compression string) (*SQLiteStore, error) {
 	if packThreshold <= 0 {
 		packThreshold = defaultPackThreshold
 	}
 	// busy_timeout avoids immediate SQLITE_BUSY under concurrent processes.
 	db, err := sql.Open("sqlite3", dbPath+"?_busy_timeout=5000")
 	if err != nil {
 		return nil, fmt.Errorf("store open %s: %w", dbPath, err)
 	}
 	// Single connection: serializes access and keeps WAL bookkeeping simple.
 	db.SetMaxOpenConns(1)
 
 	if _, err := db.Exec("PRAGMA journal_mode = WAL; PRAGMA foreign_keys = ON;"); err != nil {
 		db.Close()
 		return nil, fmt.Errorf("store pragma: %w", err)
 	}
 
 	if err := migrate.Run(db); err != nil {
 		db.Close()
 		return nil, fmt.Errorf("store migrate: %w", err)
 	}
 
 	pm, err := newPackManager(packDir, packSealSize)
 	if err != nil {
 		db.Close()
 		return nil, err
 	}
 
 	// Best-effort: a missing dictionary just means an untrained codec.
 	var dictData []byte
 	_ = db.QueryRow("SELECT dict FROM zstd_dicts ORDER BY id DESC LIMIT 1").Scan(&dictData)
 
 	cd, err := newCodec(compression, dictData)
 	if err != nil {
 		db.Close()
 		pm.close()
 		return nil, err
 	}
 
 	return &SQLiteStore{
 		db:              db,
 		pack:            pm,
 		codec:           cd,
 		packDir:         packDir,
 		packThreshold:   packThreshold,
 		compressionName: compression,
 	}, nil
 }
 
 // Begin starts a new SQLite transaction wrapped in the store's Tx type.
 func (s *SQLiteStore) Begin() (*Tx, error) {
 	sqlTx, err := s.db.Begin()
 	if err != nil {
 		return nil, fmt.Errorf("begin tx: %w", err)
 	}
 	return &Tx{sqlTx: sqlTx}, nil
 }
 
 // Commit commits tx.
 func (s *SQLiteStore) Commit(tx *Tx) error {
 	return tx.sqlTx.Commit()
 }
 
 // Rollback aborts tx, discarding its writes.
 func (s *SQLiteStore) Rollback(tx *Tx) error {
 	return tx.sqlTx.Rollback()
 }
 
 // Close releases the pack manager and codec before closing the database.
 func (s *SQLiteStore) Close() error {
 	s.pack.close()
 	s.codec.Close()
 	return s.db.Close()
 }
 
 // AddConflict records path as conflicted; adding it twice is a no-op.
 func (s *SQLiteStore) AddConflict(tx *Tx, path string) error {
 	_, err := tx.sqlTx.Exec("INSERT OR IGNORE INTO conflicts (path) VALUES (?)", path)
 	return err
 }
 
 // ClearConflict removes path from the conflict set.
 func (s *SQLiteStore) ClearConflict(tx *Tx, path string) error {
 	_, err := tx.sqlTx.Exec("DELETE FROM conflicts WHERE path = ?", path)
 	return err
 }
 
 // ClearAllConflicts empties the conflict set.
 func (s *SQLiteStore) ClearAllConflicts(tx *Tx) error {
 	_, err := tx.sqlTx.Exec("DELETE FROM conflicts")
 	return err
 }
 
 // ListConflicts returns all conflicted paths in lexical order.
 func (s *SQLiteStore) ListConflicts() ([]string, error) {
 	rows, err := s.db.Query("SELECT path FROM conflicts ORDER BY path")
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 	var out []string
 	for rows.Next() {
 		var p string
 		if err := rows.Scan(&p); err != nil {
 			return nil, err
 		}
 		out = append(out, p)
 	}
 	return out, rows.Err()
 }
 
 // HasObject reports whether id exists either inline in the objects table
 // or in a pack file (checked via pack_index).
 func (s *SQLiteStore) HasObject(id [32]byte) (bool, error) {
 	var count int
 	err := s.db.QueryRow("SELECT COUNT(*) FROM objects WHERE id = ?", id[:]).Scan(&count)
 	if err != nil {
 		return false, err
 	}
 	if count > 0 {
 		return true, nil
 	}
 
 	// Not inline; fall back to the pack index.
 	err = s.db.QueryRow("SELECT COUNT(*) FROM pack_index WHERE blob_id = ?", id[:]).Scan(&count)
 	if err != nil {
 		return false, err
 	}
 	return count > 0, nil
 }
 
 // ReadObject returns the kind and decompressed bytes of the object id.
 // Lookup order: the inline objects table first, then the pack index.
 // Packed objects may be delta-encoded against a base object; the chain is
 // resolved recursively, bounded by deltaMaxDepth. Packed objects are
 // always blobs (only blobs are written to packs — see WriteObject).
 func (s *SQLiteStore) ReadObject(id [32]byte) (kind string, raw []byte, err error) {
 	var compressed []byte
 	rowErr := s.db.QueryRow("SELECT kind, data FROM objects WHERE id = ?", id[:]).Scan(&kind, &compressed)
 	if rowErr == nil {
 		raw, err = s.codec.Decompress(compressed)
 		if err != nil {
 			return "", nil, fmt.Errorf("decompress object %s: %w", hex.EncodeToString(id[:])[:12], err)
 		}
 		return kind, raw, nil
 	}
 	if !errors.Is(rowErr, sql.ErrNoRows) {
 		return "", nil, rowErr
 	}
 
 	// Not inline: look it up in the pack index.
 	var packFile string
 	var offset, rawSize int64
 	var deltaBaseIDRaw []byte
 	var deltaDepth int
 	rowErr = s.db.QueryRow(
 		"SELECT pack_file, offset, raw_size, delta_base_id, delta_depth FROM pack_index WHERE blob_id = ?", id[:],
 	).Scan(&packFile, &offset, &rawSize, &deltaBaseIDRaw, &deltaDepth)
 	if errors.Is(rowErr, sql.ErrNoRows) {
 		return "", nil, fmt.Errorf("object %s not found", hex.EncodeToString(id[:])[:12])
 	}
 	if rowErr != nil {
 		return "", nil, rowErr
 	}
 
 	compressed, err = s.pack.read(packFile, offset)
 	if err != nil {
 		return "", nil, err
 	}
 
 	// Delta-encoded entry: decompress the delta, resolve the base
 	// (recursively, so deeper chains unwind one level at a time), apply.
 	if len(deltaBaseIDRaw) > 0 {
 		if deltaDepth > deltaMaxDepth {
 			return "", nil, fmt.Errorf("pack object %s: delta chain depth %d exceeds limit %d",
 				hex.EncodeToString(id[:])[:12], deltaDepth, deltaMaxDepth)
 		}
 		deltaBytes, decErr := s.codec.Decompress(compressed)
 		if decErr != nil {
 			return "", nil, fmt.Errorf("decompress delta %s: %w", hex.EncodeToString(id[:])[:12], decErr)
 		}
 		var baseID [32]byte
 		copy(baseID[:], deltaBaseIDRaw)
 		_, baseRaw, baseErr := s.ReadObject(baseID)
 		if baseErr != nil {
 			return "", nil, fmt.Errorf("read delta base for %s: %w", hex.EncodeToString(id[:])[:12], baseErr)
 		}
 		raw, err = ApplyDelta(baseRaw, deltaBytes)
 		if err != nil {
 			return "", nil, fmt.Errorf("apply delta %s: %w", hex.EncodeToString(id[:])[:12], err)
 		}
 		return string(object.KindBlob), raw, nil
 	}
 
 	raw, err = s.codec.Decompress(compressed)
 	if err != nil {
 		return "", nil, fmt.Errorf("decompress pack object %s: %w", hex.EncodeToString(id[:])[:12], err)
 	}
 	return string(object.KindBlob), raw, nil
 }
 
 // WriteObject stores an object under id, compressing it first. Blobs whose
 // raw size exceeds packThreshold go to a pack file (indexed in pack_index
 // within tx); everything else is stored inline in the objects table.
 // Duplicate writes of the same id are silently ignored.
 func (s *SQLiteStore) WriteObject(tx *Tx, id [32]byte, kind string, raw []byte) error {
 	compressed := s.codec.Compress(raw)
 
 	if len(raw) > s.packThreshold && kind == string(object.KindBlob) {
 		// Note: the pack write itself happens outside the SQL transaction;
 		// only the index row is transactional.
 		entry, err := s.pack.write(compressed, int64(len(raw)))
 		if err != nil {
 			return err
 		}
 		_, err = tx.sqlTx.Exec(
 			"INSERT OR IGNORE INTO pack_index (blob_id, pack_file, offset, raw_size) VALUES (?, ?, ?, ?)",
 			id[:], entry.packFile, entry.offset, entry.rawSize,
 		)
 		return err
 	}
 
 	_, err := tx.sqlTx.Exec(
 		"INSERT OR IGNORE INTO objects (id, kind, data) VALUES (?, ?, ?)",
 		id[:], kind, compressed,
 	)
 	return err
 }
 
 // ListObjectsByKind returns the IDs of all inline objects of the given
 // kind. Packed blobs are not included (they live only in pack_index).
 func (s *SQLiteStore) ListObjectsByKind(kind string) ([][32]byte, error) {
 	rows, err := s.db.Query("SELECT id FROM objects WHERE kind = ?", kind)
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 	var ids [][32]byte
 	for rows.Next() {
 		var raw []byte
 		if err := rows.Scan(&raw); err != nil {
 			return nil, err
 		}
 		var id [32]byte
 		copy(id[:], raw)
 		ids = append(ids, id)
 	}
 	return ids, rows.Err()
 }
 
 // GetBookmark returns the bookmark by name, or (nil, nil) if absent.
 func (s *SQLiteStore) GetBookmark(name string) (*Bookmark, error) {
 	var cid []byte
 	var remote sql.NullString
 	err := s.db.QueryRow("SELECT commit_id, remote FROM bookmarks WHERE name = ?", name).Scan(&cid, &remote)
 	if errors.Is(err, sql.ErrNoRows) {
 		return nil, nil
 	}
 	if err != nil {
 		return nil, err
 	}
 	b := &Bookmark{Name: name, Remote: remote.String}
 	copy(b.CommitID[:], cid)
 	return b, nil
 }
 
 // SetBookmark inserts or replaces b. An empty Remote is stored as NULL.
 func (s *SQLiteStore) SetBookmark(tx *Tx, b Bookmark) error {
 	var remote interface{}
 	if b.Remote != "" {
 		remote = b.Remote
 	}
 	_, err := tx.sqlTx.Exec(
 		"INSERT OR REPLACE INTO bookmarks (name, commit_id, remote) VALUES (?, ?, ?)",
 		b.Name, b.CommitID[:], remote,
 	)
 	return err
 }
 
 // DeleteBookmark removes the bookmark by name; missing names are no-ops.
 func (s *SQLiteStore) DeleteBookmark(tx *Tx, name string) error {
 	_, err := tx.sqlTx.Exec("DELETE FROM bookmarks WHERE name = ?", name)
 	return err
 }
 
 // ListBookmarks returns all bookmarks ordered by name.
 func (s *SQLiteStore) ListBookmarks() ([]Bookmark, error) {
 	rows, err := s.db.Query("SELECT name, commit_id, remote FROM bookmarks ORDER BY name")
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 
 	var out []Bookmark
 	for rows.Next() {
 		var b Bookmark
 		var cid []byte
 		var remote sql.NullString
 		if err := rows.Scan(&b.Name, &cid, &remote); err != nil {
 			return nil, err
 		}
 		copy(b.CommitID[:], cid)
 		b.Remote = remote.String
 		out = append(out, b)
 	}
 	return out, rows.Err()
 }
 
 // GetPhase returns the recorded phase of commitID. Commits without a
 // phases row default to draft.
 func (s *SQLiteStore) GetPhase(commitID [32]byte) (object.Phase, error) {
 	var phase int
 	err := s.db.QueryRow("SELECT phase FROM phases WHERE commit_id = ?", commitID[:]).Scan(&phase)
 	if errors.Is(err, sql.ErrNoRows) {
 		return object.PhaseDraft, nil
 	}
 
 	if err != nil {
 		return 0, err
 	}
 
 	return object.Phase(phase), nil
 }
 
 // SetPhase records (or overwrites) the phase of commitID.
 func (s *SQLiteStore) SetPhase(tx *Tx, commitID [32]byte, phase object.Phase) error {
 	_, err := tx.sqlTx.Exec(
 		"INSERT OR REPLACE INTO phases (commit_id, phase) VALUES (?, ?)",
 		commitID[:], int(phase),
 	)
 	return err
 }
 
+func (s *SQLiteStore) ListPublicCommitIDs() ([][32]byte, error) {
+	rows, err := s.db.Query("SELECT commit_id FROM phases WHERE phase = ?", int(object.PhasePublic))
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out [][32]byte
+	for rows.Next() {
+		var raw []byte
+		if err := rows.Scan(&raw); err != nil {
+			return nil, err
+		}
+		var id [32]byte
+		copy(id[:], raw)
+		out = append(out, id)
+	}
+	return out, rows.Err()
+}
+
 func (s *SQLiteStore) ListSecretCommitIDs() ([][32]byte, error) {
 	rows, err := s.db.Query("SELECT commit_id FROM phases WHERE phase = ?", int(object.PhaseSecret))
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 	var out [][32]byte
 	for rows.Next() {
 		var raw []byte
 		if err := rows.Scan(&raw); err != nil {
 			return nil, err
 		}
 		var id [32]byte
 		copy(id[:], raw)
 		out = append(out, id)
 	}
 	return out, rows.Err()
 }
 
 // AllocChangeID reserves a fresh, unique change ID inside tx. It starts
 // with 8-character IDs and, on a uniqueness collision, retries with
 // progressively longer IDs (up to 32 chars) to keep IDs short in practice.
 func (s *SQLiteStore) AllocChangeID(tx *Tx) (string, error) {
 	for length := 8; length <= 32; length += 2 {
 		id := object.NewChangeID(length)
 		_, err := tx.sqlTx.Exec("INSERT INTO changes (change_id, commit_id) VALUES (?, NULL)", id)
 		if err == nil {
 			return id, nil
 		}
 		// Only a constraint violation (duplicate ID) warrants a retry.
 		if !isSQLiteConstraintError(err) {
 			return "", fmt.Errorf("alloc change ID: %w", err)
 		}
 	}
 	return "", errors.New("change ID allocation failed after max retries")
 }
 
 // GetChangeCommit resolves a change ID (or a short prefix of one, when
 // fewer than 8 chars are given) to its commit. Returns sql.ErrNoRows when
 // nothing matches and an error when a prefix is ambiguous.
 // NOTE(review): prefix lookup uses LIKE, so '%'/'_' in changeID would act
 // as wildcards — presumably change IDs never contain them; confirm.
 func (s *SQLiteStore) GetChangeCommit(changeID string) ([32]byte, error) {
 	var rows *sql.Rows
 	var err error
 	if len(changeID) < 8 {
 		rows, err = s.db.Query(
 			"SELECT change_id, commit_id FROM changes WHERE change_id LIKE ? AND commit_id IS NOT NULL",
 			changeID+"%",
 		)
 	} else {
 		rows, err = s.db.Query(
 			"SELECT change_id, commit_id FROM changes WHERE change_id = ? AND commit_id IS NOT NULL",
 			changeID,
 		)
 	}
 
 	if err != nil {
 		return object.ZeroID, err
 	}
 	defer rows.Close()
 
 	// Count matches so an ambiguous prefix can be rejected.
 	var found [32]byte
 	var count int
 	for rows.Next() {
 		var cid []byte
 		var chid string
 		if err := rows.Scan(&chid, &cid); err != nil {
 			return object.ZeroID, err
 		}
 		copy(found[:], cid)
 		count++
 	}
 
 	if err := rows.Err(); err != nil {
 		return object.ZeroID, err
 	}
 
 	if count == 0 {
 		return object.ZeroID, sql.ErrNoRows
 	}
 
 	if count > 1 {
 		return object.ZeroID, fmt.Errorf("ambiguous change ID prefix %q matches %d changes", changeID, count)
 	}
 
 	return found, nil
 }
 
 // SetChangeCommit points an already-allocated change ID at commitID.
 func (s *SQLiteStore) SetChangeCommit(tx *Tx, changeID string, commitID [32]byte) error {
 	_, err := tx.sqlTx.Exec(
 		"UPDATE changes SET commit_id = ? WHERE change_id = ?",
 		commitID[:], changeID,
 	)
 	return err
 }
 
 // ListChanges returns all bound change-ID→commit pairs, reusing the
 // Bookmark struct as a generic name/commit pair (Remote is unused).
 func (s *SQLiteStore) ListChanges() ([]Bookmark, error) {
 	rows, err := s.db.Query("SELECT change_id, commit_id FROM changes WHERE commit_id IS NOT NULL")
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 	var out []Bookmark
 	for rows.Next() {
 		var name string
 		var commitID []byte
 		if err := rows.Scan(&name, &commitID); err != nil {
 			return nil, err
 		}
 		var id [32]byte
 		copy(id[:], commitID)
 		out = append(out, Bookmark{Name: name, CommitID: id})
 	}
 	return out, rows.Err()
 }
 
 // GetWCacheEntry returns the working-copy cache row for path, or
 // (nil, nil) if the path is not cached.
 func (s *SQLiteStore) GetWCacheEntry(path string) (*WCacheEntry, error) {
 	var e WCacheEntry
 	var blobID []byte
 	var dirty int
 	err := s.db.QueryRow(
 		"SELECT path, inode, mtime_ns, size, blob_id, mode, dirty FROM wcache WHERE path = ?", path,
 	).Scan(&e.Path, &e.Inode, &e.MtimeNs, &e.Size, &blobID, &e.Mode, &dirty)
 	if errors.Is(err, sql.ErrNoRows) {
 		return nil, nil
 	}
 	if err != nil {
 		return nil, err
 	}
 	copy(e.BlobID[:], blobID)
 	e.Dirty = dirty != 0
 	return &e, nil
 }
 
 // SetWCacheEntry upserts a cache row for e.Path.
 func (s *SQLiteStore) SetWCacheEntry(tx *Tx, e WCacheEntry) error {
 	dirty := 0
 	if e.Dirty {
 		dirty = 1
 	}
 	_, err := tx.sqlTx.Exec(
 		"INSERT OR REPLACE INTO wcache (path, inode, mtime_ns, size, blob_id, mode, dirty) VALUES (?, ?, ?, ?, ?, ?, ?)",
 		e.Path, e.Inode, e.MtimeNs, e.Size, e.BlobID[:], e.Mode, dirty,
 	)
 	return err
 }
 
 // DeleteWCacheEntry drops the cache row for path, if any.
 func (s *SQLiteStore) DeleteWCacheEntry(tx *Tx, path string) error {
 	_, err := tx.sqlTx.Exec("DELETE FROM wcache WHERE path = ?", path)
 	return err
 }
 
 // ListWCacheEntries returns every cache row, ordered by path.
 func (s *SQLiteStore) ListWCacheEntries() ([]WCacheEntry, error) {
 	rows, err := s.db.Query("SELECT path, inode, mtime_ns, size, blob_id, mode, dirty FROM wcache ORDER BY path")
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 
 	var out []WCacheEntry
 	for rows.Next() {
 		var e WCacheEntry
 		var blobID []byte
 		var dirty int
 		if err := rows.Scan(&e.Path, &e.Inode, &e.MtimeNs, &e.Size, &blobID, &e.Mode, &dirty); err != nil {
 			return nil, err
 		}
 		copy(e.BlobID[:], blobID)
 		e.Dirty = dirty != 0
 		out = append(out, e)
 	}
 	return out, rows.Err()
 }
 
 // ClearWCache deletes every cache row inside tx.
 func (s *SQLiteStore) ClearWCache(tx *Tx) error {
 	_, err := tx.sqlTx.Exec("DELETE FROM wcache")
 	return err
 }
 
 // MarkWCacheDirty flags path as dirty, inserting a placeholder row (zero
 // stat fields, zero blob) when the path is not yet cached. It runs outside
 // any caller transaction — it uses the db handle directly — so the watcher
 // can call it without coordinating with other writers.
 func (s *SQLiteStore) MarkWCacheDirty(path string) error {
 	_, err := s.db.Exec(
 		`INSERT INTO wcache (path, inode, mtime_ns, size, blob_id, mode, dirty)
 		 VALUES (?, 0, 0, 0, zeroblob(32), 0, 1)
 		 ON CONFLICT(path) DO UPDATE SET dirty = 1`,
 		path,
 	)
 	return err
 }
 
 // ListDirtyWCacheEntries returns only the rows flagged dirty, by path.
 func (s *SQLiteStore) ListDirtyWCacheEntries() ([]WCacheEntry, error) {
 	rows, err := s.db.Query("SELECT path, inode, mtime_ns, size, blob_id, mode, dirty FROM wcache WHERE dirty = 1 ORDER BY path")
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 
 	var out []WCacheEntry
 	for rows.Next() {
 		var e WCacheEntry
 		var blobID []byte
 		var dirty int
 		if err := rows.Scan(&e.Path, &e.Inode, &e.MtimeNs, &e.Size, &blobID, &e.Mode, &dirty); err != nil {
 			return nil, err
 		}
 		copy(e.BlobID[:], blobID)
 		e.Dirty = dirty != 0
 		out = append(out, e)
 	}
 	return out, rows.Err()
 }
 
 // ClearWCacheDirtyFlags resets all dirty flags inside tx.
 func (s *SQLiteStore) ClearWCacheDirtyFlags(tx *Tx) error {
 	_, err := tx.sqlTx.Exec("UPDATE wcache SET dirty = 0 WHERE dirty = 1")
 	return err
 }
 
 // InsertOperation appends op to the operation log and returns its sequence
 // number. A zero Timestamp is filled with the current time.
 func (s *SQLiteStore) InsertOperation(tx *Tx, op Operation) (int64, error) {
 	if op.Timestamp == 0 {
 		op.Timestamp = time.Now().Unix()
 	}
 	result, err := tx.sqlTx.Exec(
 		"INSERT INTO operations (kind, timestamp, before, after, metadata) VALUES (?, ?, ?, ?, ?)",
 		op.Kind, op.Timestamp, op.Before, op.After, nullableString(op.Metadata),
 	)
 	if err != nil {
 		return 0, err
 	}
 	return result.LastInsertId()
 }
 
 // ListOperations returns the newest operations first, at most n of them
 // (n <= 0 means no limit). The LIMIT is formatted from an int, so no
 // injection risk despite the string concatenation.
 func (s *SQLiteStore) ListOperations(n int) ([]Operation, error) {
 	query := "SELECT seq, kind, timestamp, before, after, metadata FROM operations ORDER BY seq DESC"
 	if n > 0 {
 		query += fmt.Sprintf(" LIMIT %d", n)
 	}
 	rows, err := s.db.Query(query)
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 
 	var out []Operation
 	for rows.Next() {
 		var op Operation
 		var meta sql.NullString
 		if err := rows.Scan(&op.Seq, &op.Kind, &op.Timestamp, &op.Before, &op.After, &meta); err != nil {
 			return nil, err
 		}
 		op.Metadata = meta.String
 		out = append(out, op)
 	}
 	return out, rows.Err()
 }
 
 // GetOperation returns the operation with the given sequence number, or
 // (nil, nil) when absent.
 func (s *SQLiteStore) GetOperation(seq int64) (*Operation, error) {
 	var op Operation
 	var meta sql.NullString
 	err := s.db.QueryRow(
 		"SELECT seq, kind, timestamp, before, after, metadata FROM operations WHERE seq = ?", seq,
 	).Scan(&op.Seq, &op.Kind, &op.Timestamp, &op.Before, &op.After, &meta)
 	if errors.Is(err, sql.ErrNoRows) {
 		return nil, nil
 	}
 	if err != nil {
 		return nil, err
 	}
 	op.Metadata = meta.String
 	return &op, nil
 }
 
 // GetLastOperation returns the most recent operation, or (nil, nil) when
 // the log is empty.
 func (s *SQLiteStore) GetLastOperation() (*Operation, error) {
 	var op Operation
 	var meta sql.NullString
 	err := s.db.QueryRow(
 		"SELECT seq, kind, timestamp, before, after, metadata FROM operations ORDER BY seq DESC LIMIT 1",
 	).Scan(&op.Seq, &op.Kind, &op.Timestamp, &op.Before, &op.After, &meta)
 	if errors.Is(err, sql.ErrNoRows) {
 		return nil, nil
 	}
 	if err != nil {
 		return nil, err
 	}
 	op.Metadata = meta.String
 	return &op, nil
 }
 
 func isSQLiteConstraintError(err error) bool {
 	if err == nil {
 		return false
 	}
 	return strings.Contains(err.Error(), "UNIQUE constraint failed") ||
 		strings.Contains(err.Error(), "constraint failed")
 }
 
 func nullableString(s string) interface{} {
 	if s == "" {
 		return nil
 	}
 	return s
 }
 
 // AcquireLock takes (or refreshes) the advisory file lock on path for
 // owner. Re-acquiring one's own lock updates its timestamp and comment;
 // a lock held by someone else is an error.
 func (s *SQLiteStore) AcquireLock(tx *Tx, path, owner, comment string) error {
 	var existingOwner string
 	err := tx.sqlTx.QueryRow("SELECT owner FROM file_locks WHERE path = ?", path).Scan(&existingOwner)
 	if err == nil && existingOwner != owner {
 		return fmt.Errorf("file %q is locked by %q", path, existingOwner)
 	}
 	_, err = tx.sqlTx.Exec(`
 		INSERT INTO file_locks (path, owner, acquired_at, comment) VALUES (?, ?, ?, ?)
 		ON CONFLICT(path) DO UPDATE SET acquired_at = excluded.acquired_at, comment = excluded.comment`,
 		path, owner, time.Now().Unix(), comment,
 	)
 	return err
 }
 
 // ReleaseLock drops the lock on path, but only if owner holds it.
 func (s *SQLiteStore) ReleaseLock(tx *Tx, path, owner string) error {
 	res, err := tx.sqlTx.Exec("DELETE FROM file_locks WHERE path = ? AND owner = ?", path, owner)
 	if err != nil {
 		return err
 	}
 	n, _ := res.RowsAffected()
 	if n == 0 {
 		return fmt.Errorf("lock on %q is not held by %q", path, owner)
 	}
 	return nil
 }
 
 // ReleaseLockAdmin drops the lock on path regardless of owner.
 func (s *SQLiteStore) ReleaseLockAdmin(tx *Tx, path string) error {
 	_, err := tx.sqlTx.Exec("DELETE FROM file_locks WHERE path = ?", path)
 	return err
 }
 
 // GetLock returns the lock on path, or (nil, nil) when unlocked.
 func (s *SQLiteStore) GetLock(path string) (*FileLock, error) {
 	var l FileLock
 	var comment sql.NullString
 	err := s.db.QueryRow(
 		"SELECT path, owner, acquired_at, comment FROM file_locks WHERE path = ?", path,
 	).Scan(&l.Path, &l.Owner, &l.AcquiredAt, &comment)
 	if errors.Is(err, sql.ErrNoRows) {
 		return nil, nil
 	}
 	if err != nil {
 		return nil, err
 	}
 	l.Comment = comment.String
 	return &l, nil
 }
 
 // ListLocks returns all held locks, most recently acquired first.
 func (s *SQLiteStore) ListLocks() ([]FileLock, error) {
 	rows, err := s.db.Query(
 		"SELECT path, owner, acquired_at, comment FROM file_locks ORDER BY acquired_at DESC",
 	)
 	if err != nil {
 		return nil, err
 	}
 	defer rows.Close()
 	var out []FileLock
 	for rows.Next() {
 		var l FileLock
 		var comment sql.NullString
 		if err := rows.Scan(&l.Path, &l.Owner, &l.AcquiredAt, &comment); err != nil {
 			return nil, err
 		}
 		l.Comment = comment.String
 		out = append(out, l)
 	}
 	return out, rows.Err()
 }
 
 // TrainAndSaveDict samples up to 200 random inline blobs, trains a zstd
 // dictionary on them, persists it, and hot-swaps the store's codec so new
 // writes use the dictionary immediately. At least 5 usable samples are
 // required. NOTE(review): the codec swap is unsynchronized — presumably
 // safe because the db is limited to one connection; confirm no concurrent
 // codec use.
 func (s *SQLiteStore) TrainAndSaveDict() error {
 	rows, err := s.db.Query(
 		"SELECT data FROM objects WHERE kind = 'blob' ORDER BY RANDOM() LIMIT 200",
 	)
 	if err != nil {
 		return fmt.Errorf("sample blobs: %w", err)
 	}
 	defer rows.Close()
 
 	var samples [][]byte
 	for rows.Next() {
 		var compressed []byte
 		if scanErr := rows.Scan(&compressed); scanErr != nil {
 			return scanErr
 		}
 		// Undecompressable or empty blobs are silently skipped as samples.
 		raw, decErr := s.codec.Decompress(compressed)
 		if decErr == nil && len(raw) > 0 {
 			samples = append(samples, raw)
 		}
 	}
 	if err := rows.Err(); err != nil {
 		return err
 	}
 	if len(samples) < 5 {
 		return fmt.Errorf("not enough blobs to train dictionary (need at least 5, found %d)", len(samples))
 	}
 
 	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
 		Contents: samples,
 	})
 	if err != nil {
 		return fmt.Errorf("build dict: %w", err)
 	}
 
 	if _, err := s.db.Exec(
 		"INSERT INTO zstd_dicts (created_at, dict) VALUES (?, ?)",
 		time.Now().Unix(), dict,
 	); err != nil {
 		return fmt.Errorf("save dict: %w", err)
 	}
 
 	// Rebuild the codec with the new dictionary and swap it in.
 	s.codec.Close()
 	newCD, err := newCodec(s.compressionName, dict)
 	if err != nil {
 		return fmt.Errorf("reload codec: %w", err)
 	}
 	s.codec = newCD
 	return nil
 }

internal/store/store.go [M]
--- a/internal/store/store.go
+++ b/internal/store/store.go
@@ -1,102 +1,103 @@
 package store
 
 import (
 	"database/sql"
 
 	"arche/internal/object"
 )
 
 // Bookmark is a named pointer to a commit; Remote is non-empty for
 // remote-tracking bookmarks. It doubles as a generic name/commit pair in
 // ListChanges.
 type Bookmark struct {
 	Name     string
 	CommitID [32]byte
 	Remote   string
 }
 
 // WCacheEntry caches stat data and the last-known blob ID for one
 // working-copy file, letting snapshots skip re-reading unchanged files.
 type WCacheEntry struct {
 	Path    string
 	Inode   uint64
 	MtimeNs int64
 	Size    int64
 	BlobID  [32]byte
 	Mode    uint8 // object.EntryMode value (0=file,1=exec,2=symlink,3=dir)
 	Dirty   bool  // set by watcher; cleared by snap/status after processing
 }
 
 // Operation is one entry in the append-only operation log; Before/After
 // capture ref state for undo.
 type Operation struct {
 	Seq       int64
 	Kind      string
 	Timestamp int64
 	Before    string
 	After     string
 	Metadata  string
 }
 
 // Tx wraps the underlying SQL transaction so Store implementations control
 // transaction lifetime.
 type Tx struct {
 	sqlTx *sql.Tx
 }
 
 // SQLTx exposes the raw transaction for callers that need direct SQL.
 func (t *Tx) SQLTx() *sql.Tx { return t.sqlTx }
 
 type Store interface {
 	HasObject(id [32]byte) (bool, error)
 	ReadObject(id [32]byte) (kind string, raw []byte, err error)
 	WriteObject(tx *Tx, id [32]byte, kind string, raw []byte) error
 	ListObjectsByKind(kind string) ([][32]byte, error)
 
 	GetBookmark(name string) (*Bookmark, error)
 	SetBookmark(tx *Tx, b Bookmark) error
 	DeleteBookmark(tx *Tx, name string) error
 	ListBookmarks() ([]Bookmark, error)
 
 	GetPhase(commitID [32]byte) (object.Phase, error)
 	SetPhase(tx *Tx, commitID [32]byte, phase object.Phase) error
+	ListPublicCommitIDs() ([][32]byte, error)
 
 	AllocChangeID(tx *Tx) (string, error)
 	GetChangeCommit(changeID string) ([32]byte, error)
 	SetChangeCommit(tx *Tx, changeID string, commitID [32]byte) error
 	ListChanges() ([]Bookmark, error)
 
 	GetWCacheEntry(path string) (*WCacheEntry, error)
 	SetWCacheEntry(tx *Tx, e WCacheEntry) error
 	DeleteWCacheEntry(tx *Tx, path string) error
 	ListWCacheEntries() ([]WCacheEntry, error)
 	ClearWCache(tx *Tx) error
 	MarkWCacheDirty(path string) error
 	ListDirtyWCacheEntries() ([]WCacheEntry, error)
 	ClearWCacheDirtyFlags(tx *Tx) error
 
 	InsertOperation(tx *Tx, op Operation) (int64, error)
 	ListOperations(n int) ([]Operation, error)
 	GetOperation(seq int64) (*Operation, error)
 	GetLastOperation() (*Operation, error)
 
 	AddConflict(tx *Tx, path string) error
 	ClearConflict(tx *Tx, path string) error
 	ClearAllConflicts(tx *Tx) error
 	ListConflicts() ([]string, error)
 	ListSecretCommitIDs() ([][32]byte, error)
 
 	Begin() (*Tx, error)
 	Commit(tx *Tx) error
 	Rollback(tx *Tx) error
 	Close() error
 }
 
 // FileLock is an advisory per-file lock record.
 type FileLock struct {
 	Path       string
 	Owner      string
 	AcquiredAt int64
 	Comment    string
 }
 
 // LockStore is the optional advisory-locking capability of a store.
 type LockStore interface {
 	AcquireLock(tx *Tx, path, owner, comment string) error
 	ReleaseLock(tx *Tx, path, owner string) error
 	ReleaseLockAdmin(tx *Tx, path string) error
 	GetLock(path string) (*FileLock, error)
 	ListLocks() ([]FileLock, error)
 }
 
 // DictTrainer is implemented by stores that can train a compression
 // dictionary from existing content.
 type DictTrainer interface {
 	TrainAndSaveDict() error
 }

internal/wc/wc.go [M]
--- a/internal/wc/wc.go
+++ b/internal/wc/wc.go
@@ -1,1030 +1,1032 @@
 package wc
 
 import (
 	"encoding/json"
 	"fmt"
 	"io/fs"
 	"os"
 	"path/filepath"
 	"sort"
 	"strings"
 	"syscall"
 	"time"
 
 	"arche/internal/merge"
 	"arche/internal/object"
 	"arche/internal/repo"
 	"arche/internal/store"
 	"arche/internal/watcher"
 )
 
 // dirtySet returns the set of paths the file watcher flagged dirty, or a
 // nil map when no watcher is active — nil tells callers to fall back to a
 // full working-copy scan.
 func dirtySet(r *repo.Repo) (map[string]bool, error) {
 	if !watcher.IsActive(r.ArcheDir()) {
 		return nil, nil
 	}
 	entries, err := r.Store.ListDirtyWCacheEntries()
 	if err != nil {
 		return nil, err
 	}
 	m := make(map[string]bool, len(entries))
 	for _, e := range entries {
 		m[e.Path] = true
 	}
 	return m, nil
 }
 
 // FileStatus pairs a working-copy path with its status rune:
 // 'A' added, 'M' modified, 'D' deleted (see Status).
 type FileStatus struct {
 	Path   string
 	Status rune
 }
 
 type WC struct {
 	Repo    
+       
 *repo.Repo
 	SignKey 
-string
+       string
+	NoAutoAdvance  bool
+	AuthorOverride *object.Signature
 
 }
 
 // New returns a WC bound to the given repository.
 func New(r *repo.Repo) *WC { return &WC{Repo: r} }
 
 // maybeSign signs c in place with wc.SignKey when one is configured;
 // with no key it is a no-op.
 func (wc *WC) maybeSign(c *object.Commit) error {
 	if wc.SignKey == "" {
 		return nil
 	}
 	body := object.CommitBodyForSigning(c)
 	sig, _, err := object.SignCommitBody(body, wc.SignKey)
 	if err != nil {
 		return fmt.Errorf("commit signing: %w", err)
 	}
 	c.CommitSig = sig
 	return nil
 }
 
 // snapshotIntoTx writes a commit capturing the given paths into tx,
 // rebuilding the working-copy cache as it goes. Files whose cached
 // inode/mtime/size still match are reused without re-reading; with an
 // active watcher (dirty != nil), non-dirty cached paths skip even the
 // stat. The commit amends headCommit: same parents and change ID.
 // NOTE(review): wc.AuthorOverride is not consulted here — presumably
 // applied by a caller or pending wiring; confirm.
 func (wc *WC) snapshotIntoTx(tx *store.Tx, headCommit *object.Commit, paths []string, cacheMap map[string]store.WCacheEntry, dirty map[string]bool, message string, now time.Time) (*object.Commit, [32]byte, error) {
 	r := wc.Repo
 
 	var entries []fileEntry
 
 	// The cache is rebuilt from scratch so stale rows cannot survive.
 	if err := r.Store.ClearWCache(tx); err != nil {
 		return nil, object.ZeroID, fmt.Errorf("clear wcache: %w", err)
 	}
 
 	for _, rel := range paths {
 		// Watcher active and path untouched: trust the cache verbatim.
 		if dirty != nil && !dirty[rel] {
 			if cached, ok := cacheMap[rel]; ok {
 				entries = append(entries, fileEntry{
 					path:   rel,
 					blobID: cached.BlobID,
 					mode:   object.EntryMode(cached.Mode),
 				})
 				if err := r.Store.SetWCacheEntry(tx, cached); err != nil {
 					return nil, object.ZeroID, fmt.Errorf("set wcache: %w", err)
 				}
 				continue
 			}
 		}
 
 		abs := filepath.Join(r.Root, rel)
 		info, err := os.Lstat(abs)
 		if err != nil {
 			// File vanished between listing and stat: drop it silently.
 			continue
 		}
 
 		var blobID [32]byte
 		mode := fileMode(info)
 
 		// Stat triple unchanged => reuse the cached blob without reading.
 		if cached, ok := cacheMap[rel]; ok {
 			st := info.Sys().(*syscall.Stat_t)
 			inode := st.Ino
 			mtime := info.ModTime().UnixNano()
 			size := info.Size()
 			if cached.Inode == inode && cached.MtimeNs == mtime && cached.Size == size {
 				blobID = cached.BlobID
 			}
 		}
 
 		// Cache miss or stat mismatch: read and store the blob.
 		if blobID == object.ZeroID {
 			content, err := readFileContent(abs, info)
 			if err != nil {
 				return nil, object.ZeroID, err
 			}
 			id, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: content})
 			if err != nil {
 				return nil, object.ZeroID, err
 			}
 			blobID = id
 		}
 
 		st := info.Sys().(*syscall.Stat_t)
 		if err := r.Store.SetWCacheEntry(tx, store.WCacheEntry{
 			Path:    rel,
 			Inode:   st.Ino,
 			MtimeNs: info.ModTime().UnixNano(),
 			Size:    info.Size(),
 			BlobID:  blobID,
 			Mode:    uint8(mode),
 		}); err != nil {
 			return nil, object.ZeroID, fmt.Errorf("set wcache: %w", err)
 		}
 
 		entries = append(entries, fileEntry{path: rel, blobID: blobID, mode: mode})
 	}
 
 	tree, err := buildTree(r, tx, entries)
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	sig := object.Signature{
 		Name:      r.Cfg.User.Name,
 		Email:     r.Cfg.User.Email,
 		Timestamp: now,
 	}
 
 	c := &object.Commit{
 		TreeID:    tree,
 		Parents:   headCommit.Parents,
 		ChangeID:  headCommit.ChangeID,
 		Author:    headCommit.Author,
 		Committer: sig,
 		Message:   message,
 		Phase:     headCommit.Phase,
 	}
 	// A head with no recorded author (zero timestamp) adopts the committer.
 	if headCommit.Author.Timestamp.IsZero() {
 		c.Author = sig
 	}
 
 	if err := wc.maybeSign(c); err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	commitID, err := repo.WriteCommitTx(r.Store, tx, c)
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 	if err := r.Store.SetChangeCommit(tx, c.ChangeID, commitID); err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	return c, commitID, nil
 }
 
 // snapshotInput gathers what a snapshot needs: the candidate paths, the
 // current cache keyed by path, and the watcher dirty set (nil when no
 // watcher runs). With a watcher, paths = cached ∪ dirty, avoiding a full
 // directory walk; otherwise the tree is walked via trackedPaths.
 func (wc *WC) snapshotInput() (paths []string, cacheMap map[string]store.WCacheEntry, dirty map[string]bool, err error) {
 	r := wc.Repo
 
 	cacheEntries, err := r.Store.ListWCacheEntries()
 	if err != nil {
 		return nil, nil, nil, err
 	}
 	cacheMap = make(map[string]store.WCacheEntry, len(cacheEntries))
 	for _, e := range cacheEntries {
 		cacheMap[e.Path] = e
 	}
 
 	// Best-effort: a dirtySet error degrades to a full scan (dirty == nil).
 	dirty, _ = dirtySet(r)
 
 	if dirty != nil {
 		seen := make(map[string]bool, len(cacheMap)+len(dirty))
 		for p := range cacheMap {
 			seen[p] = true
 			paths = append(paths, p)
 		}
 		for p := range dirty {
 			if !seen[p] {
 				paths = append(paths, p)
 			}
 		}
 	} else {
 		paths, err = wc.trackedPaths()
 		if err != nil {
 			return nil, nil, nil, err
 		}
 	}
 
 	return paths, cacheMap, dirty, nil
 }
 
 // Snapshot writes a commit capturing the current working copy, amending
 // the head in place (same change ID and parents — see snapshotIntoTx).
 // Unlike Snap it runs no hooks, moves no bookmarks, and starts no new
 // draft.
 func (wc *WC) Snapshot(message string) (*object.Commit, [32]byte, error) {
 	r := wc.Repo
 	now := time.Now()
 
 	head, _, err := r.HeadCommit()
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	paths, cacheMap, dirty, err := wc.snapshotInput()
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	tx, err := r.Store.Begin()
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	c, commitID, err := wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
 	if err != nil {
 		r.Store.Rollback(tx)
 		return nil, object.ZeroID, err
 	}
 	if err := r.Store.Commit(tx); err != nil {
 		return nil, object.ZeroID, err
 	}
 	return c, commitID, nil
 }
 
 func (wc *WC) Snap(message string) (*object.Commit, [32]byte, error) {
 	r := wc.Repo
 	now := time.Now()
 
 	before, err := r.CaptureRefState()
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	statusBefore, err := wc.Status()
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 	diffPaths := make(map[string]bool, len(statusBefore))
 	for _, fsEntry := range statusBefore {
 		diffPaths[fsEntry.Path] = true
 	}
 
 	useRestrictedPaths := len(r.Cfg.Hooks.PreSnap) > 0
 	if useRestrictedPaths {
 		if err := RunHooksSequential(r.Root, "pre-snap", r.Cfg.Hooks.PreSnap); err != nil {
 			return nil, object.ZeroID, fmt.Errorf("pre-snap hook failed: %w", err)
 		}
 	}
 
 	head, oldHeadID, err := r.HeadCommit()
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	type snapshotFn func(tx *store.Tx) (*object.Commit, [32]byte, error)
 	var doSnapshot snapshotFn
 
 	if useRestrictedPaths {
 		headBlobs := make(map[string][32]byte)
 		headModes := make(map[string]object.EntryMode)
 		if err := flattenTree(r, head.TreeID, "", headBlobs); err != nil {
 			return nil, object.ZeroID, err
 		}
 		if err := flattenTreeModes(r, head.TreeID, "", headModes); err != nil {
 			return nil, object.ZeroID, err
 		}
 		doSnapshot = func(tx *store.Tx) (*object.Commit, [32]byte, error) {
 			return wc.snapshotRestrictedPathsIntoTx(tx, head, headBlobs, headModes, diffPaths, message, now)
 		}
 	} else {
 		paths, cacheMap, dirty, err := wc.snapshotInput()
 		if err != nil {
 			return nil, object.ZeroID, err
 		}
 		doSnapshot = func(tx *store.Tx) (*object.Commit, [32]byte, error) {
 			return wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
 		}
 	}
 
 	existingBookmarks, _ := r.Store.ListBookmarks()
 
 	tx, err := r.Store.Begin()
 	if err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	snapped, snappedID, err := doSnapshot(tx)
 	if err != nil {
 		r.Store.Rollback(tx)
 		return nil, object.ZeroID, err
 	}
 
 	if snappedID != oldHeadID {
 		for _, bm := range existingBookmarks {
 			if bm.CommitID == oldHeadID {
 				_ = r.Store.SetBookmark(tx, store.Bookmark{
 					Name:     bm.Name,
 					CommitID: snappedID,
 					Remote:   bm.Remote,
 				})
 			}
 		}
 	}
 
 	newChangeID, err := r.Store.AllocChangeID(tx)
 	if err != nil {
 		r.Store.Rollback(tx)
 		return nil, object.ZeroID, err
 	}
 
 	sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
 	newDraft := &object.Commit{
 		TreeID:    snapped.TreeID,
 		Parents:   [][32]byte{snappedID},
 		ChangeID:  newChangeID,
 		Author:    sig,
 		Committer: sig,
 		Message:   "",
 		Phase:     object.PhaseDraft,
 	}
 
 	newDraftID, err := repo.WriteCommitTx(r.Store, tx, newDraft)
 	if err != nil {
 		r.Store.Rollback(tx)
 		return nil, object.ZeroID, err
 	}
 
 	if err := r.Store.SetChangeCommit(tx, newChangeID, newDraftID); err != nil {
 		r.Store.Rollback(tx)
 		return nil, object.ZeroID, err
 	}
 
 	after := buildRefState(snappedID, object.FormatChangeID(newChangeID))
 	op := store.Operation{
 		Kind:      "snap",
 		Timestamp: now.Unix(),
 		Before:    before,
 		After:     after,
 		Metadata:  "'" + firstLine(snapped.Message) + "'",
 	}
 	if _, err := r.Store.InsertOperation(tx, op); err != nil {
 		r.Store.Rollback(tx)
 		return nil, object.ZeroID, err
 	}
 
 	if err := r.Store.Commit(tx); err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	if err := r.WriteHead(object.FormatChangeID(newChangeID)); err != nil {
 		return nil, object.ZeroID, err
 	}
 
 	if len(r.Cfg.Hooks.PostSnap) > 0 {
 		if err := RunHooksSequential(r.Root, "post-snap", r.Cfg.Hooks.PostSnap); err != nil {
 			fmt.Fprintf(os.Stderr, "arche snap: post-snap hook: %v\n", err)
 		}
 	}
 
 	return snapped, snappedID, nil
 }
 
 func (wc *WC) Status() ([]FileStatus, error) {
 	r := wc.Repo
 	head, _, err := r.HeadCommit()
 	if err != nil {
 		return nil, err
 	}
 
 	headFiles := make(map[string][32]byte)
 	if err := flattenTree(r, head.TreeID, "", headFiles); err != nil {
 		return nil, err
 	}
 
 	wcPaths, err := wc.trackedPaths()
 	if err != nil {
 		return nil, err
 	}
 	wcSet := make(map[string]bool, len(wcPaths))
 	for _, p := range wcPaths {
 		wcSet[p] = true
 	}
 
 	cacheEntries, _ := r.Store.ListWCacheEntries()
 	cacheMap := make(map[string]store.WCacheEntry, len(cacheEntries))
 	for _, e := range cacheEntries {
 		cacheMap[e.Path] = e
 	}
 	dirty, _ := dirtySet(r)
 
 	var out []FileStatus
 
 	for path, headBlobID := range headFiles {
 		if !wcSet[path] {
 			out = append(out, FileStatus{Path: path, Status: 'D'})
 			continue
 		}
 
 		if dirty != nil && !dirty[path] {
 			if cached, ok := cacheMap[path]; ok {
 				if cached.BlobID != headBlobID {
 					out = append(out, FileStatus{Path: path, Status: 'M'})
 				}
 				continue
 			}
 		}
 
 		curBlobID, err := wc.blobIDForPath(path)
 		if err != nil {
 			continue
 		}
 		if curBlobID != headBlobID {
 			out = append(out, FileStatus{Path: path, Status: 'M'})
 		}
 	}
 
 	ignore, _ := loadIgnore(r.Root)
 	for _, path := range wcPaths {
 		if _, inHead := headFiles[path]; !inHead {
 			if ignore.Match(path) {
 				continue
 			}
 			out = append(out, FileStatus{Path: path, Status: 'A'})
 		}
 	}
 
 	sort.Slice(out, func(i, j int) bool { return out[i].Path < out[j].Path })
 	return out, nil
 }
 
// materializeDisk makes the on-disk working copy match the tree identified by
// treeID: files absent from the tree are deleted (unless ignored), and every
// tree entry is written with the appropriate permissions. Blob IDs that turn
// out to be conflict objects are rendered as "<<<<<<<"-style marker text.
//
// It returns the path->blobID and path->mode maps for the written tree, with
// conflicted paths removed from the blob map so callers never cache marker
// text as real file content.
func (wc *WC) materializeDisk(treeID [32]byte) (map[string][32]byte, map[string]object.EntryMode, error) {
	r := wc.Repo

	// Flatten the target tree into file->blob and file->mode lookups.
	wantFiles := make(map[string][32]byte)
	wantMode := make(map[string]object.EntryMode)
	if err := flattenTree(r, treeID, "", wantFiles); err != nil {
		return nil, nil, err
	}

	if err := flattenTreeModes(r, treeID, "", wantMode); err != nil {
		return nil, nil, err
	}

	// Pass 1: remove working-copy files that are not in the target tree,
	// leaving the .arche metadata directory and ignored paths untouched.
	ignore, _ := loadIgnore(r.Root)
	err := filepath.WalkDir(r.Root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return nil // unreadable entries are skipped, not fatal
		}
		rel, _ := filepath.Rel(r.Root, path)
		if rel == "." {
			return nil
		}
		if d.IsDir() {
			if rel == archeDirName || strings.HasPrefix(rel, archeDirName+string(os.PathSeparator)) {
				return filepath.SkipDir
			}
			return nil
		}
		if ignore.Match(rel) {
			return nil
		}
		if _, ok := wantFiles[rel]; !ok {
			return os.Remove(path)
		}
		return nil
	})
	if err != nil {
		return nil, nil, err
	}

	// Pass 2: write every target file, creating parent directories as needed.
	// A blob read failure is retried as a conflict-object read; only when both
	// fail is the error propagated.
	// NOTE(review): symlink-mode entries are written here as regular files
	// containing the link target bytes — confirm this is intended.
	var conflictPaths []string
	for relPath, blobID := range wantFiles {
		abs := filepath.Join(r.Root, relPath)
		if err := os.MkdirAll(filepath.Dir(abs), 0o755); err != nil {
			return nil, nil, err
		}
		content, err := r.ReadBlob(blobID)
		if err != nil {
			if conf, cErr := r.ReadConflict(blobID); cErr == nil {
				content = renderConflictMarkers(r, conf)
				conflictPaths = append(conflictPaths, relPath)
				err = nil
			}
		}
		if err != nil {
			return nil, nil, err
		}
		perm := fs.FileMode(0o644)
		if wantMode[relPath] == object.ModeExec {
			perm = 0o755
		}
		if err := os.WriteFile(abs, content, perm); err != nil {
			return nil, nil, err
		}
	}

	// Drop conflicted paths from the returned map (see doc comment above).
	for _, p := range conflictPaths {
		delete(wantFiles, p)
	}

	return wantFiles, wantMode, nil
}
 
 func renderConflictMarkers(r *repo.Repo, conf *object.Conflict) []byte {
 	readStr := func(id [32]byte) string {
 		if id == object.ZeroID {
 			return ""
 		}
 		b, _ := r.ReadBlob(id)
 		return string(b)
 	}
 	nl := func(s string) string {
 		if len(s) > 0 && s[len(s)-1] != '\n' {
 			return s + "\n"
 		}
 		return s
 	}
 	if conf.Ours.BlobID == object.ZeroID {
 		return []byte(fmt.Sprintf("<<<<<<< ours\n(deleted)\n=======\n%s>>>>>>> theirs\n", nl(readStr(conf.Theirs.BlobID))))
 	}
 	if conf.Theirs.BlobID == object.ZeroID {
 		return []byte(fmt.Sprintf("<<<<<<< ours\n%s=======\n(deleted)\n>>>>>>> theirs\n", nl(readStr(conf.Ours.BlobID))))
 	}
 	return []byte(fmt.Sprintf("<<<<<<< ours\n%s=======\n%s>>>>>>> theirs\n",
 		nl(readStr(conf.Ours.BlobID)),
 		nl(readStr(conf.Theirs.BlobID))))
 }
 
 func (wc *WC) populateWCacheInTx(tx *store.Tx, wantFiles map[string][32]byte) error {
 	r := wc.Repo
 	if err := r.Store.ClearWCache(tx); err != nil {
 		return err
 	}
 	for relPath, blobID := range wantFiles {
 		abs := filepath.Join(r.Root, relPath)
 		info, err := os.Lstat(abs)
 		if err != nil {
 			continue
 		}
 		st, ok := info.Sys().(*syscall.Stat_t)
 		if !ok {
 			continue
 		}
 		_ = r.Store.SetWCacheEntry(tx, store.WCacheEntry{
 			Path:    relPath,
 			Inode:   st.Ino,
 			MtimeNs: info.ModTime().UnixNano(),
 			Size:    info.Size(),
 			BlobID:  blobID,
 			Mode:    uint8(fileMode(info)),
 		})
 	}
 	return nil
 }
 
 func (wc *WC) MaterializeQuiet(treeID [32]byte) error {
 	r := wc.Repo
 
 	wantFiles, _, err := wc.materializeDisk(treeID)
 	if err != nil {
 		return err
 	}
 
 	tx, err := r.Store.Begin()
 	if err != nil {
 		return err
 	}
 	if err := wc.populateWCacheInTx(tx, wantFiles); err != nil {
 		r.Store.Rollback(tx)
 		return err
 	}
 	return r.Store.Commit(tx)
 }
 
 func (wc *WC) Materialize(treeID [32]byte, newChangeID string) error {
 	r := wc.Repo
 
 	before, _ := r.CaptureRefState()
 	now := time.Now()
 
 	wantFiles, _, err := wc.materializeDisk(treeID)
 	if err != nil {
 		return err
 	}
 
 	bare := object.StripChangeIDPrefix(newChangeID)
 	commitID, _ := r.Store.GetChangeCommit(bare)
 	after := buildRefState(commitID, newChangeID)
 
 	tx, err := r.Store.Begin()
 	if err != nil {
 		return err
 	}
 	if err := wc.populateWCacheInTx(tx, wantFiles); err != nil {
 		r.Store.Rollback(tx)
 		return err
 	}
 
 	op := store.Operation{
 		Kind:      "co",
 		Timestamp: now.Unix(),
 		Before:    before,
 		After:     after,
 		Metadata:  "checked out " + newChangeID,
 	}
 	if _, err := r.Store.InsertOperation(tx, op); err != nil {
 		r.Store.Rollback(tx)
 		return err
 	}
 
 	return r.Store.Commit(tx)
 }
 
// archeDirName is the repository metadata directory at the working-copy root;
// it is excluded from tracking, status, and materialization.
const archeDirName = ".arche"
 
 func (wc *WC) trackedPaths() ([]string, error) {
 	r := wc.Repo
 	ignore, _ := loadIgnore(r.Root)
 
 	var paths []string
 	err := filepath.WalkDir(r.Root, func(path string, d fs.DirEntry, err error) error {
 		if err != nil {
 			return nil
 		}
 		rel, _ := filepath.Rel(r.Root, path)
 		if rel == "." {
 			return nil
 		}
 		if d.IsDir() {
 			if rel == archeDirName || strings.HasPrefix(rel, archeDirName+string(os.PathSeparator)) {
 				return filepath.SkipDir
 			}
 			if ignore.MatchDir(rel) {
 				return filepath.SkipDir
 			}
 			return nil
 		}
 		if ignore.Match(rel) {
 			return nil
 		}
 		paths = append(paths, filepath.ToSlash(rel))
 		return nil
 	})
 	return paths, err
 }
 
 func (wc *WC) blobIDForPath(rel string) ([32]byte, error) {
 	r := wc.Repo
 	abs := filepath.Join(r.Root, rel)
 	info, err := os.Lstat(abs)
 	if err != nil {
 		return object.ZeroID, err
 	}
 	st := info.Sys().(*syscall.Stat_t)
 
 	if cached, _ := r.Store.GetWCacheEntry(rel); cached != nil {
 		if cached.Inode == st.Ino &&
 			cached.MtimeNs == info.ModTime().UnixNano() &&
 			cached.Size == info.Size() {
 			return cached.BlobID, nil
 		}
 	}
 
 	content, err := readFileContent(abs, info)
 	if err != nil {
 		return object.ZeroID, err
 	}
 	b := &object.Blob{Content: content}
 	return object.HashBlob(b), nil
 }
 
 func flattenTree(r *repo.Repo, treeID [32]byte, prefix string, out map[string][32]byte) error {
 	if treeID == object.ZeroID {
 		return nil
 	}
 	t, err := r.ReadTree(treeID)
 	if err != nil {
 		return err
 	}
 	for _, e := range t.Entries {
 		rel := join(prefix, e.Name)
 		switch e.Mode {
 		case object.ModeDir:
 			if err := flattenTree(r, e.ObjectID, rel, out); err != nil {
 				return err
 			}
 		default:
 			out[rel] = e.ObjectID
 		}
 	}
 	return nil
 }
 
 func flattenTreeModes(r *repo.Repo, treeID [32]byte, prefix string, out map[string]object.EntryMode) error {
 	if treeID == object.ZeroID {
 		return nil
 	}
 	t, err := r.ReadTree(treeID)
 	if err != nil {
 		return err
 	}
 	for _, e := range t.Entries {
 		rel := join(prefix, e.Name)
 		switch e.Mode {
 		case object.ModeDir:
 			if err := flattenTreeModes(r, e.ObjectID, rel, out); err != nil {
 				return err
 			}
 		default:
 			out[rel] = e.Mode
 		}
 	}
 	return nil
 }
 
// fileEntry is one file destined for a tree build: its slash-separated path
// relative to the repository root, its content blob ID, and its entry mode.
type fileEntry struct {
	path   string
	blobID [32]byte
	mode   object.EntryMode
}
 
 func buildTree(r *repo.Repo, tx *store.Tx, entries []fileEntry) ([32]byte, error) {
 	type node struct {
 		isFile   bool
 		blobID   [32]byte
 		mode     object.EntryMode
 		children map[string]*node
 	}
 	root := &node{children: make(map[string]*node)}
 
 	for _, e := range entries {
 		parts := strings.Split(e.path, "/")
 		cur := root
 		for i, part := range parts {
 			if i == len(parts)-1 {
 				cur.children[part] = &node{isFile: true, blobID: e.blobID, mode: e.mode}
 			} else {
 				if _, ok := cur.children[part]; !ok {
 					cur.children[part] = &node{children: make(map[string]*node)}
 				}
 				cur = cur.children[part]
 			}
 		}
 	}
 
 	var writeNode func(n *node) ([32]byte, error)
 	writeNode = func(n *node) ([32]byte, error) {
 		var treeEntries []object.TreeEntry
 		for name, child := range n.children {
 			if child.isFile {
 				treeEntries = append(treeEntries, object.TreeEntry{
 					Name:     name,
 					Mode:     child.mode,
 					ObjectID: child.blobID,
 				})
 			} else {
 				subID, err := writeNode(child)
 				if err != nil {
 					return object.ZeroID, err
 				}
 				treeEntries = append(treeEntries, object.TreeEntry{
 					Name:     name,
 					Mode:     object.ModeDir,
 					ObjectID: subID,
 				})
 			}
 		}
 		sort.Slice(treeEntries, func(i, j int) bool { return treeEntries[i].Name < treeEntries[j].Name })
 		t := &object.Tree{Entries: treeEntries}
 		id, err := repo.WriteTreeTx(r.Store, tx, t)
 		return id, err
 	}
 
 	return writeNode(root)
 }
 
 func fileMode(info os.FileInfo) object.EntryMode {
 	if info.Mode()&0o111 != 0 {
 		return object.ModeExec
 	}
 	if info.Mode()&os.ModeSymlink != 0 {
 		return object.ModeSymlink
 	}
 	return object.ModeFile
 }
 
 func readFileContent(abs string, info os.FileInfo) ([]byte, error) {
 	if info.Mode()&os.ModeSymlink != 0 {
 		target, err := os.Readlink(abs)
 		if err != nil {
 			return nil, err
 		}
 		return []byte(target), nil
 	}
 	return os.ReadFile(abs)
 }
 
 func join(prefix, name string) string {
 	if prefix == "" {
 		return name
 	}
 	return prefix + "/" + name
 }
 
 func buildRefState(commitID [32]byte, changeID string) string {
 	m := map[string]string{
 		"head": changeID,
 		"tip":  fmt.Sprintf("%x", commitID),
 	}
 	b, _ := json.Marshal(m)
 	return string(b)
 }
 
 func firstLine(s string) string {
 	if i := strings.IndexByte(s, '\n'); i >= 0 {
 		return s[:i]
 	}
 	return s
 }
 
// Amend replaces the head commit with a new commit capturing the current
// working-copy contents under the same change ID. An empty message reuses the
// old commit's message; public commits are refused. On success the old head is
// marked obsolete (when a new commit was actually produced) and draft
// descendants are auto-rebased onto the amended commit.
func (wc *WC) Amend(message string) (*object.Commit, [32]byte, error) {
	r := wc.Repo
	now := time.Now()

	head, oldHeadID, err := r.HeadCommit()
	if err != nil {
		return nil, object.ZeroID, err
	}
	// Rewriting published history is disallowed by default.
	if head.Phase == object.PhasePublic {
		return nil, object.ZeroID, fmt.Errorf("cannot amend a public commit; use --force-rewrite if you are sure")
	}

	// Capture the ref state up front so the oplog records a correct "before".
	before, err := r.CaptureRefState()
	if err != nil {
		return nil, object.ZeroID, err
	}

	if message == "" {
		message = head.Message
	}

	paths, cacheMap, dirty, err := wc.snapshotInput()
	if err != nil {
		return nil, object.ZeroID, err
	}

	tx, err := r.Store.Begin()
	if err != nil {
		return nil, object.ZeroID, err
	}

	amended, amendedID, err := wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	// Only record obsolescence when the amend produced a distinct commit
	// (content or message changed); an identical snapshot is a no-op.
	if oldHeadID != amendedID {
		obs := &object.ObsoleteMarker{
			Predecessor: oldHeadID,
			Successors:  [][32]byte{amendedID},
			Reason:      "amend",
			Timestamp:   now.Unix(),
		}
		if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
			r.Store.Rollback(tx)
			return nil, object.ZeroID, err
		}
	}

	after := buildRefState(amendedID, object.FormatChangeID(amended.ChangeID))
	op := store.Operation{
		Kind:      "amend",
		Timestamp: now.Unix(),
		Before:    before,
		After:     after,
		Metadata:  "'" + firstLine(amended.Message) + "'",
	}
	if _, err := r.Store.InsertOperation(tx, op); err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	if err := r.Store.Commit(tx); err != nil {
		return nil, object.ZeroID, err
	}

	// Rebase draft descendants after the amend transaction has committed;
	// a failure here is reported as a warning rather than undoing the amend.
	if oldHeadID != amendedID {
		if err := wc.autoRebaseDownstream(oldHeadID, amendedID, head.ChangeID, now); err != nil {
			fmt.Fprintf(os.Stderr, "arche: warning: downstream rebase failed: %v\n", err)
		}
	}

	return amended, amendedID, nil
}
 
// autoRebaseDownstream rebases all draft descendants of oldParentID onto
// newParentID after a history rewrite (e.g. amend). Descendants are discovered
// by following first-parent links only. Each rebased commit keeps its change
// ID, author, message, and phase; merge conflicts are embedded in the new tree
// and reported on stdout rather than aborting.
func (wc *WC) autoRebaseDownstream(oldParentID, newParentID [32]byte, headChangeID string, now time.Time) error {
	r := wc.Repo

	allChanges, err := r.Store.ListChanges()
	if err != nil {
		return err
	}

	// draftEntry pairs a draft commit with its change name and commit ID.
	type draftEntry struct {
		id       [32]byte
		changeID string
		commit   *object.Commit
	}

	// Index draft commits by their first parent so descendants can be walked
	// top-down. The head change itself is skipped: it was already rewritten.
	// Changes that are unreadable, non-draft, or parentless are ignored.
	children := make(map[[32]byte][]draftEntry)
	for _, ch := range allChanges {
		if ch.CommitID == object.ZeroID {
			continue
		}
		c, err := r.ReadCommit(ch.CommitID)
		if err != nil || c == nil {
			continue
		}
		if c.Phase != object.PhaseDraft {
			continue
		}
		if c.ChangeID == headChangeID {
			continue
		}
		if len(c.Parents) == 0 {
			continue
		}
		d := draftEntry{id: ch.CommitID, changeID: ch.Name, commit: c}
		children[c.Parents[0]] = append(children[c.Parents[0]], d)
	}

	// BFS from the rewritten commit collects tasks in an order guaranteeing a
	// parent is rebased before any of its children.
	// NOTE(review): the newParent field recorded here is never read later —
	// the task loop resolves the actual parent through `remapped` instead.
	type rebaseTask struct {
		entry     draftEntry
		newParent [32]byte
	}
	var tasks []rebaseTask
	queue := []struct {
		oldID [32]byte
		newID [32]byte
	}{{oldParentID, newParentID}}

	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		for _, child := range children[cur.oldID] {
			tasks = append(tasks, rebaseTask{entry: child, newParent: cur.newID})
			queue = append(queue, struct{ oldID, newID [32]byte }{child.id, child.id})
		}
	}

	// remapped tracks old commit ID -> rewritten commit ID as tasks complete;
	// seeded with the rewrite that triggered this rebase.
	remapped := map[[32]byte][32]byte{oldParentID: newParentID}

	for _, task := range tasks {
		oldFirst := task.entry.commit.Parents[0]
		newParent, ok := remapped[oldFirst]
		if !ok {
			newParent = oldFirst
		}

		// Three-way merge inputs: base = old parent's tree, ours = this
		// draft's tree, theirs = the rewritten parent's tree. An unreadable
		// old parent leaves the base tree zero (treated as empty).
		var baseTreeID [32]byte
		if pc, err2 := r.ReadCommit(oldFirst); err2 == nil {
			baseTreeID = pc.TreeID
		}
		newParentCommit, err := r.ReadCommit(newParent)
		if err != nil {
			return fmt.Errorf("read new parent for %s: %w", object.FormatChangeID(task.entry.changeID), err)
		}

		result, err := merge.Trees(r, baseTreeID, task.entry.commit.TreeID, newParentCommit.TreeID)
		if err != nil {
			return fmt.Errorf("merge for %s: %w", object.FormatChangeID(task.entry.changeID), err)
		}

		// The rebased commit keeps everything except tree, parent, committer.
		newCommit := &object.Commit{
			TreeID:    result.TreeID,
			Parents:   [][32]byte{newParent},
			ChangeID:  task.entry.changeID,
			Author:    task.entry.commit.Author,
			Committer: object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now},
			Message:   task.entry.commit.Message,
			Phase:     task.entry.commit.Phase,
		}

		// Each task commits in its own transaction so earlier successful
		// rebases survive a later failure.
		tx, err := r.Store.Begin()
		if err != nil {
			return err
		}
		newCommitID, err := repo.WriteCommitTx(r.Store, tx, newCommit)
		if err != nil {
			r.Store.Rollback(tx)
			return err
		}
		if err := r.Store.SetChangeCommit(tx, task.entry.changeID, newCommitID); err != nil {
			r.Store.Rollback(tx)
			return err
		}
		obs := &object.ObsoleteMarker{
			Predecessor: task.entry.id,
			Successors:  [][32]byte{newCommitID},
			Reason:      "amend",
			Timestamp:   now.Unix(),
		}
		if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
			r.Store.Rollback(tx)
			return err
		}
		if err := r.Store.Commit(tx); err != nil {
			return err
		}

		remapped[task.entry.id] = newCommitID
		conflictNote := ""
		if len(result.Conflicts) > 0 {
			conflictNote = fmt.Sprintf(" (%d conflict(s))", len(result.Conflicts))
		}
		fmt.Printf("  auto-rebased %s%s\n", object.FormatChangeID(task.entry.changeID), conflictNote)
	}
	return nil
}