arche / commit

commit c3c5e8acf658238a113242337fe8a6386d1693fe2cb0b1f32998c84392825711
change vnahdsme
author dewn <dewn5228@proton.me>
committer dewn <dewn5228@proton.me>
date 2026-03-10 17:25:27
phase public
signature Unsigned
fix log and tree default to main bookmark on server
.archeignore [A]
--- /dev/null
+++ b/.archeignore
@@ -1,0 +1,3 @@
+.direnv
+bin/
+local/

.dockerignore [A]
--- /dev/null
+++ b/.dockerignore
@@ -1,0 +1,25 @@
+# Build artefacts
+bin/
+coverage/
+
+# Go test cache
+*.test
+
+# Development databases and repos
+*.db
+*.arche-bundle
+packs/
+
+# Editor/OS noise
+.idea/
+.vscode/
+*.swp
+*.swo
+.DS_Store
+
+# Nix
+.direnv/
+result
+
+# The deploy dir itself is mounted via volume, not baked into the image
+deploy/

.envrc [A]
--- /dev/null
+++ b/.envrc
@@ -1,0 +1,3 @@
+use flake
+watch_file flake.nix
+watch_file flake.lock

.golangci.yml [A]
--- /dev/null
+++ b/.golangci.yml
@@ -1,0 +1,8 @@
+version: "2"
+
+linters:
+  default: none
+  enable:
+    - ineffassign
+    - staticcheck
+    - unused

Dockerfile [A]
--- /dev/null
+++ b/Dockerfile
@@ -1,0 +1,29 @@
+FROM golang:1.25-bookworm AS builder
+
+RUN apt-get update && apt-get install -y --no-install-recommends gcc libc6-dev && rm -rf /var/lib/apt/lists/*
+
+WORKDIR /src
+
+COPY go.mod go.sum ./
+RUN go mod download
+
+COPY . .
+
+RUN CGO_ENABLED=1 go build -ldflags="-s -w" -o /out/arche-server ./cmd/arche-server
+
+FROM debian:bookworm-slim
+
+RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl && rm -rf /var/lib/apt/lists/*
+
+RUN useradd -r -u 1000 -m arche
+USER arche
+
+WORKDIR /app
+
+COPY --from=builder /out/arche-server /app/arche-server
+
+RUN mkdir -p /app/data
+
+EXPOSE 8080
+
+ENTRYPOINT ["/app/arche-server", "--config", "/app/config/server.toml"]

Makefile [A]
--- /dev/null
+++ b/Makefile
@@ -1,0 +1,140 @@
+BINARY     := arche
+SERVER     := arche-server
+CMD        := ./cmd/arche
+CMD_SRV    := ./cmd/arche-server
+BUILD_DIR  := ./bin
+COVER_DIR  := ./coverage
+
+CGO_ENABLED := 1
+GOFLAGS     :=
+LDFLAGS     := -ldflags="-s -w"
+
+TEST_FLAGS  := -race -count=1
+COVER_FLAGS := -coverprofile=$(COVER_DIR)/cover.out -covermode=atomic
+
+.DEFAULT_GOAL := help
+
+.PHONY: build
+build:
+	@mkdir -p $(BUILD_DIR)
+	CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) $(LDFLAGS) -o $(BUILD_DIR)/$(BINARY) $(CMD)
+	@echo "✓ Built $(BUILD_DIR)/$(BINARY)"
+	CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) $(LDFLAGS) -o $(BUILD_DIR)/$(SERVER) $(CMD_SRV)
+	@echo "✓ Built $(BUILD_DIR)/$(SERVER)"
+
+.PHONY: build-debug
+build-debug:
+	@mkdir -p $(BUILD_DIR)
+	CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) -gcflags="all=-N -l" -o $(BUILD_DIR)/$(BINARY)-debug $(CMD)
+	@echo "✓ Built $(BUILD_DIR)/$(BINARY)-debug"
+	CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) -gcflags="all=-N -l" -o $(BUILD_DIR)/$(SERVER)-debug $(CMD_SRV)
+	@echo "✓ Built $(BUILD_DIR)/$(SERVER)-debug"
+
+.PHONY: build-server
+build-server:
+	@mkdir -p $(BUILD_DIR)
+	CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) $(LDFLAGS) -o $(BUILD_DIR)/$(SERVER) $(CMD_SRV)
+	@echo "✓ Built $(BUILD_DIR)/$(SERVER)"
+
+.PHONY: build-client
+build-client:
+	@mkdir -p $(BUILD_DIR)
+	CGO_ENABLED=$(CGO_ENABLED) go build $(GOFLAGS) $(LDFLAGS) -o $(BUILD_DIR)/$(BINARY) $(CMD)
+	@echo "✓ Built $(BUILD_DIR)/$(BINARY)"
+
+.PHONY: install
+install:
+	CGO_ENABLED=$(CGO_ENABLED) go install $(GOFLAGS) $(LDFLAGS) $(CMD)
+	@echo "✓ Installed arche"
+
+.PHONY: run
+run: build
+	$(BUILD_DIR)/$(BINARY) $(ARGS)
+
+.PHONY: test
+test:
+	CGO_ENABLED=$(CGO_ENABLED) go test $(TEST_FLAGS) ./...
+
+.PHONY: test-short
+test-short:
+	CGO_ENABLED=$(CGO_ENABLED) go test $(TEST_FLAGS) -short ./...
+
+.PHONY: test-pkg
+test-pkg:
+	CGO_ENABLED=$(CGO_ENABLED) go test $(TEST_FLAGS) $(PKG)
+
+.PHONY: cover
+cover:
+	@mkdir -p $(COVER_DIR)
+	CGO_ENABLED=$(CGO_ENABLED) go test $(TEST_FLAGS) $(COVER_FLAGS) ./...
+	go tool cover -html=$(COVER_DIR)/cover.out -o $(COVER_DIR)/cover.html
+	@echo "✓ Coverage report: $(COVER_DIR)/cover.html"
+
+.PHONY: cover-func
+cover-func:
+	@mkdir -p $(COVER_DIR)
+	CGO_ENABLED=$(CGO_ENABLED) go test $(TEST_FLAGS) $(COVER_FLAGS) ./...
+	go tool cover -func=$(COVER_DIR)/cover.out
+
+.PHONY: lint
+lint:
+	golangci-lint run ./...
+
+.PHONY: lint-fix
+lint-fix:
+	golangci-lint run --fix ./...
+
+.PHONY: fmt
+fmt:
+	gofumpt -w .
+	goimports -w .
+	djlint --profile golang --reformat .
+	@echo "✓ Formatted all files"
+
+.PHONY: fmt-check
+fmt-check:
+	@out=$$(gofumpt -l .); \
+	if [ -n "$$out" ]; then \
+		echo "Unformatted files:"; echo "$$out"; exit 1; \
+	fi
+	djlint --profile golang --check .
+	@echo "✓ All files formatted"
+
+.PHONY: vet
+vet:
+	CGO_ENABLED=$(CGO_ENABLED) go vet ./...
+
+.PHONY: generate
+generate:
+	CGO_ENABLED=$(CGO_ENABLED) go generate ./...
+
+.PHONY: tidy
+tidy:
+	go mod tidy
+	go mod verify
+
+.PHONY: deps
+deps:
+	go list -m -json all | jq -r 'select(.Indirect | not) | .Path + " " + .Version'
+
+.PHONY: db-inspect
+db-inspect:
+	sqlite3 $(or $(REPO),.)/arche/store.db
+
+.PHONY: db-schema
+db-schema:
+	sqlite3 $(or $(REPO),.)/arche/store.db ".schema"
+
+.PHONY: clean
+clean:
+	rm -rf $(BUILD_DIR) $(COVER_DIR)
+	@echo "✓ Cleaned"
+
+.PHONY: clean-all
+clean-all: clean
+	go clean -cache -testcache
+	@echo "✓ Cleaned build cache"
+
+.PHONY: ci
+ci: fmt-check vet lint test
+	@echo "✓ CI passed"

cmd/arche-server/main.go [A]
--- /dev/null
+++ b/cmd/arche-server/main.go
@@ -1,0 +1,15 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"arche/internal/archesrv"
+)
+
+func main() {
+	if err := archesrv.Run(); err != nil {
+		fmt.Fprintf(os.Stderr, "arche-server: %v\n", err)
+		os.Exit(1)
+	}
+}

cmd/arche/main.go [A]
--- /dev/null
+++ b/cmd/arche/main.go
@@ -1,0 +1,17 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"arche/internal/cli"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+func main() {
+	if err := cli.Root.Execute(); err != nil {
+		fmt.Fprintln(os.Stderr, "error:", err)
+		os.Exit(1)
+	}
+}

deploy/server.toml [A]
--- /dev/null
+++ b/deploy/server.toml
@@ -1,0 +1,36 @@
+# arche-server configuration
+# This file is mounted read-only into the container at /app/config/server.toml.
+# Edit it and run `docker compose restart` to apply changes.
+
+[server]
+# HTTP listener. Required. The container exposes this on the host via
+# the "ports" mapping in docker-compose.yml.
+listen_http = ":8080"
+
+# Optional TLS. Uncomment and point at your certificate files
+# (mount them as extra volumes). When enabled, also expose port 8443.
+# listen_https = ":8443"
+# tls_cert     = "/app/config/tls/cert.pem"
+# tls_key      = "/app/config/tls/key.pem"
+
+# Optional SSH push access (arche+ssh://). Expose port 2222 in compose too.
+# listen_ssh = ":2222"
+
+[storage]
+# All data (server.db, repo objects, packs) lives here.
+# Backed by the "arche-data" Docker volume.
+data_dir = "/app/data"
+
+[auth]
+# Controls who can create accounts:
+#   "disabled" — admin creates accounts manually (good for personal forge)
+#   "invite"   — admin generates invite links
+#   "open"     — anyone can register (public forge)
+registration = "disabled"
+
+# Server-side shell hooks (admin only). Leave empty to disable.
+[hooks]
+# pre-receive  = "/app/hooks/secret-scan.sh"
+# update       = "/app/hooks/protect-main.sh"
+# post-receive = ""
+timeout_sec = 30

docker-compose.yml [A]
--- /dev/null
+++ b/docker-compose.yml
@@ -1,0 +1,27 @@
+services:
+  arche-server:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    container_name: arche-server
+    restart: unless-stopped
+
+    ports:
+      - "8080:8080"
+
+    volumes:
+      - arche-data:/app/data
+      - ./deploy/server.toml:/app/config/server.toml:ro
+
+    environment:
+      - ARCHE_BASE_URL=${ARCHE_BASE_URL:-http://localhost:8080}
+
+    healthcheck:
+      test: ["CMD", "curl", "-fs", "http://localhost:8080/"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+      start_period: 10s
+
+volumes:
+  arche-data:

flake.lock [A]
--- /dev/null
+++ b/flake.lock
@@ -1,0 +1,61 @@
+{
+  "nodes": {
+    "flake-utils": {
+      "inputs": {
+        "systems": "systems"
+      },
+      "locked": {
+        "lastModified": 1731533236,
+        "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "nixpkgs": {
+      "locked": {
+        "lastModified": 1772773019,
+        "narHash": "sha256-E1bxHxNKfDoQUuvriG71+f+s/NT0qWkImXsYZNFFfCs=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "aca4d95fce4914b3892661bcb80b8087293536c6",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "root": {
+      "inputs": {
+        "flake-utils": "flake-utils",
+        "nixpkgs": "nixpkgs"
+      }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    }
+  },
+  "root": "root",
+  "version": 7
+}

flake.nix [A]
--- /dev/null
+++ b/flake.nix
@@ -1,0 +1,41 @@
+{
+  description = "Arche VCS — development environment";
+
+  inputs = {
+    nixpkgs.url     = "github:NixOS/nixpkgs/nixos-unstable";
+    flake-utils.url = "github:numtide/flake-utils";
+  };
+
+  outputs = { self, nixpkgs, flake-utils }:
+    flake-utils.lib.eachDefaultSystem (system:
+      let
+        pkgs = nixpkgs.legacyPackages.${system};
+      in {
+        devShells.default = pkgs.mkShell {
+          name = "arche";
+
+          packages = with pkgs; [
+            go
+            gopls
+            gotools
+            golangci-lint
+            gofumpt
+
+            djlint
+            gnumake sqlite # sqlite3 CLI is invoked by shellHook below
+            git
+            just
+          ];
+
+          CGO_ENABLED = "1";
+
+          shellHook = ''
+            echo "Arche dev environment"
+            echo "   Go:     $(go version)"
+            echo "   SQLite: $(sqlite3 --version)"
+            echo ""
+          '';
+        };
+      }
+    );
+}

go.mod [A]
--- /dev/null
+++ b/go.mod
@@ -1,0 +1,45 @@
+module arche
+
+go 1.25.0
+
+require (
+	github.com/BurntSushi/toml v1.6.0
+	github.com/alecthomas/chroma/v2 v2.23.1
+	github.com/charmbracelet/bubbletea v1.3.10
+	github.com/charmbracelet/lipgloss v1.1.0
+	github.com/google/uuid v1.6.0
+	github.com/klauspost/compress v1.18.4
+	github.com/mattn/go-sqlite3 v1.14.34
+	github.com/sergi/go-diff v1.4.0
+	github.com/spf13/cobra v1.10.2
+	github.com/yuin/goldmark v1.7.16
+	github.com/zeebo/blake3 v0.2.4
+	golang.org/x/crypto v0.48.0
+)
+
+require (
+	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
+	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
+	github.com/charmbracelet/x/ansi v0.10.1 // indirect
+	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
+	github.com/charmbracelet/x/term v0.2.1 // indirect
+	github.com/dlclark/regexp2 v1.11.5 // indirect
+	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
+	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-localereader v0.0.1 // indirect
+	github.com/mattn/go-runewidth v0.0.16 // indirect
+	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
+	github.com/muesli/cancelreader v0.2.2 // indirect
+	github.com/muesli/termenv v0.16.0 // indirect
+	github.com/pierrec/lz4/v4 v4.1.26 // indirect
+	github.com/rivo/uniseg v0.4.7 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
+	github.com/stretchr/testify v1.9.0 // indirect
+	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
+	golang.org/x/sys v0.42.0 // indirect
+	golang.org/x/text v0.34.0 // indirect
+)

go.sum [A]
--- /dev/null
+++ b/go.sum
@@ -1,0 +1,109 @@
+github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
+github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
+github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/chroma/v2 v2.23.1 h1:nv2AVZdTyClGbVQkIzlDm/rnhk1E9bU9nXwmZ/Vk/iY=
+github.com/alecthomas/chroma/v2 v2.23.1/go.mod h1:NqVhfBR0lte5Ouh3DcthuUCTUpDC9cxBOfyMbMQPs3o=
+github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs=
+github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
+github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
+github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
+github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
+github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
+github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
+github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
+github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ=
+github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
+github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
+github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
+github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
+github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
+github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
+github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
+github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
+github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.34 h1:3NtcvcUnFBPsuRcno8pUtupspG/GM+9nZ88zgJcp6Zk=
+github.com/mattn/go-sqlite3 v1.14.34/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
+github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
+github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
+github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
+github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
+github.com/pierrec/lz4/v4 v4.1.26 h1:GrpZw1gZttORinvzBdXPUXATeqlJjqUG/D87TKMnhjY=
+github.com/pierrec/lz4/v4 v4.1.26/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
+github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
+github.com/yuin/goldmark v1.7.16 h1:n+CJdUxaFMiDUNnWC3dMWCIQJSkxH4uz3ZwQBkAlVNE=
+github.com/yuin/goldmark v1.7.16/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
+github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
+github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
+github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
+github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
+golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
+golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
+golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
+golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
+golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
+golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
+golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
+golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

internal/archesrv/admin_test.go [A]
--- /dev/null
+++ b/internal/archesrv/admin_test.go
@@ -1,0 +1,100 @@
+package archesrv
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"testing"
+)
+
+func TestForgeServer_Admin_InvitesCRUD(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+
+	resp, err := client.Get(ts.URL + "/admin/invites")
+	if err != nil {
+		t.Fatalf("GET /admin/invites: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("admin invites page: want 200, got %d", resp.StatusCode)
+	}
+
+	resp2, err := client.PostForm(ts.URL+"/admin/invites", nil)
+	if err != nil {
+		t.Fatalf("POST /admin/invites: %v", err)
+	}
+	body2, _ := io.ReadAll(resp2.Body)
+	resp2.Body.Close()
+	if resp2.StatusCode != http.StatusOK {
+		t.Errorf("create invite: want 200, got %d", resp2.StatusCode)
+	}
+	if !strings.Contains(string(body2), "/register?invite=") {
+		t.Errorf("create invite: response should contain /register?invite= link")
+	}
+
+	adminUser, _, _ := s.db.GetUserByName("admin")
+	invites, err := s.db.ListInvites(adminUser.ID)
+	if err != nil {
+		t.Fatalf("ListInvites: %v", err)
+	}
+	if len(invites) == 0 {
+		t.Fatal("expected at least one invite after POST")
+	}
+	inviteID := invites[0].ID
+
+	req, _ := http.NewRequest(http.MethodDelete,
+		fmt.Sprintf("%s/admin/invites/%d", ts.URL, inviteID), nil)
+	resp3, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("DELETE invite: %v", err)
+	}
+	resp3.Body.Close()
+	if resp3.StatusCode != http.StatusNoContent {
+		t.Errorf("delete invite: want 204, got %d", resp3.StatusCode)
+	}
+
+	remaining, _ := s.db.ListInvites(adminUser.ID)
+	for _, inv := range remaining {
+		if inv.ID == inviteID {
+			t.Error("invite should be gone after DELETE")
+		}
+	}
+}
+
+func TestForgeServer_Admin_NonAdminCannotAccessAdminRoutes(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "adminpass", true) //nolint:errcheck
+	s.db.CreateUser("alice", "pass", false)     //nolint:errcheck
+
+	aliceClient := loginAs(t, ts, "alice", "pass")
+
+	for _, path := range []string{
+		"/admin/invites",
+		"/admin/users",
+	} {
+		resp, err := aliceClient.Get(ts.URL + path)
+		if err != nil {
+			t.Fatalf("GET %s: %v", path, err)
+		}
+		resp.Body.Close()
+		if resp.StatusCode < 400 {
+			t.Errorf("%s: non-admin should get 4xx, got %d", path, resp.StatusCode)
+		}
+	}
+}
+
+func TestForgeServer_Admin_UsersPage(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+
+	resp, err := client.Get(ts.URL + "/admin/users")
+	if err != nil {
+		t.Fatalf("GET /admin/users: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("admin users page: want 200, got %d", resp.StatusCode)
+	}
+}

internal/archesrv/auth.go [A]
--- /dev/null
+++ b/internal/archesrv/auth.go
@@ -1,0 +1,287 @@
+package archesrv
+
+import (
+	"crypto/rand"
+	"crypto/subtle"
+	"database/sql"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"golang.org/x/crypto/argon2"
+)
+
+const (
+	argonTime    = 3
+	argonMemory  = 64 * 1024
+	argonThreads = 4
+	argonKeyLen  = 32
+	argonSaltLen = 16
+)
+
+type User struct {
+	ID       int64
+	Username string
+	IsAdmin  bool
+}
+
+func hashPassword(plain string) (string, error) {
+	salt := make([]byte, argonSaltLen)
+	if _, err := rand.Read(salt); err != nil {
+		return "", fmt.Errorf("generate salt: %w", err)
+	}
+	hash := argon2.IDKey([]byte(plain), salt, argonTime, argonMemory, argonThreads, argonKeyLen)
+	encoded := fmt.Sprintf("$argon2id$v=19$m=%d,t=%d,p=%d$%s$%s",
+		argonMemory, argonTime, argonThreads,
+		base64.RawStdEncoding.EncodeToString(salt),
+		base64.RawStdEncoding.EncodeToString(hash),
+	)
+	return encoded, nil
+}
+
+func checkPassword(encoded, plain string) bool {
+	parts := strings.Split(encoded, "$")
+	if len(parts) != 6 || parts[1] != "argon2id" {
+		return false
+	}
+	var m, t uint32
+	var p uint8
+	if _, err := fmt.Sscanf(parts[3], "m=%d,t=%d,p=%d", &m, &t, &p); err != nil {
+		return false
+	}
+	salt, err := base64.RawStdEncoding.DecodeString(parts[4])
+	if err != nil {
+		return false
+	}
+	wantHash, err := base64.RawStdEncoding.DecodeString(parts[5])
+	if err != nil {
+		return false
+	}
+	gotHash := argon2.IDKey([]byte(plain), salt, t, m, p, uint32(len(wantHash)))
+	return subtle.ConstantTimeCompare(gotHash, wantHash) == 1
+}
+
+func (d *DB) CreateUser(username, password string, isAdmin bool) (*User, error) {
+	hash, err := hashPassword(password)
+	if err != nil {
+		return nil, fmt.Errorf("hash password: %w", err)
+	}
+	admin := 0
+	if isAdmin {
+		admin = 1
+	}
+	res, err := d.db.Exec(
+		"INSERT INTO users(username,password_hash,is_admin,created_at) VALUES(?,?,?,?)",
+		username, hash, admin, time.Now().Unix(),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("create user: %w", err)
+	}
+	id, _ := res.LastInsertId()
+	return &User{ID: id, Username: username, IsAdmin: isAdmin}, nil
+}
+
+func (d *DB) GetUserByName(username string) (*User, string, error) {
+	var u User
+	var hash string
+	err := d.db.QueryRow(
+		"SELECT id, username, password_hash, is_admin FROM users WHERE username=?",
+		username,
+	).Scan(&u.ID, &u.Username, &hash, &u.IsAdmin)
+	if err == sql.ErrNoRows {
+		return nil, "", nil
+	}
+	return &u, hash, err
+}
+
+func (d *DB) GetUserByID(id int64) (*User, error) {
+	var u User
+	err := d.db.QueryRow(
+		"SELECT id, username, is_admin FROM users WHERE id=?", id,
+	).Scan(&u.ID, &u.Username, &u.IsAdmin)
+	if err == sql.ErrNoRows {
+		return nil, nil
+	}
+	return &u, err
+}
+
+func (d *DB) HasAnyUser() (bool, error) {
+	var count int
+	err := d.db.QueryRow("SELECT COUNT(*) FROM users").Scan(&count)
+	return count > 0, err
+}
+
+func (d *DB) ListUsers() ([]User, error) {
+	rows, err := d.db.Query("SELECT id, username, is_admin FROM users ORDER BY id")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var users []User
+	for rows.Next() {
+		var u User
+		if err := rows.Scan(&u.ID, &u.Username, &u.IsAdmin); err != nil {
+			return nil, err
+		}
+		users = append(users, u)
+	}
+	return users, rows.Err()
+}
+
+func (d *DB) DeleteUser(id int64) error {
+	_, err := d.db.Exec("DELETE FROM users WHERE id=?", id)
+	return err
+}
+
+const (
+	sessionCookie = "arche_session"
+	sessionTTL    = 7 * 24 * time.Hour
+)
+
+func generateToken() (string, error) {
+	b := make([]byte, 32)
+	if _, err := rand.Read(b); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(b), nil
+}
+
+func (d *DB) CreateSession(userID int64) (string, error) {
+	tok, err := generateToken()
+	if err != nil {
+		return "", err
+	}
+	exp := time.Now().Add(sessionTTL).Unix()
+	_, err = d.db.Exec(
+		"INSERT INTO sessions(user_id,token,expires_at) VALUES(?,?,?)",
+		userID, tok, exp,
+	)
+	return tok, err
+}
+
+func (d *DB) GetSessionUser(token string) (*User, error) {
+	var userID int64
+	var exp int64
+	err := d.db.QueryRow(
+		"SELECT user_id, expires_at FROM sessions WHERE token=?", token,
+	).Scan(&userID, &exp)
+	if err == sql.ErrNoRows {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	if time.Now().Unix() > exp {
+		d.db.Exec("DELETE FROM sessions WHERE token=?", token) //nolint:errcheck
+		return nil, nil
+	}
+	return d.GetUserByID(userID)
+}
+
+func (d *DB) DeleteSession(token string) error {
+	_, err := d.db.Exec("DELETE FROM sessions WHERE token=?", token)
+	return err
+}
+
+func (d *DB) currentUser(r *http.Request) *User {
+	if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
+		fp := certFingerprint(r.TLS.PeerCertificates[0])
+		if u, _ := d.AuthorizeMTLSCert(fp); u != nil {
+			return u
+		}
+	}
+
+	auth := r.Header.Get("Authorization")
+	if strings.HasPrefix(auth, "Bearer ") {
+		tok := strings.TrimPrefix(auth, "Bearer ")
+		u, _ := d.lookupAPIToken(tok)
+		return u
+	}
+
+	c, err := r.Cookie(sessionCookie)
+	if err != nil {
+		return nil
+	}
+
+	u, _ := d.GetSessionUser(c.Value)
+	return u
+}
+
+func (d *DB) CreateAPIToken(userID int64, label string) (string, error) {
+	tok, err := generateToken()
+	if err != nil {
+		return "", err
+	}
+	hash, err := hashPassword(tok)
+	if err != nil {
+		return "", err
+	}
+	_, err = d.db.Exec(
+		"INSERT INTO api_tokens(user_id,token_hash,label,created_at) VALUES(?,?,?,?)",
+		userID, hash, label, time.Now().Unix(),
+	)
+	if err != nil {
+		return "", err
+	}
+	return tok, nil
+}
+
+func (d *DB) lookupAPIToken(plain string) (*User, error) {
+	rows, err := d.db.Query("SELECT user_id, token_hash FROM api_tokens")
+	if err != nil {
+		return nil, err
+	}
+	var foundUID int64
+	for rows.Next() {
+		var uid int64
+		var hash string
+		if err := rows.Scan(&uid, &hash); err != nil {
+			continue
+		}
+		if checkPassword(hash, plain) {
+			foundUID = uid
+			break
+		}
+	}
+
+	rows.Close()
+	if foundUID == 0 {
+		return nil, nil
+	}
+
+	return d.GetUserByID(foundUID)
+}
+
+type APIToken struct {
+	ID        int64
+	Label     string
+	CreatedAt int64
+}
+
+func (d *DB) ListAPITokens(userID int64) ([]APIToken, error) {
+	rows, err := d.db.Query(
+		"SELECT id, label, created_at FROM api_tokens WHERE user_id=? ORDER BY created_at DESC",
+		userID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var tokens []APIToken
+	for rows.Next() {
+		var t APIToken
+		if err := rows.Scan(&t.ID, &t.Label, &t.CreatedAt); err != nil {
+			return nil, err
+		}
+		tokens = append(tokens, t)
+	}
+	return tokens, rows.Err()
+}
+
+func (d *DB) DeleteAPIToken(id int64, userID int64) error {
+	_, err := d.db.Exec("DELETE FROM api_tokens WHERE id=? AND user_id=?", id, userID)
+	return err
+}

internal/archesrv/auth_test.go [A]
--- /dev/null
+++ b/internal/archesrv/auth_test.go
@@ -1,0 +1,200 @@
+package archesrv
+
+import (
+	"io"
+	"net/http"
+	"strings"
+	"testing"
+)
+
+// Verifies the login form renders (200) for an anonymous visitor.
+func TestForgeServer_Login_PageLoads(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "pass", true) //nolint:errcheck
+
+	resp, err := http.Get(ts.URL + "/login")
+	if err != nil {
+		t.Fatalf("GET /login: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("want 200, got %d", resp.StatusCode)
+	}
+}
+
+// Verifies a wrong password re-renders the login page (200) with an
+// "invalid" message instead of redirecting.
+func TestForgeServer_Login_WrongPassword(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "correctpass", true) //nolint:errcheck
+
+	jar := newCookieJar()
+	client := &http.Client{
+		Jar:           jar,
+		CheckRedirect: func(_ *http.Request, _ []*http.Request) error { return http.ErrUseLastResponse },
+	}
+	resp, err := client.PostForm(ts.URL+"/login", map[string][]string{
+		"username": {"admin"},
+		"password": {"wrongpass"},
+	})
+	if err != nil {
+		t.Fatalf("POST /login: %v", err)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("bad creds: want 200 (error page), got %d", resp.StatusCode)
+	}
+	if !strings.Contains(string(body), "invalid") {
+		t.Error("login error page should mention 'invalid'")
+	}
+}
+
+// Verifies /logout invalidates the session: a private page readable while
+// logged in returns 401 afterwards.
+func TestForgeServer_Logout_InvalidatesSession(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	r1, err := client.Get(ts.URL + "/myrepo/issues")
+	if err != nil {
+		t.Fatalf("GET while logged in: %v", err)
+	}
+	r1.Body.Close()
+	if r1.StatusCode != http.StatusOK {
+		t.Fatalf("expected 200 while logged in, got %d", r1.StatusCode)
+	}
+
+	r2, err := client.Get(ts.URL + "/logout")
+	if err != nil {
+		t.Fatalf("GET /logout: %v", err)
+	}
+	r2.Body.Close()
+
+	r3, err := client.Get(ts.URL + "/myrepo/issues")
+	if err != nil {
+		t.Fatalf("GET after logout: %v", err)
+	}
+	r3.Body.Close()
+	if r3.StatusCode != http.StatusUnauthorized {
+		t.Errorf("after logout: want 401, got %d", r3.StatusCode)
+	}
+}
+
+// Verifies that with registration = "open" a self-service signup succeeds
+// (non-error response) and the account appears in the database.
+func TestForgeServer_Register_OpenRegistration(t *testing.T) {
+	s, ts := newTestServerWith(t, func(cfg *Config) { cfg.Auth.Registration = "open" })
+	s.db.CreateUser("admin", "pass", true) //nolint:errcheck
+
+	jar := newCookieJar()
+	client := &http.Client{Jar: jar, CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
+		return http.ErrUseLastResponse
+	}}
+
+	resp, err := client.PostForm(ts.URL+"/register", map[string][]string{
+		"username": {"newuser"},
+		"password": {"newpass"},
+		"confirm":  {"newpass"},
+	})
+	if err != nil {
+		t.Fatalf("POST /register: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("open registration: want redirect, got %d", resp.StatusCode)
+	}
+
+	u, _, err := s.db.GetUserByName("newuser")
+	if err != nil || u == nil {
+		t.Fatal("new user not found after registration")
+	}
+}
+
+// Verifies that a password/confirm mismatch never creates the account.
+// The original assertion was inverted: the "user not created" check only
+// ran when the error page did NOT contain "do not match", so the failure
+// case (error rendered but user created anyway) passed silently.
+func TestForgeServer_Register_PasswordMismatch(t *testing.T) {
+	s, ts := newTestServerWith(t, func(cfg *Config) { cfg.Auth.Registration = "open" })
+	s.db.CreateUser("admin", "pass", true) //nolint:errcheck
+
+	jar := newCookieJar()
+	client := &http.Client{Jar: jar, CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
+		return http.ErrUseLastResponse
+	}}
+	resp, err := client.PostForm(ts.URL+"/register", map[string][]string{
+		"username": {"alice"},
+		"password": {"abc"},
+		"confirm":  {"xyz"},
+	})
+	if err != nil {
+		t.Fatalf("POST /register: %v", err)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	resp.Body.Close()
+
+	// The account must not exist regardless of how the error is rendered.
+	if u, _, _ := s.db.GetUserByName("alice"); u != nil {
+		t.Errorf("user should not be created when passwords do not match (response: %s)", body)
+	}
+}
+
+// Verifies invite-mode registration: signup without a token fails, signup
+// with a valid token succeeds, and the invite is marked used afterwards.
+func TestForgeServer_Register_InviteRequired(t *testing.T) {
+	s, ts := newTestServerWith(t, func(cfg *Config) { cfg.Auth.Registration = "invite" })
+	admin, _ := s.db.CreateUser("admin", "pass", true)
+
+	jar := newCookieJar()
+	client := &http.Client{Jar: jar, CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
+		return http.ErrUseLastResponse
+	}}
+
+	resp, err := client.PostForm(ts.URL+"/register", map[string][]string{
+		"username": {"bob"},
+		"password": {"pass"},
+		"confirm":  {"pass"},
+	})
+	if err != nil {
+		t.Fatalf("POST /register no invite: %v", err)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	resp.Body.Close()
+
+	u, _, _ := s.db.GetUserByName("bob")
+	if u != nil {
+		t.Error("user should not be created without invite token")
+	}
+	_ = body
+
+	inv, _ := s.db.CreateInvite(admin.ID)
+	resp2, err := client.PostForm(ts.URL+"/register", map[string][]string{
+		"username":     {"carol"},
+		"password":     {"carpass"},
+		"confirm":      {"carpass"},
+		"invite_token": {inv.Token},
+	})
+	if err != nil {
+		t.Fatalf("POST /register with invite: %v", err)
+	}
+	resp2.Body.Close()
+	if resp2.StatusCode >= 400 {
+		t.Errorf("invite registration: want redirect, got %d", resp2.StatusCode)
+	}
+
+	u2, _, err := s.db.GetUserByName("carol")
+	if err != nil || u2 == nil {
+		t.Fatal("user carol not found after invite registration")
+	}
+
+	got, _ := s.db.GetInvite(inv.Token)
+	if got == nil || got.UsedBy == nil {
+		t.Error("invite should be marked as used after registration")
+	}
+}
+
+// Verifies the register route 404s under the default config, where
+// DefaultConfig sets registration = "disabled".
+func TestForgeServer_Register_DisabledByDefault(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "pass", true) //nolint:errcheck
+
+	resp, err := http.Get(ts.URL + "/register")
+	if err != nil {
+		t.Fatalf("GET /register: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusNotFound {
+		t.Errorf("want 404 (registration disabled), got %d", resp.StatusCode)
+	}
+}

internal/archesrv/collab_test.go [A]
--- /dev/null
+++ b/internal/archesrv/collab_test.go
@@ -1,0 +1,135 @@
+package archesrv
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+)
+
+// Verifies the full collaborator lifecycle: adding grants read access and
+// shows up in the list; DELETE revokes the access again.
+func TestForgeServer_Collaborator_AddAndRemove(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, adminClient := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	s.db.CreateUser("bob", "pass", false) //nolint:errcheck
+
+	resp, err := adminClient.PostForm(ts.URL+"/myrepo/settings/collaborators", map[string][]string{
+		"username": {"bob"},
+		"role":     {"read"},
+	})
+	if err != nil {
+		t.Fatalf("POST collaborator: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("add collaborator: got %d", resp.StatusCode)
+	}
+
+	rec, _ := s.db.GetRepo("myrepo")
+	u, _, _ := s.db.GetUserByName("bob")
+	if !s.db.CanRead(rec, u) {
+		t.Error("bob should have read access after being added as collaborator")
+	}
+
+	collabs, err := s.db.ListCollaborators(rec.ID)
+	if err != nil {
+		t.Fatalf("ListCollaborators: %v", err)
+	}
+	found := false
+	for _, c := range collabs {
+		if c.Username == "bob" {
+			found = true
+		}
+	}
+	if !found {
+		t.Error("bob should appear in collaborator list")
+	}
+
+	req, _ := http.NewRequest(http.MethodDelete,
+		fmt.Sprintf("%s/myrepo/settings/collaborators/%d", ts.URL, u.ID), nil)
+	resp2, err := adminClient.Do(req)
+	if err != nil {
+		t.Fatalf("DELETE collaborator: %v", err)
+	}
+	resp2.Body.Close()
+	if resp2.StatusCode >= 400 {
+		t.Errorf("remove collaborator: got %d", resp2.StatusCode)
+	}
+
+	rec2, _ := s.db.GetRepo("myrepo")
+	if s.db.CanRead(rec2, u) {
+		t.Error("bob should lose read access after removal")
+	}
+}
+
+// Verifies that a collaborator with only "write" role cannot manage other
+// collaborators (must get an error status).
+func TestForgeServer_Collaborator_NonAdminCannotAdd(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "adminpass", true) //nolint:errcheck
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	alice, _ := s.db.CreateUser("alice", "pass", false)
+	rec, _ := s.db.GetRepo("myrepo")
+	s.db.SetPermission(rec.ID, alice.ID, "write") //nolint:errcheck
+
+	aliceClient := loginAs(t, ts, "alice", "pass")
+	s.db.CreateUser("bob", "pass", false) //nolint:errcheck
+
+	resp, err := aliceClient.PostForm(ts.URL+"/myrepo/settings/collaborators", map[string][]string{
+		"username": {"bob"},
+		"role":     {"read"},
+	})
+	if err != nil {
+		t.Fatalf("POST collaborator: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode < 400 {
+		t.Errorf("non-admin should not add collaborators, got %d", resp.StatusCode)
+	}
+}
+
+// Verifies that granting the "write" role via the collaborators endpoint
+// results in actual write permission on the repo.
+func TestForgeServer_Collaborator_WriteRoleGrantsWrite(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, adminClient := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+	s.db.CreateUser("writer", "pass", false) //nolint:errcheck
+
+	resp, err := adminClient.PostForm(ts.URL+"/myrepo/settings/collaborators", map[string][]string{
+		"username": {"writer"},
+		"role":     {"write"},
+	})
+	if err != nil {
+		t.Fatalf("POST collaborator: %v", err)
+	}
+	resp.Body.Close()
+	// The original test ignored the HTTP status, so an endpoint failure
+	// would surface only as a confusing permission assertion below.
+	if resp.StatusCode >= 400 {
+		t.Fatalf("add collaborator: got %d", resp.StatusCode)
+	}
+
+	rec, _ := s.db.GetRepo("myrepo")
+	u, _, _ := s.db.GetUserByName("writer")
+	if !s.db.CanWrite(rec, u) {
+		t.Error("writer should have write access")
+	}
+}
+
+// Verifies that a user with the repo-level "admin" role (not site admin)
+// may manage collaborators on that repo.
+func TestForgeServer_Collaborator_AdminRoleAllowsManagement(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("siteadmin", "adminpass", true) //nolint:errcheck
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	repoAdmin, _ := s.db.CreateUser("repoadmin", "pass", false)
+	rec, _ := s.db.GetRepo("myrepo")
+	s.db.SetPermission(rec.ID, repoAdmin.ID, "admin") //nolint:errcheck
+
+	repoAdminClient := loginAs(t, ts, "repoadmin", "pass")
+	s.db.CreateUser("target", "pass", false) //nolint:errcheck
+
+	resp, err := repoAdminClient.PostForm(ts.URL+"/myrepo/settings/collaborators", map[string][]string{
+		"username": {"target"},
+		"role":     {"read"},
+	})
+	if err != nil {
+		t.Fatalf("POST collaborator: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("repo admin should be able to add collaborators, got %d", resp.StatusCode)
+	}
+}

internal/archesrv/config.go [A]
--- /dev/null
+++ b/internal/archesrv/config.go
@@ -1,0 +1,74 @@
+package archesrv
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/BurntSushi/toml"
+)
+
+// Config is the top-level server configuration, decoded from TOML by
+// LoadConfig with DefaultConfig supplying fallbacks.
+type Config struct {
+	Server  ServerSection             `toml:"server"`
+	Storage StorageSection            `toml:"storage"`
+	Auth    AuthSection               `toml:"auth"`
+	Hooks   HooksSection              `toml:"hooks"`
+	Repo    map[string]RepoHookConfig `toml:"repo"` // per-repo overrides, keyed by repo name
+}
+
+// RepoHookConfig holds per-repository hook settings ([repo.<name>] tables).
+type RepoHookConfig struct {
+	AllowShellHooks      bool   `toml:"allow_shell_hooks"`
+	PostReceive          string `toml:"post-receive"`
+	RequireSignedCommits bool   `toml:"require_signed_commits"`
+}
+
+// ServerSection holds listen addresses and TLS material paths.
+type ServerSection struct {
+	ListenHTTP  string `toml:"listen_http"`
+	ListenHTTPS string `toml:"listen_https"`
+	ListenSSH   string `toml:"listen_ssh"`
+	ListenMTLS  string `toml:"listen_mtls"`
+	TLSCert     string `toml:"tls_cert"`
+	TLSKey      string `toml:"tls_key"`
+}
+
+// HooksSection configures global hook scripts and their time budget.
+type HooksSection struct {
+	PreReceive  string `toml:"pre-receive"`
+	Update      string `toml:"update"`
+	PostReceive string `toml:"post-receive"`
+	TimeoutSec  int    `toml:"timeout_sec"` // defaults to 30 via DefaultConfig
+}
+
+// StorageSection locates all on-disk server state.
+type StorageSection struct {
+	DataDir string `toml:"data_dir"`
+}
+
+// AuthSection controls account creation. Values seen in use elsewhere in
+// this commit: "disabled" (default), "open", "invite".
+type AuthSection struct {
+	Registration string `toml:"registration"`
+}
+
+func DefaultConfig() Config {
+	return Config{
+		Server: ServerSection{
+			ListenHTTP: ":8080",
+		},
+		Storage: StorageSection{
+			DataDir: "./data",
+		},
+		Auth: AuthSection{
+			Registration: "disabled",
+		},
+		Hooks: HooksSection{
+			TimeoutSec: 30,
+		},
+	}
+}
+
+// LoadConfig decodes the TOML file at path on top of DefaultConfig.
+// A missing file is not an error: the defaults are returned unchanged.
+func LoadConfig(path string) (Config, error) {
+	cfg := DefaultConfig()
+	_, statErr := os.Stat(path)
+	if os.IsNotExist(statErr) {
+		return cfg, nil
+	}
+	_, decodeErr := toml.DecodeFile(path, &cfg)
+	if decodeErr != nil {
+		return cfg, fmt.Errorf("load config %s: %w", path, decodeErr)
+	}
+	return cfg, nil
+}

internal/archesrv/db.go [A]
--- /dev/null
+++ b/internal/archesrv/db.go
@@ -1,0 +1,142 @@
+package archesrv
+
+import (
+	"database/sql"
+	_ "embed"
+	"fmt"
+	"time"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// Schema migration files, embedded at compile time and applied in version
+// order by (*DB).migrate. Keep each //go:embed directive directly above
+// its variable.
+
+//go:embed sql/001_initial.sql
+var sql001 string
+
+//go:embed sql/002_ssh_webhooks_invites.sql
+var sql002 string
+
+//go:embed sql/003_per_repo_hooks.sql
+var sql003 string
+
+//go:embed sql/004_mtls_certs.sql
+var sql004 string
+
+//go:embed sql/005_commit_signatures.sql
+var sql005 string
+
+//go:embed sql/006_stack_reviews.sql
+var sql006 string
+
+// DB wraps the server's SQLite handle; all persistence methods in this
+// package hang off it.
+type DB struct {
+	db *sql.DB
+}
+
+// openDB opens (creating if necessary) the SQLite database at path and
+// runs pending schema migrations. The DSN enables WAL mode and a 5s busy
+// timeout; a single connection serialises access to the file.
+func openDB(path string) (*DB, error) {
+	conn, err := sql.Open("sqlite3", path+"?_journal_mode=WAL&_busy_timeout=5000")
+	if err != nil {
+		return nil, fmt.Errorf("open server.db: %w", err)
+	}
+	conn.SetMaxOpenConns(1)
+
+	d := &DB{db: conn}
+	if migErr := d.migrate(); migErr != nil {
+		conn.Close()
+		return nil, migErr
+	}
+	return d, nil
+}
+
+func (d *DB) Close() error { return d.db.Close() }
+
+// migrate applies any embedded schema migrations not yet recorded in
+// schema_migrations, in ascending version order, recording each applied
+// version with its application time.
+func (d *DB) migrate() error {
+	type migration struct {
+		version int
+		sql     string
+	}
+	all := []migration{{1, sql001}, {2, sql002}, {3, sql003}, {4, sql004}, {5, sql005}, {6, sql006}}
+
+	if _, err := d.db.Exec(`CREATE TABLE IF NOT EXISTS schema_migrations (
+		version    INTEGER PRIMARY KEY,
+		applied_at INTEGER NOT NULL
+	)`); err != nil {
+		return err
+	}
+
+	rows, err := d.db.Query("SELECT version FROM schema_migrations")
+	if err != nil {
+		return err
+	}
+	applied := map[int]bool{}
+	for rows.Next() {
+		var v int
+		// A scan failure used to be ignored, which could leave an applied
+		// migration out of the set and cause it to be re-run below.
+		if err := rows.Scan(&v); err != nil {
+			rows.Close()
+			return err
+		}
+		applied[v] = true
+	}
+	rows.Close()
+	// Iteration errors were previously swallowed too.
+	if err := rows.Err(); err != nil {
+		return err
+	}
+
+	for _, m := range all {
+		if applied[m.version] {
+			continue
+		}
+		if _, err := d.db.Exec(m.sql); err != nil {
+			return fmt.Errorf("migration %d: %w", m.version, err)
+		}
+		if _, err := d.db.Exec(
+			"INSERT INTO schema_migrations(version,applied_at) VALUES(?,?)",
+			m.version, time.Now().Unix(),
+		); err != nil {
+			return fmt.Errorf("record migration %d: %w", m.version, err)
+		}
+	}
+	return nil
+}
+
+// StackReview is one review-state row for a change in a stacked review.
+type StackReview struct {
+	ChangeID   string // change being reviewed
+	Status     string // review status; "open" is the default (see GetStackReview)
+	ReviewerID int64  // user who last set the status
+	UpdatedAt  int64  // unix seconds of the last status change
+}
+
+// GetStackReview returns the review status recorded for (repoID, changeID).
+// Any QueryRow/Scan failure — including the no-rows case — yields the
+// default "open", so a real DB error is indistinguishable from "never
+// reviewed" here; callers treating "open" as authoritative should be aware.
+func (d *DB) GetStackReview(repoID int64, changeID string) string {
+	var status string
+	err := d.db.QueryRow(
+		"SELECT status FROM stack_reviews WHERE repo_id = ? AND change_id = ?",
+		repoID, changeID,
+	).Scan(&status)
+	if err != nil {
+		return "open"
+	}
+	return status
+}
+
+func (d *DB) SetStackReview(repoID int64, changeID, status string, reviewerID int64) error {
+	_, err := d.db.Exec(
+		`INSERT INTO stack_reviews (repo_id, change_id, status, reviewer_id, updated_at)
+		 VALUES (?, ?, ?, ?, ?)
+		 ON CONFLICT(repo_id, change_id) DO UPDATE SET
+		   status = excluded.status,
+		   reviewer_id = excluded.reviewer_id,
+		   updated_at = excluded.updated_at`,
+		repoID, changeID, status, reviewerID, time.Now().Unix(),
+	)
+	return err
+}
+
+// ListStackReviews returns change_id -> status for every recorded review
+// in the repo.
+func (d *DB) ListStackReviews(repoID int64) (map[string]string, error) {
+	rows, err := d.db.Query(
+		"SELECT change_id, status FROM stack_reviews WHERE repo_id = ?", repoID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	out := map[string]string{}
+	for rows.Next() {
+		var id, st string
+		if scanErr := rows.Scan(&id, &st); scanErr != nil {
+			return nil, scanErr
+		}
+		out[id] = st
+	}
+	return out, rows.Err()
+}

internal/archesrv/db_test.go [A]
--- /dev/null
+++ b/internal/archesrv/db_test.go
@@ -1,0 +1,463 @@
+package archesrv
+
+import (
+	"path/filepath"
+	"testing"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// newTestDB opens a fresh, fully migrated SQLite database in a per-test
+// temp dir and closes it when the test ends.
+func newTestDB(t *testing.T) *DB {
+	t.Helper()
+	db, err := openDB(filepath.Join(t.TempDir(), "test.db"))
+	if err != nil {
+		t.Fatalf("openDB: %v", err)
+	}
+	t.Cleanup(func() { db.Close() })
+	return db
+}
+
+// --- user account and password-hashing tests ---
+
+func TestDB_CreateUser_Roundtrip(t *testing.T) {
+	db := newTestDB(t)
+	u, err := db.CreateUser("alice", "password123", false)
+	if err != nil {
+		t.Fatalf("CreateUser: %v", err)
+	}
+	if u.Username != "alice" {
+		t.Errorf("Username: want alice, got %q", u.Username)
+	}
+	if u.IsAdmin {
+		t.Error("should not be admin")
+	}
+
+	got, _, err := db.GetUserByName("alice")
+	if err != nil {
+		t.Fatalf("GetUserByName: %v", err)
+	}
+	if got.ID != u.ID {
+		t.Errorf("ID mismatch: want %d, got %d", u.ID, got.ID)
+	}
+}
+
+func TestDB_CreateUser_DuplicateIsError(t *testing.T) {
+	db := newTestDB(t)
+	if _, err := db.CreateUser("alice", "pass", false); err != nil {
+		t.Fatalf("first CreateUser: %v", err)
+	}
+	if _, err := db.CreateUser("alice", "pass2", false); err == nil {
+		t.Error("expected error for duplicate username, got nil")
+	}
+}
+
+func TestDB_HasAnyUser(t *testing.T) {
+	db := newTestDB(t)
+	has, err := db.HasAnyUser()
+	if err != nil {
+		t.Fatalf("HasAnyUser: %v", err)
+	}
+	if has {
+		t.Error("fresh DB should have no users")
+	}
+
+	db.CreateUser("alice", "pass", false) //nolint:errcheck
+	has, err = db.HasAnyUser()
+	if err != nil {
+		t.Fatalf("HasAnyUser after create: %v", err)
+	}
+	if !has {
+		t.Error("should have a user after CreateUser")
+	}
+}
+
+func TestDB_DeleteUser(t *testing.T) {
+	db := newTestDB(t)
+	u, _ := db.CreateUser("alice", "pass", false)
+
+	if err := db.DeleteUser(u.ID); err != nil {
+		t.Fatalf("DeleteUser: %v", err)
+	}
+
+	got, _, err := db.GetUserByName("alice")
+	if err != nil {
+		t.Fatalf("GetUserByName after delete: %v", err)
+	}
+	if got != nil {
+		t.Error("user should be nil after delete")
+	}
+}
+
+func TestDB_Password_HashAndCheck(t *testing.T) {
+	hash, err := hashPassword("secret")
+	if err != nil {
+		t.Fatalf("hashPassword: %v", err)
+	}
+	if !checkPassword(hash, "secret") {
+		t.Error("checkPassword: correct password should return true")
+	}
+	if checkPassword(hash, "wrongpassword") {
+		t.Error("checkPassword: wrong password should return false")
+	}
+}
+
+// --- session and API-token lifecycle tests ---
+
+func TestDB_Session_CreateAndLookup(t *testing.T) {
+	db := newTestDB(t)
+	u, _ := db.CreateUser("alice", "pass", false)
+
+	tok, err := db.CreateSession(u.ID)
+	if err != nil {
+		t.Fatalf("CreateSession: %v", err)
+	}
+	if tok == "" {
+		t.Fatal("session token is empty")
+	}
+
+	got, err := db.GetSessionUser(tok)
+	if err != nil {
+		t.Fatalf("GetSessionUser: %v", err)
+	}
+	if got == nil || got.ID != u.ID {
+		t.Errorf("GetSessionUser: want user %d, got %v", u.ID, got)
+	}
+}
+
+func TestDB_Session_DeleteInvalidates(t *testing.T) {
+	db := newTestDB(t)
+	u, _ := db.CreateUser("alice", "pass", false)
+	tok, _ := db.CreateSession(u.ID)
+
+	if err := db.DeleteSession(tok); err != nil {
+		t.Fatalf("DeleteSession: %v", err)
+	}
+
+	got, _ := db.GetSessionUser(tok)
+	if got != nil {
+		t.Error("session should be gone after DeleteSession")
+	}
+}
+
+func TestDB_APIToken_CreateAndLookup(t *testing.T) {
+	db := newTestDB(t)
+	u, _ := db.CreateUser("alice", "pass", false)
+
+	tok, err := db.CreateAPIToken(u.ID, "laptop")
+	if err != nil {
+		t.Fatalf("CreateAPIToken: %v", err)
+	}
+
+	looked, err := db.lookupAPIToken(tok)
+	if err != nil {
+		t.Fatalf("lookupAPIToken: %v", err)
+	}
+	if looked == nil || looked.ID != u.ID {
+		t.Errorf("lookupAPIToken: want user %d, got %v", u.ID, looked)
+	}
+}
+
+func TestDB_APIToken_WrongTokenReturnsNil(t *testing.T) {
+	db := newTestDB(t)
+	u, _ := db.CreateUser("alice", "pass", false)
+	db.CreateAPIToken(u.ID, "test") //nolint:errcheck
+
+	looked, err := db.lookupAPIToken("completelyinvalidtoken")
+	if err != nil {
+		t.Fatalf("lookupAPIToken: %v", err)
+	}
+	if looked != nil {
+		t.Error("wrong token should return nil user")
+	}
+}
+
+func TestDB_APIToken_DeleteRemovesAccess(t *testing.T) {
+	db := newTestDB(t)
+	u, _ := db.CreateUser("alice", "pass", false)
+	tok, _ := db.CreateAPIToken(u.ID, "test")
+
+	tokens, _ := db.ListAPITokens(u.ID)
+	if len(tokens) != 1 {
+		t.Fatalf("want 1 token, got %d", len(tokens))
+	}
+
+	if err := db.DeleteAPIToken(tokens[0].ID, u.ID); err != nil {
+		t.Fatalf("DeleteAPIToken: %v", err)
+	}
+
+	looked, _ := db.lookupAPIToken(tok)
+	if looked != nil {
+		t.Error("token should be invalid after delete")
+	}
+}
+
+// --- repo CRUD and read/write access tests ---
+
+func TestDB_CreateRepo_Roundtrip(t *testing.T) {
+	db := newTestDB(t)
+	rec, err := db.CreateRepo("myrepo", "a repo", "private")
+	if err != nil {
+		t.Fatalf("CreateRepo: %v", err)
+	}
+	if rec.Name != "myrepo" {
+		t.Errorf("Name: want myrepo, got %q", rec.Name)
+	}
+	if rec.Visibility != "private" {
+		t.Errorf("Visibility: want private, got %q", rec.Visibility)
+	}
+
+	got, err := db.GetRepo("myrepo")
+	if err != nil || got == nil {
+		t.Fatalf("GetRepo: %v", err)
+	}
+	if got.ID != rec.ID {
+		t.Errorf("ID mismatch")
+	}
+}
+
+func TestDB_ListRepos(t *testing.T) {
+	db := newTestDB(t)
+	db.CreateRepo("alpha", "", "public")  //nolint:errcheck
+	db.CreateRepo("beta", "", "private")  //nolint:errcheck
+	db.CreateRepo("gamma", "", "private") //nolint:errcheck
+
+	repos, err := db.ListRepos()
+	if err != nil {
+		t.Fatalf("ListRepos: %v", err)
+	}
+	if len(repos) != 3 {
+		t.Errorf("want 3 repos, got %d", len(repos))
+	}
+}
+
+func TestDB_DeleteRepo(t *testing.T) {
+	db := newTestDB(t)
+	db.CreateRepo("gone", "", "private") //nolint:errcheck
+
+	if err := db.DeleteRepo("gone"); err != nil {
+		t.Fatalf("DeleteRepo: %v", err)
+	}
+
+	got, _ := db.GetRepo("gone")
+	if got != nil {
+		t.Error("repo should be nil after delete")
+	}
+}
+
+func TestDB_CanRead_Public(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("pub", "", "public")
+	if !db.CanRead(rec, nil) {
+		t.Error("public repo should be readable by anonymous")
+	}
+}
+
+func TestDB_CanRead_PrivateAnonymous(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("priv", "", "private")
+	if db.CanRead(rec, nil) {
+		t.Error("private repo should not be readable by anonymous")
+	}
+}
+
+func TestDB_CanRead_AdminAlwaysCanRead(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("priv", "", "private")
+	admin, _ := db.CreateUser("admin", "pass", true)
+	if !db.CanRead(rec, admin) {
+		t.Error("admin should be able to read any repo")
+	}
+}
+
+func TestDB_CanWrite_AdminAlwaysCanWrite(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("priv", "", "private")
+	admin, _ := db.CreateUser("admin", "pass", true)
+	if !db.CanWrite(rec, admin) {
+		t.Error("admin should be able to write any repo")
+	}
+}
+
+func TestDB_CanWrite_NormalUserCannotWithoutPermission(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("priv", "", "private")
+	u, _ := db.CreateUser("alice", "pass", false)
+	if db.CanWrite(rec, u) {
+		t.Error("normal user without permission should not be able to write")
+	}
+}
+
+// --- per-repo permission grant/revoke tests ---
+
+func TestDB_Permissions_SetAndCheck(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("priv", "", "private")
+	u, _ := db.CreateUser("alice", "pass", false)
+
+	if err := db.SetPermission(rec.ID, u.ID, "write"); err != nil {
+		t.Fatalf("SetPermission: %v", err)
+	}
+
+	if !db.CanWrite(rec, u) {
+		t.Error("user with write permission should be able to write")
+	}
+	if !db.CanRead(rec, u) {
+		t.Error("user with write permission should be able to read")
+	}
+}
+
+func TestDB_Permissions_ReadOnly(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("priv", "", "private")
+	u, _ := db.CreateUser("alice", "pass", false)
+
+	db.SetPermission(rec.ID, u.ID, "read") //nolint:errcheck
+
+	if !db.CanRead(rec, u) {
+		t.Error("user with read permission should be able to read")
+	}
+	if db.CanWrite(rec, u) {
+		t.Error("user with only read permission should not be able to write")
+	}
+}
+
+func TestDB_Permissions_RemoveRevokesAccess(t *testing.T) {
+	db := newTestDB(t)
+	rec, _ := db.CreateRepo("priv", "", "private")
+	u, _ := db.CreateUser("alice", "pass", false)
+
+	db.SetPermission(rec.ID, u.ID, "write") //nolint:errcheck
+	db.RemovePermission(rec.ID, u.ID)       //nolint:errcheck
+
+	if db.CanWrite(rec, u) {
+		t.Error("user should lose write after RemovePermission")
+	}
+	if db.CanRead(rec, u) {
+		t.Error("user should lose read after RemovePermission")
+	}
+}
+
+// --- invite token tests: create, single-use, lookup, delete ---
+
+func TestDB_Invite_CreateAndGet(t *testing.T) {
+	db := newTestDB(t)
+	admin, _ := db.CreateUser("admin", "pass", true)
+
+	inv, err := db.CreateInvite(admin.ID)
+	if err != nil {
+		t.Fatalf("CreateInvite: %v", err)
+	}
+	if inv.Token == "" {
+		t.Error("token should not be empty")
+	}
+
+	got, err := db.GetInvite(inv.Token)
+	if err != nil || got == nil {
+		t.Fatalf("GetInvite: %v", err)
+	}
+	if got.UsedBy != nil {
+		t.Error("invite should not be used yet")
+	}
+}
+
+func TestDB_Invite_UseOnce(t *testing.T) {
+	db := newTestDB(t)
+	admin, _ := db.CreateUser("admin", "pass", true)
+	inv, _ := db.CreateInvite(admin.ID)
+	user, _ := db.CreateUser("bob", "pass", false)
+
+	if err := db.UseInvite(inv.Token, user.ID); err != nil {
+		t.Fatalf("UseInvite: %v", err)
+	}
+
+	user2, _ := db.CreateUser("carol", "pass", false)
+	if err := db.UseInvite(inv.Token, user2.ID); err == nil {
+		t.Error("using an already-used invite should fail")
+	}
+}
+
+func TestDB_Invite_InvalidTokenReturnsNil(t *testing.T) {
+	db := newTestDB(t)
+	got, err := db.GetInvite("nosuchtoken")
+	if err != nil {
+		t.Fatalf("GetInvite: %v", err)
+	}
+	if got != nil {
+		t.Error("non-existent invite should return nil")
+	}
+}
+
+func TestDB_Invite_DeleteRemoves(t *testing.T) {
+	db := newTestDB(t)
+	admin, _ := db.CreateUser("admin", "pass", true)
+	inv, _ := db.CreateInvite(admin.ID)
+
+	if err := db.DeleteInvite(inv.ID, admin.ID); err != nil {
+		t.Fatalf("DeleteInvite: %v", err)
+	}
+
+	list, _ := db.ListInvites(admin.ID)
+	if len(list) != 0 {
+		t.Errorf("want 0 invites after delete, got %d", len(list))
+	}
+}
+
+// --- webhook CRUD, HMAC determinism and repo-update tests ---
+
+func TestDB_Webhook_CreateAndList(t *testing.T) {
+	db := newTestDB(t)
+	repo, _ := db.CreateRepo("myrepo", "", "private")
+
+	wh, err := db.CreateWebhook(repo.ID, "http://example.com/hook", "secret", "push")
+	if err != nil {
+		t.Fatalf("CreateWebhook: %v", err)
+	}
+	if wh.URL != "http://example.com/hook" {
+		t.Errorf("URL mismatch: %q", wh.URL)
+	}
+
+	hooks, err := db.ListWebhooks(repo.ID)
+	if err != nil {
+		t.Fatalf("ListWebhooks: %v", err)
+	}
+	if len(hooks) != 1 {
+		t.Errorf("want 1 webhook, got %d", len(hooks))
+	}
+}
+
+func TestDB_Webhook_Delete(t *testing.T) {
+	db := newTestDB(t)
+	repo, _ := db.CreateRepo("myrepo", "", "private")
+	wh, _ := db.CreateWebhook(repo.ID, "http://example.com/hook", "", "push")
+
+	if err := db.DeleteWebhook(wh.ID); err != nil {
+		t.Fatalf("DeleteWebhook: %v", err)
+	}
+
+	hooks, _ := db.ListWebhooks(repo.ID)
+	if len(hooks) != 0 {
+		t.Errorf("want 0 webhooks after delete, got %d", len(hooks))
+	}
+}
+
+// Checks the webhook signature helper is deterministic and keyed by the
+// secret (different secrets, including empty, give different signatures).
+func TestDB_Webhook_HMAC(t *testing.T) {
+	payload := []byte(`{"event":"push"}`)
+	sig1 := computeHMAC("secret", payload)
+	sig2 := computeHMAC("secret", payload)
+	if sig1 != sig2 {
+		t.Error("HMAC should be deterministic")
+	}
+	sig3 := computeHMAC("differentsecret", payload)
+	if sig1 == sig3 {
+		t.Error("different secrets should produce different HMACs")
+	}
+	sigEmpty := computeHMAC("", payload)
+	if sigEmpty == sig1 {
+		t.Error("empty secret should produce different HMAC")
+	}
+}
+
+func TestDB_UpdateRepo(t *testing.T) {
+	db := newTestDB(t)
+	db.CreateRepo("myrepo", "old desc", "private") //nolint:errcheck
+
+	if err := db.UpdateRepo("myrepo", "new desc", "public"); err != nil {
+		t.Fatalf("UpdateRepo: %v", err)
+	}
+
+	rec, _ := db.GetRepo("myrepo")
+	if rec.Description != "new desc" {
+		t.Errorf("Description: want %q, got %q", "new desc", rec.Description)
+	}
+	if rec.Visibility != "public" {
+		t.Errorf("Visibility: want public, got %q", rec.Visibility)
+	}
+}

internal/archesrv/handlers_admin.go [A]
--- /dev/null
+++ b/internal/archesrv/handlers_admin.go
@@ -1,0 +1,181 @@
+package archesrv
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"arche/internal/repo"
+)
+
+// adminUsersData is the template payload for the admin user-management page.
+type adminUsersData struct {
+	User  *User  // the acting admin (for the page chrome)
+	Users []User // all accounts to list
+}
+
+// handleAdminUsers renders the admin user list. Site-admin only.
+func (s *forgeServer) handleAdminUsers(w http.ResponseWriter, r *http.Request) {
+	actor := s.db.currentUser(r)
+	if actor == nil || !actor.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+	all, err := s.db.ListUsers()
+	if err != nil {
+		http.Error(w, "list users: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_admin_users.html", adminUsersData{User: actor, Users: all})
+}
+
+// handleAdminCreateUser creates a new account from the admin form and
+// redirects back to the user list. Site-admin only.
+func (s *forgeServer) handleAdminCreateUser(w http.ResponseWriter, r *http.Request) {
+	actor := s.db.currentUser(r)
+	if actor == nil || !actor.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+	r.ParseForm() //nolint:errcheck
+	name := strings.TrimSpace(r.FormValue("username"))
+	pass := r.FormValue("password")
+	admin := r.FormValue("is_admin") == "1"
+	if name == "" || pass == "" {
+		http.Error(w, "username and password required", http.StatusBadRequest)
+		return
+	}
+	if _, err := s.db.CreateUser(name, pass, admin); err != nil {
+		http.Error(w, "create user: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.log.Info("user created", "by", actor.Username, "new_user", name, "is_admin", admin)
+	http.Redirect(w, r, "/admin/users", http.StatusFound)
+}
+
+// handleAdminDeleteUser deletes the user identified by the {id} path
+// value and responds 204. Admins cannot delete their own account.
+// Site-admin only.
+func (s *forgeServer) handleAdminDeleteUser(w http.ResponseWriter, r *http.Request) {
+	actor := s.db.currentUser(r)
+	if actor == nil || !actor.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+	var targetID int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &targetID); err != nil {
+		http.Error(w, "invalid user id", http.StatusBadRequest)
+		return
+	}
+	if targetID == actor.ID {
+		http.Error(w, "cannot delete your own account", http.StatusBadRequest)
+		return
+	}
+	if err := s.db.DeleteUser(targetID); err != nil {
+		http.Error(w, "delete user: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.log.Info("user deleted", "by", actor.Username, "target_id", targetID)
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// handleAdminCreateRepo creates a repo record, initialises its on-disk
+// storage, grants the creator the repo-admin role and replies with a small
+// JSON document {"name": ...}. Site-admin only.
+func (s *forgeServer) handleAdminCreateRepo(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil || !user.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+
+	r.ParseForm() //nolint:errcheck
+	name := strings.TrimSpace(r.FormValue("name"))
+	desc := r.FormValue("description")
+	vis := r.FormValue("visibility")
+
+	if name == "" {
+		http.Error(w, "name required", http.StatusBadRequest)
+		return
+	}
+
+	rec, err := s.db.CreateRepo(name, desc, vis)
+	if err != nil {
+		http.Error(w, "create repo: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// Roll the DB row back if on-disk init fails so the two stores cannot
+	// drift apart.
+	path := repoPath(s.dataDir(), name)
+	if _, err := repo.Init(path); err != nil {
+		s.db.DeleteRepo(name) //nolint:errcheck
+		http.Error(w, "init repo: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	s.db.SetPermission(rec.ID, user.ID, "admin") //nolint:errcheck
+	s.log.Info("repo created", "by", user.Username, "repo", rec.Name, "visibility", vis)
+	// The body is JSON; previously it was served without a Content-Type.
+	w.Header().Set("Content-Type", "application/json")
+	fmt.Fprintf(w, `{"name":%q}`, rec.Name)
+}
+
+// handleAdminDeleteRepo removes the repo row named by {name} and responds
+// 204. Site-admin only.
+// NOTE(review): only the database record is removed here — this handler
+// never touches the on-disk repo data; confirm cleanup happens elsewhere
+// if that is intended.
+func (s *forgeServer) handleAdminDeleteRepo(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil || !user.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+	name := r.PathValue("name")
+	if err := s.db.DeleteRepo(name); err != nil {
+		http.Error(w, "delete repo: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.log.Info("repo deleted", "by", user.Username, "repo", name)
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// srvAdminInvitesData is the template payload for the invites admin page.
+type srvAdminInvitesData struct {
+	User    *User         // acting admin
+	Invites []InviteToken // the admin's invites
+	Link    string        // freshly minted registration link, if any
+}
+
+// handleAdminInvites renders the invite-token management page.
+// Site-admin only.
+func (s *forgeServer) handleAdminInvites(w http.ResponseWriter, r *http.Request) {
+	actor := s.db.currentUser(r)
+	if actor == nil || !actor.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+	list, err := s.db.ListInvites(actor.ID)
+	if err != nil {
+		http.Error(w, "list invites: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_admin_invites.html", srvAdminInvitesData{User: actor, Invites: list})
+}
+
+// handleAdminCreateInvite mints a new invite token and re-renders the
+// invites page with a ready-to-share registration link. Site-admin only.
+func (s *forgeServer) handleAdminCreateInvite(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil || !user.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+	inv, err := s.db.CreateInvite(user.ID)
+	if err != nil {
+		http.Error(w, "create invite: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	// A ListInvites failure was previously ignored and an empty list
+	// rendered; surface it like handleAdminInvites does.
+	invites, err := s.db.ListInvites(user.ID)
+	if err != nil {
+		http.Error(w, "list invites: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_admin_invites.html", srvAdminInvitesData{
+		User:    user,
+		Invites: invites,
+		Link:    "/register?invite=" + inv.Token,
+	})
+}
+
+// handleAdminDeleteInvite deletes one of the caller's invite tokens by id
+// and responds 204. Site-admin only.
+func (s *forgeServer) handleAdminDeleteInvite(w http.ResponseWriter, r *http.Request) {
+	actor := s.db.currentUser(r)
+	if actor == nil || !actor.IsAdmin {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return
+	}
+	var inviteID int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &inviteID); err != nil {
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	if err := s.db.DeleteInvite(inviteID, actor.ID); err != nil {
+		http.Error(w, "delete invite: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+}

internal/archesrv/handlers_auth.go [A]
--- /dev/null
+++ b/internal/archesrv/handlers_auth.go
@@ -1,0 +1,244 @@
+package archesrv
+
+import (
+	"net/http"
+	"strings"
+	"time"
+)
+
+type setupData struct { // template payload for the first-run setup page
+	User             *User
+	RegistrationOpen bool
+	Error            string
+}
+
+func (s *forgeServer) handleSetup(w http.ResponseWriter, r *http.Request) { // GET /setup: shown only while no user exists
+	has, _ := s.db.HasAnyUser() // error ignored: a failed lookup is treated as "no users yet"
+	if has {
+		http.Redirect(w, r, "/login", http.StatusFound) // setup is closed once the first account exists
+		return
+	}
+	s.render(w, "srv_setup.html", setupData{})
+}
+
+func (s *forgeServer) handleSetupPost(w http.ResponseWriter, r *http.Request) { // POST /setup: create the initial account and log it in
+	has, _ := s.db.HasAnyUser() // NOTE(review): check-then-create is racy under concurrent POSTs — confirm CreateUser enforces uniqueness
+	if has {
+		http.Redirect(w, r, "/login", http.StatusFound)
+		return
+	}
+
+	r.ParseForm() //nolint:errcheck
+	username := strings.TrimSpace(r.FormValue("username"))
+	password := r.FormValue("password")
+
+	if username == "" || password == "" {
+		s.render(w, "srv_setup.html", setupData{Error: "username and password required"})
+		return
+	}
+
+	u, err := s.db.CreateUser(username, password, true) // third arg presumably marks admin (register passes false) — confirm
+	if err != nil {
+		s.render(w, "srv_setup.html", setupData{Error: "create user: " + err.Error()})
+		return
+	}
+
+	tok, err := s.db.CreateSession(u.ID)
+	if err != nil {
+		http.Redirect(w, r, "/login", http.StatusFound) // account exists even if the session failed; fall back to manual login
+		return
+	}
+
+	http.SetCookie(w, &http.Cookie{ // NOTE(review): no Secure/SameSite attributes — confirm against the TLS deployment story
+		Name:     sessionCookie,
+		Value:    tok,
+		Path:     "/",
+		HttpOnly: true,
+		Expires:  time.Now().Add(sessionTTL),
+	})
+	http.Redirect(w, r, "/", http.StatusFound)
+}
+
+type loginData struct { // template payload for the login page
+	User             *User
+	RegistrationOpen bool
+	Error            string
+}
+
+func (s *forgeServer) handleLoginPage(w http.ResponseWriter, r *http.Request) { // GET /login
+	s.render(w, "srv_login.html", loginData{RegistrationOpen: s.cfg.Auth.Registration == "open"})
+}
+
+func (s *forgeServer) handleLoginPost(w http.ResponseWriter, r *http.Request) { // POST /login: verify credentials and start a session
+	r.ParseForm() //nolint:errcheck
+	username := r.FormValue("username")
+	password := r.FormValue("password")
+
+	u, hash, err := s.db.GetUserByName(username)
+	if err != nil || u == nil || !checkPassword(hash, password) { // single generic error avoids leaking which field was wrong
+		s.render(w, "srv_login.html", loginData{Error: "invalid username or password"})
+		return
+	}
+
+	tok, err := s.db.CreateSession(u.ID)
+	if err != nil {
+		http.Error(w, "session: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	http.SetCookie(w, &http.Cookie{ // NOTE(review): no Secure/SameSite attributes — confirm against the TLS deployment story
+		Name:     sessionCookie,
+		Value:    tok,
+		Path:     "/",
+		HttpOnly: true,
+		Expires:  time.Now().Add(sessionTTL),
+	})
+	http.Redirect(w, r, "/", http.StatusFound)
+}
+
+func (s *forgeServer) handleLogout(w http.ResponseWriter, r *http.Request) { // logout: drop the server-side session, then expire the cookie
+	if c, err := r.Cookie(sessionCookie); err == nil {
+		s.db.DeleteSession(c.Value) //nolint:errcheck
+		http.SetCookie(w, &http.Cookie{ // MaxAge -1 plus epoch Expires tells the browser to delete the cookie
+			Name:    sessionCookie,
+			Value:   "",
+			Path:    "/",
+			MaxAge:  -1,
+			Expires: time.Unix(0, 0),
+		})
+	}
+	http.Redirect(w, r, "/", http.StatusFound)
+}
+
+type registerData struct { // template payload for the registration page
+	User             *User
+	RegistrationOpen bool
+	Error            string
+	InviteRequired   bool // true when cfg registration mode is "invite"
+}
+
+func (s *forgeServer) handleRegisterPage(w http.ResponseWriter, r *http.Request) { // GET /register
+	s.render(w, "srv_register.html", registerData{InviteRequired: s.cfg.Auth.Registration == "invite"})
+}
+
+func (s *forgeServer) handleRegisterPost(w http.ResponseWriter, r *http.Request) { // POST /register: validate, optionally consume an invite, create + log in
+	r.ParseForm() //nolint:errcheck
+	username := strings.TrimSpace(r.FormValue("username"))
+	password := r.FormValue("password")
+	confirm := r.FormValue("confirm")
+	inviteToken := strings.TrimSpace(r.FormValue("invite_token"))
+
+	rd := registerData{InviteRequired: s.cfg.Auth.Registration == "invite"}
+
+	if username == "" || password == "" {
+		rd.Error = "username and password required"
+		s.render(w, "srv_register.html", rd)
+		return
+	}
+	if password != confirm {
+		rd.Error = "passwords do not match"
+		s.render(w, "srv_register.html", rd)
+		return
+	}
+
+	if s.cfg.Auth.Registration == "invite" { // NOTE(review): registration mode "closed" (neither open nor invite) still falls through to CreateUser — confirm routing blocks it
+		if inviteToken == "" {
+			rd.Error = "invite token required"
+			s.render(w, "srv_register.html", rd)
+			return
+		}
+		inv, err := s.db.GetInvite(inviteToken)
+		if err != nil || inv == nil || inv.UsedBy != nil { // UsedBy non-nil means the token was already consumed
+			rd.Error = "invalid or already-used invite token"
+			s.render(w, "srv_register.html", rd)
+			return
+		}
+	}
+
+	u, err := s.db.CreateUser(username, password, false) // false: never an admin via self-registration
+	if err != nil {
+		rd.Error = "could not create account: " + err.Error()
+		s.render(w, "srv_register.html", rd)
+		return
+	}
+
+	if s.cfg.Auth.Registration == "invite" {
+		s.db.UseInvite(inviteToken, u.ID) //nolint:errcheck // NOTE(review): check-then-use TOCTOU — two concurrent registers could share one token
+	}
+
+	tok, err := s.db.CreateSession(u.ID)
+	if err != nil {
+		http.Redirect(w, r, "/login", http.StatusFound) // account exists; let the user log in manually
+		return
+	}
+	http.SetCookie(w, &http.Cookie{
+		Name:     sessionCookie,
+		Value:    tok,
+		Path:     "/",
+		HttpOnly: true,
+		Expires:  time.Now().Add(sessionTTL),
+	})
+	http.Redirect(w, r, "/", http.StatusFound)
+}
+
+type repoListItem struct { // one row on the front-page repo list
+	Name        string
+	Description string
+	Visibility  string
+	CreatedAt   string // pre-formatted YYYY-MM-DD for the template
+	LastCommit  string // first line of the head commit message, if readable
+}
+
+type indexData struct { // template payload for the repo list page
+	User             *User
+	Repos            []repoListItem
+	RegistrationOpen bool
+}
+
+func (s *forgeServer) handleIndex(w http.ResponseWriter, r *http.Request) { // GET /: repo list, filtered by read permission
+	if r.URL.Path != "/" { // catch-all route: anything but exactly "/" is a 404
+		http.NotFound(w, r)
+		return
+	}
+
+	has, _ := s.db.HasAnyUser()
+	if !has {
+		http.Redirect(w, r, "/setup", http.StatusFound) // no accounts yet: force first-run setup
+		return
+	}
+
+	user := s.db.currentUser(r) // may be nil (anonymous); CanRead decides visibility
+	recs, _ := s.db.ListRepos()
+
+	var items []repoListItem
+	for _, rec := range recs {
+		if !s.db.CanRead(&rec, user) {
+			continue
+		}
+
+		item := repoListItem{
+			Name:        rec.Name,
+			Description: rec.Description,
+			Visibility:  rec.Visibility,
+			CreatedAt:   rec.CreatedAt.Format("2006-01-02"),
+		}
+
+		if r, err := openRepo(s.dataDir(), rec.Name); err == nil { // NOTE(review): this r shadows the *http.Request — rename for clarity
+			if c, _, err2 := r.HeadCommit(); err2 == nil && c.Message != "" {
+				msg := c.Message
+				if idx := strings.IndexByte(msg, '\n'); idx >= 0 { // keep only the subject line
+					msg = msg[:idx]
+				}
+				item.LastCommit = msg
+			}
+			r.Close()
+		}
+		items = append(items, item)
+	}
+
+	s.render(w, "srv_repo_list.html", indexData{
+		User:             user,
+		Repos:            items,
+		RegistrationOpen: s.cfg.Auth.Registration == "open",
+	})
+}

internal/archesrv/handlers_issues.go [A]
--- /dev/null
+++ b/internal/archesrv/handlers_issues.go
@@ -1,0 +1,259 @@
+package archesrv
+
+import (
+	"io"
+	"net/http"
+	"path/filepath"
+	"strings"
+
+	"arche/internal/issuedb"
+)
+
+func (s *forgeServer) openIssueDB(repoName string) (*issuedb.DB, error) { // read-only open of the repo's issue DB under <repo>/.arche
+	dir := filepath.Join(repoPath(s.dataDir(), repoName), ".arche")
+	return issuedb.Open(dir)
+}
+
+func (s *forgeServer) openIssueDBWithStore(repoName string) (*issuedb.DB, io.Closer, error) { // open issue DB backed by the repo's object store (for writes)
+	r, err := openRepo(s.dataDir(), repoName)
+	if err != nil {
+		return nil, nil, err
+	}
+	dir := filepath.Join(repoPath(s.dataDir(), repoName), ".arche")
+	idb, err := issuedb.NewWithStore(dir, r.Store)
+	if err != nil {
+		r.Store.Close() //nolint:errcheck // don't leak the store on failure
+		return nil, nil, err
+	}
+	return idb, r.Store, nil // caller must Close both the DB and the returned store closer
+}
+
+type srvIssuesData struct { // template payload for the issue list page
+	Repo   string
+	User   *User
+	Issues []issueStubView
+}
+
+type issueStubView struct { // minimal per-issue fields for the list view
+	ID     string
+	Title  string
+	Status string
+}
+
+func (s *forgeServer) handleRepoIssues(w http.ResponseWriter, r *http.Request) { // GET: list issues for a readable repo
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	user := s.db.currentUser(r)
+	if !s.db.CanRead(rec, user) {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	idb, err := s.openIssueDB(repoName) // read path: no object-store writer needed
+	if err != nil {
+		http.Error(w, "open issuedb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer idb.Close()
+
+	stubs, err := idb.Issues.ListIssues()
+	if err != nil {
+		http.Error(w, "list issues: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	var items []issueStubView
+	for _, st := range stubs {
+		items = append(items, issueStubView{ID: st.ID, Title: st.Title, Status: st.Status})
+	}
+	s.render(w, "srv_repo_issues.html", srvIssuesData{Repo: repoName, User: user, Issues: items})
+}
+
+func (s *forgeServer) handleRepoCreateIssue(w http.ResponseWriter, r *http.Request) { // POST: create an issue; requires login + read access
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+	if !s.db.CanRead(rec, user) { // NOTE(review): creation gated on read (not write) access — confirm intended
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	r.ParseForm() //nolint:errcheck
+	title := strings.TrimSpace(r.FormValue("title"))
+	body := r.FormValue("body")
+	if title == "" {
+		http.Error(w, "title required", http.StatusBadRequest)
+		return
+	}
+
+	idb, storeCloser, err := s.openIssueDBWithStore(repoName) // write path: needs the repo's object store
+	if err != nil {
+		http.Error(w, "open issuedb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer storeCloser.Close() //nolint:errcheck
+	defer idb.Close()
+
+	id, err := idb.Issues.CreateIssue(title, body, user.Username)
+	if err != nil {
+		http.Error(w, "create issue: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	http.Redirect(w, r, "/"+repoName+"/issue?id="+id, http.StatusFound)
+}
+
+type srvIssueData struct { // template payload for a single-issue page
+	Repo     string
+	User     *User
+	ID       string
+	Title    string
+	Status   string
+	Body     string
+	Labels   []string
+	Comments []issueCommentView
+}
+
+type issueCommentView struct { // one rendered comment
+	Author string
+	Text   string
+}
+
+func (s *forgeServer) handleRepoIssue(w http.ResponseWriter, r *http.Request) { // GET: show one issue (id via query string)
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	user := s.db.currentUser(r)
+	if !s.db.CanRead(rec, user) {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	id := r.URL.Query().Get("id")
+	if id == "" {
+		http.Error(w, "id required", http.StatusBadRequest)
+		return
+	}
+
+	idb, err := s.openIssueDB(repoName)
+	if err != nil {
+		http.Error(w, "open issuedb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer idb.Close()
+
+	iss, err := idb.Issues.GetIssue(id)
+	if err != nil { // any lookup failure is reported as 404, not 500
+		http.NotFound(w, r)
+		return
+	}
+
+	var comments []issueCommentView
+	for _, c := range iss.Comments {
+		comments = append(comments, issueCommentView{Author: c.Author, Text: c.Text})
+	}
+
+	s.render(w, "srv_repo_issue.html", srvIssueData{
+		Repo:     repoName,
+		User:     user,
+		ID:       iss.ID,
+		Title:    iss.Title,
+		Status:   iss.Status,
+		Body:     iss.Body,
+		Labels:   iss.Labels,
+		Comments: comments,
+	})
+}
+
+func (s *forgeServer) handleRepoAddComment(w http.ResponseWriter, r *http.Request) { // POST: append a comment; requires login + read access
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+	if !s.db.CanRead(rec, user) {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	r.ParseForm() //nolint:errcheck
+	issueID := r.FormValue("issue_id")
+	text := strings.TrimSpace(r.FormValue("text"))
+	if issueID == "" || text == "" {
+		http.Error(w, "issue_id and text required", http.StatusBadRequest)
+		return
+	}
+
+	idb, storeCloser, err := s.openIssueDBWithStore(repoName)
+	if err != nil {
+		http.Error(w, "open issuedb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer storeCloser.Close() //nolint:errcheck
+	defer idb.Close()
+
+	if err := idb.Issues.AddComment(issueID, text, user.Username); err != nil {
+		http.Error(w, "add comment: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	http.Redirect(w, r, "/"+repoName+"/issue?id="+issueID, http.StatusFound)
+}
+
+func (s *forgeServer) handleRepoSetStatus(w http.ResponseWriter, r *http.Request) { // POST: change an issue's status; requires login + read access
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+	if !s.db.CanRead(rec, user) { // NOTE(review): same read-only gate as comment/create — confirm status changes shouldn't need write access
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	r.ParseForm() //nolint:errcheck
+	issueID := r.FormValue("issue_id")
+	status := r.FormValue("status")
+	if issueID == "" || status == "" {
+		http.Error(w, "issue_id and status required", http.StatusBadRequest)
+		return
+	}
+
+	idb, storeCloser, err := s.openIssueDBWithStore(repoName)
+	if err != nil {
+		http.Error(w, "open issuedb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer storeCloser.Close() //nolint:errcheck
+	defer idb.Close()
+
+	if err := idb.Issues.SetStatus(issueID, status, user.Username); err != nil { // status values are not validated here — issuedb presumably does; confirm
+		http.Error(w, "set status: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	http.Redirect(w, r, "/"+repoName+"/issue?id="+issueID, http.StatusFound)
+}

internal/archesrv/handlers_repo.go [A]
--- /dev/null
+++ b/internal/archesrv/handlers_repo.go
@@ -1,0 +1,741 @@
+package archesrv
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"html/template"
+	"net/http"
+	"strings"
+
+	"arche/internal/diff"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/revset"
+	"arche/internal/syncpkg"
+
+	"github.com/alecthomas/chroma/v2"
+	chrhtml "github.com/alecthomas/chroma/v2/formatters/html"
+	"github.com/alecthomas/chroma/v2/lexers"
+	"github.com/alecthomas/chroma/v2/styles"
+	"golang.org/x/crypto/ssh"
+)
+
+func (s *forgeServer) requireRepoAccess(w http.ResponseWriter, r *http.Request) (*repo.Repo, *RepoRecord, bool) { // resolve + authorize + open the repo; writes the HTTP error itself on failure
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return nil, nil, false
+	}
+
+	user := s.db.currentUser(r) // nil = anonymous; CanRead handles that case
+	if !s.db.CanRead(rec, user) {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return nil, nil, false
+	}
+
+	repoObj, err := openRepo(s.dataDir(), repoName)
+	if err != nil {
+		http.Error(w, "open repo: "+err.Error(), http.StatusInternalServerError)
+		return nil, nil, false
+	}
+
+	return repoObj, rec, true // on true the caller owns repoObj and must Close it
+}
+
+func (s *forgeServer) handleSyncProxy(w http.ResponseWriter, r *http.Request) { // bridge HTTP sync requests to the repo's sync server, enforcing ACLs and hooks
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.Error(w, "repo not found", http.StatusNotFound)
+		return
+	}
+
+	user := s.db.currentUser(r)
+
+	if r.Method != http.MethodGet && !s.db.CanWrite(rec, user) { // non-GET = push/write path
+		user := s.db.currentUser(r) // NOTE(review): redundant re-fetch shadowing the outer user — reuse the value from above
+		username := "anonymous"
+		if user != nil {
+			username = user.Username
+		}
+		s.log.Warn("sync write denied", "repo", repoName, "user", username)
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+	if r.Method == http.MethodGet && !s.db.CanRead(rec, user) { // GET = fetch/read path
+		s.log.Warn("sync read denied", "repo", repoName)
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	repoObj, err := openRepo(s.dataDir(), repoName)
+	if err != nil {
+		http.Error(w, "open repo: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer repoObj.Close()
+
+	action := strings.TrimPrefix(r.URL.Path, "/"+repoName) // strip the repo prefix so the sync server sees its own route space
+	r2 := r.Clone(r.Context())
+	r2.URL.Path = action
+
+	user = s.db.currentUser(r) // NOTE(review): third fetch of the same user for this request — could reuse the first
+	pusher := "anonymous"
+	if user != nil {
+		pusher = user.Username
+	}
+
+	srv := syncpkg.NewServer(repoObj, "")
+
+	repoKey := repoName
+	repoCfg := s.cfg.Repo[repoKey] // zero-value config if the repo has no explicit entry
+	srv.PreUpdateHook = func(bm, oldHex, newHex string) error { // runs before a bookmark update is accepted; returning error rejects the push
+		if s.cfg.Hooks.PreReceive != "" || s.cfg.Hooks.Update != "" {
+			if err := runPreReceiveHook(s.cfg.Hooks.PreReceive, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec); err != nil {
+				return err
+			}
+			if err := runPreReceiveHook(s.cfg.Hooks.Update, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec); err != nil {
+				return err
+			}
+		}
+		if repoCfg.RequireSignedCommits && user != nil { // NOTE(review): anonymous pushers bypass signature enforcement — confirm writes always require auth
+			for _, id := range collectNewCommitIDs(repoObj, oldHex, newHex) {
+				c, err := repoObj.ReadCommit(id)
+				if err != nil {
+					continue // unreadable commit: skip rather than fail the whole push
+				}
+				if len(c.CommitSig) == 0 {
+					return fmt.Errorf("commit %s (ch:%s) is unsigned; this repository requires signed commits",
+						hex.EncodeToString(id[:8]), c.ChangeID)
+				}
+				body := object.CommitBodyForSigning(c)
+				keys, _ := s.db.ListSSHKeys(user.ID) // NOTE(review): hoistable — re-queried for every new commit
+				verified := false
+				for _, k := range keys {
+					pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.PublicKey))
+					if err != nil {
+						continue // skip malformed stored keys
+					}
+					if object.VerifyCommitSig(body, c.CommitSig, pub) == nil {
+						verified = true
+						break
+					}
+				}
+				if !verified {
+					return fmt.Errorf("commit %s (ch:%s) has an unverifiable signature; this repository requires commits signed by a registered key",
+						hex.EncodeToString(id[:8]), c.ChangeID)
+				}
+			}
+		}
+		return nil
+	}
+
+	srv.OnBookmarkUpdated = func(bm, oldHex, newHex string) { // post-accept side effects: webhooks, hooks, signature records
+		s.db.FirePushWebhooks(repoName, pusher, bm, oldHex, newHex, collectPushCommits(repoObj, oldHex, newHex))
+		runPostReceiveHook(s.cfg.Hooks.PostReceive, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec)
+
+		if user != nil {
+			for _, id := range collectNewCommitIDs(repoObj, oldHex, newHex) {
+				c, err := repoObj.ReadCommit(id)
+				if err != nil {
+					continue
+				}
+				_ = s.db.RecordCommitSignature(repoObj, id, c, user.ID)
+			}
+		}
+
+		if allowed, script, _ := s.db.GetRepoHookConfig(rec.ID); allowed && script != "" {
+			if !s.db.hasWriteCollaborator(rec.ID) { // repo-local script runs only when no other writers exist — presumably a sandboxing guard; confirm
+				runPostReceiveHook(script, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec)
+			}
+		}
+	}
+	srv.Handler().ServeHTTP(w, r2)
+}
+
+func collectPushCommits(r *repo.Repo, oldHex, newHex string) []CommitRef { // BFS from newHex back toward oldHex, collecting up to 50 commit refs for webhooks
+	if len(newHex) != 64 { // expects a full 32-byte hex id
+		return []CommitRef{}
+	}
+	newBytes, err := hex.DecodeString(newHex)
+	if err != nil || len(newBytes) != 32 {
+		return []CommitRef{}
+	}
+	var newID [32]byte
+	copy(newID[:], newBytes)
+
+	var oldID [32]byte // zero value if oldHex is absent/invalid: walk continues to the root (capped below)
+	if len(oldHex) == 64 {
+		if oldBytes, err2 := hex.DecodeString(oldHex); err2 == nil && len(oldBytes) == 32 {
+			copy(oldID[:], oldBytes)
+		}
+	}
+
+	seen := make(map[[32]byte]bool)
+	queue := [][32]byte{newID}
+	var results []CommitRef
+	const maxCommits = 50 // hard cap on payload size
+
+	for len(queue) > 0 && len(results) < maxCommits {
+		id := queue[0]
+		queue = queue[1:]
+		if seen[id] || id == oldID { // stop at the previous bookmark tip
+			continue
+		}
+		seen[id] = true
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			break // NOTE(review): one unreadable commit abandons the whole walk — confirm continue isn't intended
+		}
+		author := c.Author.Name
+		if c.Author.Email != "" {
+			author += " <" + c.Author.Email + ">"
+		}
+		results = append(results, CommitRef{
+			ID:       hex.EncodeToString(id[:]),
+			ChangeID: "ch:" + c.ChangeID,
+			Message:  c.Message,
+			Author:   author,
+		})
+		for _, p := range c.Parents {
+			if !seen[p] && p != oldID {
+				queue = append(queue, p)
+			}
+		}
+	}
+	return results
+}
+
+func (s *forgeServer) handleRepoHome(w http.ResponseWriter, r *http.Request) { // repo root redirects to the log view
+	http.Redirect(w, r, "/"+r.PathValue("repo")+"/log", http.StatusFound)
+}
+
+type srvCommitRow struct { // one row in the log table
+	HexID      string
+	ShortHex   string
+	ChangeID   string
+	Author     string
+	Date       string // pre-formatted for the template
+	Phase      string
+	PhaseClass string // CSS class derived from the phase
+	Message    string // subject line only
+	Bookmarks  []string
+	IsHead     bool
+}
+
+type srvLogData struct { // template payload for the log page
+	Repo      string
+	User      *User
+	Commits   []srvCommitRow
+	WhereExpr string // raw revset expression from the query string
+	WhereErr  string // parse error for WhereExpr, if any
+}
+
+func (s *forgeServer) handleRepoLog(w http.ResponseWriter, r *http.Request) { // log page: BFS over history from the default bookmark (or HEAD), optional revset filter
+	repoObj, rec, ok := s.requireRepoAccess(w, r)
+	if !ok {
+		return
+	}
+	defer repoObj.Close()
+
+	const maxCommits = 200 // page cap; no pagination beyond this
+	where := r.URL.Query().Get("where")
+	var whereFilter revset.Func
+	var whereErr string
+	if where != "" {
+		var err error
+		whereFilter, err = revset.Parse(where)
+		if err != nil {
+			whereErr = err.Error() // parse errors are shown inline, not fatal
+		}
+	}
+	_, headID, err := repoObj.HeadCommit()
+	if err != nil {
+		http.Error(w, "HEAD: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	if def := resolveDefaultCommit(repoObj); def != ([32]byte{}) { // prefer main/master bookmark over the server's HEAD
+		headID = def
+	}
+
+	headCID, _ := repoObj.HeadChangeID()
+	bmMap := bookmarkMap(repoObj) // commit hex -> bookmark names
+	visited := map[[32]byte]bool{}
+	queue := [][32]byte{headID}
+
+	var rows []srvCommitRow
+	for len(queue) > 0 && len(rows) < maxCommits {
+		id := queue[0]
+		queue = queue[1:]
+
+		if visited[id] {
+			continue
+		}
+		visited[id] = true
+
+		c, err := repoObj.ReadCommit(id)
+		if err != nil {
+			continue // skip unreadable commits rather than failing the page
+		}
+
+		phase, _ := repoObj.Store.GetPhase(id)
+		if whereFilter != nil && !whereFilter(id, c, phase) { // filtered out: hide the row but keep walking its parents
+			for _, p := range c.Parents {
+				if !visited[p] {
+					queue = append(queue, p)
+				}
+			}
+			continue
+		}
+		hexID := fullHex(id)
+		msg := c.Message
+		if idx := strings.IndexByte(msg, '\n'); idx >= 0 { // subject line only
+			msg = msg[:idx]
+		}
+
+		rows = append(rows, srvCommitRow{
+			HexID:      hexID,
+			ShortHex:   shortHex(id),
+			ChangeID:   c.ChangeID,
+			Author:     c.Author.Name,
+			Date:       c.Author.Timestamp.Format("2006-01-02 15:04"),
+			Phase:      phase.String(),
+			PhaseClass: phaseClass(phase),
+			Message:    msg,
+			Bookmarks:  bmMap[hexID],
+			IsHead:     c.ChangeID == headCID,
+		})
+
+		for _, p := range c.Parents {
+			if !visited[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+
+	s.render(w, "srv_repo_log.html", srvLogData{
+		Repo:      rec.Name,
+		User:      s.db.currentUser(r),
+		Commits:   rows,
+		WhereExpr: where,
+		WhereErr:  whereErr,
+	})
+}
+
+type srvCommitData struct { // template payload for the single-commit page
+	Repo       string
+	User       *User
+	HexID      string
+	ShortHex   string
+	ChangeID   string
+	Author     string // "Name <email>"
+	Committer  string // "Name <email>"
+	Date       string // pre-formatted author timestamp
+	Phase      string
+	PhaseClass string
+	SigStatus  string
+	SigKeyID   string // populated by s.db? — unset in handleRepoCommit below; confirm whether a template still reads it
+	Message    string
+	Bookmarks  []string
+	Parents    []srvParentLink
+	Diffs      []srvFileDiff
+}
+
+type srvParentLink struct { // link target for a parent commit
+	HexID    string
+	ShortHex string
+}
+
+type srvDiffLine struct { // one rendered diff line with its CSS class
+	Class string // "diff-hdr", "diff-add", "diff-del", or "" for context
+	Text  string
+}
+
+type srvFileDiff struct { // per-file diff section
+	Path   string
+	Status string
+	Lines  []srvDiffLine
+}
+
+func (s *forgeServer) handleRepoCommit(w http.ResponseWriter, r *http.Request) { // single-commit page: metadata, signature status, and rendered diffs
+	repoObj, rec, ok := s.requireRepoAccess(w, r)
+	if !ok {
+		return
+	}
+	defer repoObj.Close()
+
+	idStr := r.URL.Query().Get("id")
+	raw, err := hex.DecodeString(idStr)
+	if err != nil || len(raw) != 32 { // id must be a full 64-char hex commit id
+		http.Error(w, "invalid commit id", http.StatusBadRequest)
+		return
+	}
+	var id [32]byte
+	copy(id[:], raw)
+
+	c, err := repoObj.ReadCommit(id)
+	if err != nil {
+		http.NotFound(w, r)
+		return
+	}
+
+	phase, _ := repoObj.Store.GetPhase(id)
+	bmMap := bookmarkMap(repoObj)
+	hexID := fullHex(id)
+
+	var parents []srvParentLink
+	for _, p := range c.Parents {
+		parents = append(parents, srvParentLink{HexID: fullHex(p), ShortHex: shortHex(p)})
+	}
+
+	diffs, _ := diff.CommitDiff(repoObj, id) // diff failure renders an empty diff section rather than an error page
+	var rendered []srvFileDiff
+	for _, fd := range diffs {
+		rendered = append(rendered, srvFileDiff{
+			Path:   fd.Path,
+			Status: string(fd.Status),
+			Lines:  parseSrvDiffLines(fd.Patch),
+		})
+	}
+
+	sigStatus := s.db.GetCommitSigStatus(id)
+
+	s.render(w, "srv_repo_commit.html", srvCommitData{
+		Repo:       rec.Name,
+		User:       s.db.currentUser(r),
+		HexID:      hexID,
+		ShortHex:   shortHex(id),
+		ChangeID:   c.ChangeID,
+		Author:     fmt.Sprintf("%s <%s>", c.Author.Name, c.Author.Email),
+		Committer:  fmt.Sprintf("%s <%s>", c.Committer.Name, c.Committer.Email),
+		Date:       c.Author.Timestamp.Format("2006-01-02 15:04:05"),
+		Phase:      phase.String(),
+		PhaseClass: phaseClass(phase),
+		SigStatus:  sigStatus, // SigKeyID left unset here
+		Message:    c.Message,
+		Bookmarks:  bmMap[hexID],
+		Parents:    parents,
+		Diffs:      rendered,
+	})
+}
+
+func parseSrvDiffLines(patch string) []srvDiffLine { // classify each unified-diff line for CSS styling
+	var out []srvDiffLine
+	for _, line := range strings.Split(patch, "\n") {
+		var class string
+		switch {
+		case strings.HasPrefix(line, "+++"), strings.HasPrefix(line, "---"),
+			strings.HasPrefix(line, "diff "), strings.HasPrefix(line, "@@"): // header cases must precede the +/- cases below
+			class = "diff-hdr"
+		case strings.HasPrefix(line, "+"):
+			class = "diff-add"
+		case strings.HasPrefix(line, "-"):
+			class = "diff-del"
+		}
+		out = append(out, srvDiffLine{Class: class, Text: line})
+	}
+	return out
+}
+
+type srvTreeData struct { // template payload for the tree browser
+	Repo      string
+	User      *User
+	CommitHex string
+	ShortHex  string
+	TreePath  string // current directory path, "" at the root
+	PathParts []srvPathPart
+	Entries   []srvTreeEntry
+}
+
+type srvPathPart struct { // one breadcrumb segment
+	Name string
+	Link string
+}
+
+type srvTreeEntry struct { // one row in the directory listing
+	Name  string
+	IsDir bool
+	Mode  string // "file", "exec", or "link" (see modeStr)
+	Link  string
+}
+
+func resolveDefaultCommit(r *repo.Repo) [32]byte { // pick the commit the web UI should show by default: main, then master, then the first bookmark; zero if none
+	bms, err := r.Store.ListBookmarks()
+	if err != nil || len(bms) == 0 {
+		return [32]byte{} // zero id signals "no bookmark"; callers fall back to HEAD
+	}
+	for _, name := range []string{"main", "master"} {
+		for _, bm := range bms {
+			if bm.Name == name {
+				return bm.CommitID
+			}
+		}
+	}
+	return bms[0].CommitID // NOTE(review): "first" depends on ListBookmarks ordering — confirm it is deterministic
+}
+
+func (s *forgeServer) handleRepoTree(w http.ResponseWriter, r *http.Request) { // tree browser: list a directory at a commit (defaults to main/master bookmark)
+	repoObj, rec, ok := s.requireRepoAccess(w, r)
+	if !ok {
+		return
+	}
+	defer repoObj.Close()
+
+	idStr := r.URL.Query().Get("id")
+	treePath := strings.Trim(r.URL.Query().Get("path"), "/")
+
+	var commitID [32]byte
+	if idStr != "" {
+		raw, err := hex.DecodeString(idStr)
+		if err != nil || len(raw) != 32 {
+			http.Error(w, "invalid id", http.StatusBadRequest)
+			return
+		}
+		copy(commitID[:], raw)
+	} else {
+		commitID = resolveDefaultCommit(repoObj) // no id given: default bookmark, then HEAD
+		if commitID == ([32]byte{}) {
+			_, id, err := repoObj.HeadCommit()
+			if err != nil {
+				http.Error(w, "HEAD: "+err.Error(), http.StatusInternalServerError)
+				return
+			}
+			commitID = id
+		}
+	}
+
+	c, err := repoObj.ReadCommit(commitID)
+	if err != nil {
+		http.NotFound(w, r)
+		return
+	}
+
+	tree, err := repoObj.ReadTree(c.TreeID)
+	if err != nil {
+		http.Error(w, "tree: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	if treePath != "" { // descend segment by segment from the root tree
+		for _, seg := range strings.Split(treePath, "/") {
+			var found *object.TreeEntry
+			for i := range tree.Entries {
+				if tree.Entries[i].Name == seg {
+					found = &tree.Entries[i]
+					break
+				}
+			}
+			if found == nil {
+				http.NotFound(w, r)
+				return
+			}
+			if found.Mode != object.ModeDir { // path points at a file: bounce to the file viewer
+				http.Redirect(w, r, fmt.Sprintf("/%s/file?id=%s&path=%s",
+					rec.Name, fullHex(commitID), treePath), http.StatusFound)
+				return
+			}
+			tree, err = repoObj.ReadTree(found.ObjectID)
+			if err != nil {
+				http.Error(w, "subtree: "+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+	}
+
+	commitHex := fullHex(commitID)
+	var parts []srvPathPart // breadcrumb links, one per path segment
+	if treePath != "" {
+		acc := ""
+		for _, seg := range strings.Split(treePath, "/") {
+			if acc != "" {
+				acc += "/"
+			}
+			acc += seg
+			parts = append(parts, srvPathPart{
+				Name: seg,
+				Link: fmt.Sprintf("/%s/tree?id=%s&path=%s", rec.Name, commitHex, acc), // NOTE(review): path is not URL-escaped — confirm names can't contain & or #
+			})
+		}
+	}
+
+	var entries []srvTreeEntry
+	for _, e := range tree.Entries {
+		isDir := e.Mode == object.ModeDir
+		childPath := e.Name
+		if treePath != "" {
+			childPath = treePath + "/" + e.Name
+		}
+		var link string
+		if isDir {
+			link = fmt.Sprintf("/%s/tree?id=%s&path=%s", rec.Name, commitHex, childPath)
+		} else {
+			link = fmt.Sprintf("/%s/file?id=%s&path=%s", rec.Name, commitHex, childPath)
+		}
+		entries = append(entries, srvTreeEntry{
+			Name:  e.Name,
+			IsDir: isDir,
+			Mode:  modeStr(e.Mode),
+			Link:  link,
+		})
+	}
+
+	s.render(w, "srv_repo_tree.html", srvTreeData{
+		Repo:      rec.Name,
+		User:      s.db.currentUser(r),
+		CommitHex: commitHex,
+		ShortHex:  shortHex(commitID),
+		TreePath:  treePath,
+		PathParts: parts,
+		Entries:   entries,
+	})
+}
+
+func modeStr(m object.EntryMode) string { // human-readable label for a tree-entry mode
+	switch m {
+	case object.ModeExec:
+		return "exec"
+	case object.ModeSymlink:
+		return "link"
+	default:
+		return "file" // ModeDir never reaches here via handleRepoTree (IsDir handled separately)
+	}
+}
+
+type srvFileData struct { // template payload for the file viewer
+	Repo        string
+	User        *User
+	CommitHex   string
+	ShortHex    string
+	FilePath    string
+	Content     string // raw file content (template fallback when Highlighted is empty)
+	IsBinary    bool
+	Highlighted template.HTML // chroma-rendered HTML, empty for binary/huge files or on highlight failure
+}
+
+func highlightCode(filename, content string) template.HTML { // syntax-highlight content via chroma; returns "" on any failure
+	lexer := lexers.Match(filename) // by filename first, then content sniffing, then plain text
+	if lexer == nil {
+		lexer = lexers.Analyse(content)
+	}
+	if lexer == nil {
+		lexer = lexers.Fallback
+	}
+	lexer = chroma.Coalesce(lexer)
+
+	style := styles.Get("github")
+	if style == nil {
+		style = styles.Fallback
+	}
+
+	fmt := chrhtml.New( // NOTE(review): local fmt shadows the fmt package here — harmless but worth renaming
+		chrhtml.WithLineNumbers(true),
+		chrhtml.WithClasses(false), // inline styles: no separate stylesheet needed
+		chrhtml.TabWidth(4),
+	)
+
+	iterator, err := lexer.Tokenise(nil, content)
+	if err != nil {
+		return ""
+	}
+	var buf bytes.Buffer
+	if err := fmt.Format(&buf, style, iterator); err != nil {
+		return ""
+	}
+	return template.HTML(buf.String()) //nolint:gosec // chroma escapes source content in its output
+}
+
+func (s *forgeServer) handleRepoFile(w http.ResponseWriter, r *http.Request) { // file viewer: walk the tree to a blob and render it (highlighted when small + text)
+	repoObj, rec, ok := s.requireRepoAccess(w, r)
+	if !ok {
+		return
+	}
+	defer repoObj.Close()
+
+	idStr := r.URL.Query().Get("id")
+	filePath := strings.Trim(r.URL.Query().Get("path"), "/")
+
+	raw, err := hex.DecodeString(idStr)
+	if err != nil || len(raw) != 32 { // unlike /tree, /file requires an explicit commit id
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	var commitID [32]byte
+	copy(commitID[:], raw)
+
+	c, err := repoObj.ReadCommit(commitID)
+	if err != nil {
+		http.NotFound(w, r)
+		return
+	}
+
+	tree, err := repoObj.ReadTree(c.TreeID)
+	if err != nil {
+		http.Error(w, "tree: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	parts := strings.Split(filePath, "/") // note: filePath=="" yields one empty segment, which fails the name match below
+	for i, seg := range parts {
+		var found *object.TreeEntry
+		for j := range tree.Entries {
+			if tree.Entries[j].Name == seg {
+				found = &tree.Entries[j]
+				break
+			}
+		}
+		if found == nil {
+			http.NotFound(w, r)
+			return
+		}
+
+		if i == len(parts)-1 { // last segment: must be a blob
+			if found.Mode == object.ModeDir {
+				http.Error(w, "not a file", http.StatusBadRequest)
+				return
+			}
+			content, err := repoObj.ReadBlob(found.ObjectID)
+			if err != nil {
+				http.Error(w, "blob: "+err.Error(), http.StatusInternalServerError)
+				return
+			}
+			isBin := isBinaryContent(content)
+			var highlighted template.HTML
+			if !isBin && len(content) < 512*1024 { // skip highlighting for binary or >=512KiB files
+				highlighted = highlightCode(filePath, string(content))
+			}
+			s.render(w, "srv_repo_file.html", srvFileData{
+				Repo:        rec.Name,
+				User:        s.db.currentUser(r),
+				CommitHex:   fullHex(commitID),
+				ShortHex:    shortHex(commitID),
+				FilePath:    filePath,
+				Content:     string(content),
+				IsBinary:    isBin,
+				Highlighted: highlighted,
+			})
+			return
+		}
+
+		if found.Mode != object.ModeDir { // intermediate segment must be a directory
+			http.Error(w, "not a directory", http.StatusBadRequest)
+			return
+		}
+		tree, err = repoObj.ReadTree(found.ObjectID)
+		if err != nil {
+			http.Error(w, "subtree: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+	}
+	http.NotFound(w, r) // unreachable for non-empty paths (loop always returns), kept as a safety net
+}
+
+func isBinaryContent(data []byte) bool { // heuristic: any NUL byte means binary
+	for _, b := range data {
+		if b == 0 {
+			return true
+		}
+	}
+	return false
+}

internal/archesrv/handlers_settings.go [A]
--- /dev/null
+++ b/internal/archesrv/handlers_settings.go
@@ -1,0 +1,462 @@
+package archesrv
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// requireRepoAdmin resolves the {repo} path value and enforces that the
+// request comes from a logged-in user who is either a site admin or holds
+// the "admin" role on that repo. On failure it writes the HTTP error
+// response itself and returns ok=false; callers should simply return.
+func (s *forgeServer) requireRepoAdmin(w http.ResponseWriter, r *http.Request) (*RepoRecord, *User, bool) {
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return nil, nil, false
+	}
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return nil, nil, false
+	}
+	// Site admins bypass the per-repo role check.
+	if !user.IsAdmin && !s.db.hasRole(rec.ID, user.ID, "admin") {
+		http.Error(w, "forbidden", http.StatusForbidden)
+		return nil, nil, false
+	}
+	return rec, user, true
+}
+
+// srvWebhooksData feeds the srv_repo_webhooks.html template.
+type srvWebhooksData struct {
+	Repo     string
+	User     *User
+	Webhooks []WebhookRecord
+	Error    string // inline form error, empty when none
+}
+
+func (s *forgeServer) handleRepoWebhooks(w http.ResponseWriter, r *http.Request) {
+	rec, user, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	hooks, err := s.db.ListWebhooks(rec.ID)
+	if err != nil {
+		http.Error(w, "list webhooks: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_repo_webhooks.html", srvWebhooksData{Repo: rec.Name, User: user, Webhooks: hooks})
+}
+
+// handleRepoCreateWebhook adds a webhook to the repo from the submitted
+// form (url, secret, events) and redirects back to the webhook page.
+// An empty events field defaults to "push"; an empty URL re-renders the
+// page with an inline error.
+// NOTE(review): the URL is only checked for non-emptiness — no scheme or
+// destination validation here; confirm the delivery path guards against
+// SSRF-style targets.
+func (s *forgeServer) handleRepoCreateWebhook(w http.ResponseWriter, r *http.Request) {
+	rec, user, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	r.ParseForm() //nolint:errcheck
+	hookURL := strings.TrimSpace(r.FormValue("url"))
+	secret := r.FormValue("secret")
+	events := r.FormValue("events")
+	if events == "" {
+		events = "push"
+	}
+	if hookURL == "" {
+		hooks, _ := s.db.ListWebhooks(rec.ID)
+		s.render(w, "srv_repo_webhooks.html", srvWebhooksData{Repo: rec.Name, User: user, Webhooks: hooks, Error: "URL required"})
+		return
+	}
+	if _, err := s.db.CreateWebhook(rec.ID, hookURL, secret, events); err != nil {
+		http.Error(w, "create webhook: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	http.Redirect(w, r, "/"+rec.Name+"/settings/webhooks", http.StatusFound)
+}
+
+// handleRepoDeleteWebhook removes one webhook and answers 204 No Content.
+// The hook must belong to the repo named in the URL.
+func (s *forgeServer) handleRepoDeleteWebhook(w http.ResponseWriter, r *http.Request) {
+	rec, _, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	var hookID int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &hookID); err != nil {
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	// Ownership check: a hook id from another repo is reported as missing.
+	hook, err := s.db.GetWebhook(hookID)
+	if err != nil || hook == nil || hook.RepoID != rec.ID {
+		http.Error(w, "webhook not found", http.StatusNotFound)
+		return
+	}
+	if err := s.db.DeleteWebhook(hookID); err != nil {
+		http.Error(w, "delete webhook: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// srvDeliveriesData feeds the srv_webhook_deliveries.html template.
+type srvDeliveriesData struct {
+	Repo       string
+	User       *User
+	WebhookID  int64
+	WebhookURL string
+	Deliveries []WebhookDelivery
+}
+
+// handleWebhookDeliveries lists the past deliveries of one webhook.
+// The hook must belong to the repo named in the URL.
+func (s *forgeServer) handleWebhookDeliveries(w http.ResponseWriter, r *http.Request) {
+	rec, user, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	var id int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &id); err != nil {
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	// Ownership check: a hook id from another repo is reported as missing.
+	hook, err := s.db.GetWebhook(id)
+	if err != nil || hook == nil || hook.RepoID != rec.ID {
+		http.Error(w, "webhook not found", http.StatusNotFound)
+		return
+	}
+	deliveries, err := s.db.ListDeliveries(id)
+	if err != nil {
+		http.Error(w, "list deliveries: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_webhook_deliveries.html", srvDeliveriesData{
+		Repo:       rec.Name,
+		User:       user,
+		WebhookID:  id,
+		WebhookURL: hook.URL,
+		Deliveries: deliveries,
+	})
+}
+
+// handleWebhookReplay re-queues a past delivery of a webhook and sends
+// the admin back to that webhook's delivery list.
+func (s *forgeServer) handleWebhookReplay(w http.ResponseWriter, r *http.Request) {
+	rec, _, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	rawHook, rawDelivery := r.PathValue("id"), r.PathValue("delivery")
+	var hookID int64
+	if _, err := fmt.Sscan(rawHook, &hookID); err != nil {
+		http.Error(w, "invalid webhook id", http.StatusBadRequest)
+		return
+	}
+	var deliveryID int64
+	if _, err := fmt.Sscan(rawDelivery, &deliveryID); err != nil {
+		http.Error(w, "invalid delivery id", http.StatusBadRequest)
+		return
+	}
+	// Ownership check: the hook must belong to this repo.
+	hook, err := s.db.GetWebhook(hookID)
+	if err != nil || hook == nil || hook.RepoID != rec.ID {
+		http.Error(w, "webhook not found", http.StatusNotFound)
+		return
+	}
+	if err := s.db.ReplayDelivery(deliveryID); err != nil {
+		http.Error(w, "replay: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	http.Redirect(w, r, "/"+rec.Name+"/settings/webhooks/"+rawHook+"/deliveries", http.StatusFound)
+}
+
+// srvSettingsKeysData feeds the srv_settings_keys.html template.
+type srvSettingsKeysData struct {
+	User  *User
+	Keys  []SSHKey
+	Error string // inline form error, empty when none
+}
+
+// handleSettingsKeys renders the SSH key management page for the current
+// user, bouncing anonymous visitors to the login form.
+func (s *forgeServer) handleSettingsKeys(w http.ResponseWriter, r *http.Request) {
+	cur := s.db.currentUser(r)
+	if cur == nil {
+		http.Redirect(w, r, "/login", http.StatusFound)
+		return
+	}
+	sshKeys, err := s.db.ListSSHKeys(cur.ID)
+	if err != nil {
+		http.Error(w, "list keys: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_settings_keys.html", srvSettingsKeysData{User: cur, Keys: sshKeys})
+}
+
+// handleSettingsAddKey stores a new SSH public key for the logged-in
+// user. Key validation is delegated to AddSSHKey; both a missing key and
+// a rejected key re-render the keys page with an inline error rather
+// than returning an HTTP error status.
+func (s *forgeServer) handleSettingsAddKey(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Redirect(w, r, "/login", http.StatusFound)
+		return
+	}
+	r.ParseForm() //nolint:errcheck
+	label := strings.TrimSpace(r.FormValue("label"))
+	publicKey := strings.TrimSpace(r.FormValue("public_key"))
+	if publicKey == "" {
+		keys, _ := s.db.ListSSHKeys(user.ID)
+		s.render(w, "srv_settings_keys.html", srvSettingsKeysData{User: user, Keys: keys, Error: "public key required"})
+		return
+	}
+	if _, err := s.db.AddSSHKey(user.ID, label, publicKey); err != nil {
+		keys, _ := s.db.ListSSHKeys(user.ID)
+		s.render(w, "srv_settings_keys.html", srvSettingsKeysData{User: user, Keys: keys, Error: "invalid key: " + err.Error()})
+		return
+	}
+	http.Redirect(w, r, "/settings/keys", http.StatusFound)
+}
+
+// handleSettingsDeleteKey deletes one of the current user's SSH keys and
+// answers 204 No Content (DeleteSSHKey is scoped by the owner's id).
+func (s *forgeServer) handleSettingsDeleteKey(w http.ResponseWriter, r *http.Request) {
+	cur := s.db.currentUser(r)
+	if cur == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+	var keyID int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &keyID); err != nil {
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	if err := s.db.DeleteSSHKey(keyID, cur.ID); err != nil {
+		http.Error(w, "delete key: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// srvSettingsMTLSData feeds the srv_settings_mtls.html template.
+type srvSettingsMTLSData struct {
+	User  *User
+	Certs []MTLSCert
+	Error string // inline form error, empty when none
+}
+
+// handleSettingsMTLSCerts renders the mTLS client-certificate page for
+// the current user, bouncing anonymous visitors to the login form.
+func (s *forgeServer) handleSettingsMTLSCerts(w http.ResponseWriter, r *http.Request) {
+	cur := s.db.currentUser(r)
+	if cur == nil {
+		http.Redirect(w, r, "/login", http.StatusFound)
+		return
+	}
+	certList, err := s.db.ListMTLSCerts(cur.ID)
+	if err != nil {
+		http.Error(w, "list certs: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_settings_mtls.html", srvSettingsMTLSData{User: cur, Certs: certList})
+}
+
+// handleSettingsAddMTLSCert registers a PEM-encoded client certificate
+// for the current user. Parsing/validation happens inside AddMTLSCert;
+// failures re-render the page with an inline error.
+func (s *forgeServer) handleSettingsAddMTLSCert(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Redirect(w, r, "/login", http.StatusFound)
+		return
+	}
+	r.ParseForm() //nolint:errcheck
+	label := strings.TrimSpace(r.FormValue("label"))
+	certPEM := strings.TrimSpace(r.FormValue("cert_pem"))
+	if certPEM == "" {
+		certs, _ := s.db.ListMTLSCerts(user.ID)
+		s.render(w, "srv_settings_mtls.html", srvSettingsMTLSData{User: user, Certs: certs, Error: "certificate PEM required"})
+		return
+	}
+	if _, err := s.db.AddMTLSCert(user.ID, label, certPEM); err != nil {
+		certs, _ := s.db.ListMTLSCerts(user.ID)
+		s.render(w, "srv_settings_mtls.html", srvSettingsMTLSData{User: user, Certs: certs, Error: "invalid cert: " + err.Error()})
+		return
+	}
+	http.Redirect(w, r, "/settings/mtls", http.StatusFound)
+}
+
+// handleSettingsDeleteMTLSCert removes one of the current user's mTLS
+// client certificates (DeleteMTLSCert is scoped by the owner's id) and
+// answers 204 No Content.
+func (s *forgeServer) handleSettingsDeleteMTLSCert(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+	var id int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &id); err != nil {
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	if err := s.db.DeleteMTLSCert(id, user.ID); err != nil {
+		http.Error(w, "delete cert: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// srvSettingsTokenData feeds the srv_settings_token.html template.
+type srvSettingsTokenData struct {
+	User     *User
+	Tokens   []APIToken
+	NewToken string // plaintext of a just-created token, shown once
+	Error    string // inline form error, empty when none
+}
+
+// handleSettingsToken renders the API token page. A freshly minted token
+// arrives via the "new" query parameter (set by handleSettingsCreateToken)
+// so it can be displayed to the user once.
+// NOTE(review): the plaintext token travels in a URL query string and may
+// therefore land in server/proxy logs — confirm this is acceptable.
+func (s *forgeServer) handleSettingsToken(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Redirect(w, r, "/login", http.StatusFound)
+		return
+	}
+	tokens, err := s.db.ListAPITokens(user.ID)
+	if err != nil {
+		http.Error(w, "list tokens: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	newTok := r.URL.Query().Get("new")
+	s.render(w, "srv_settings_token.html", srvSettingsTokenData{User: user, Tokens: tokens, NewToken: newTok})
+}
+
+// handleSettingsCreateToken mints a new API token for the logged-in user
+// and redirects back to the token page with the plaintext token in the
+// "new" query parameter so it can be shown once.
+func (s *forgeServer) handleSettingsCreateToken(w http.ResponseWriter, r *http.Request) {
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Redirect(w, r, "/login", http.StatusFound)
+		return
+	}
+	r.ParseForm() //nolint:errcheck
+	label := strings.TrimSpace(r.FormValue("label"))
+	tok, err := s.db.CreateAPIToken(user.ID, label)
+	if err != nil {
+		tokens, _ := s.db.ListAPITokens(user.ID)
+		s.render(w, "srv_settings_token.html", srvSettingsTokenData{User: user, Tokens: tokens, Error: "create token: " + err.Error()})
+		return
+	}
+	// Escape the token so characters like "+" or "&" survive the round
+	// trip through the query string (a no-op for hex/base64url tokens).
+	http.Redirect(w, r, "/settings/token?new="+url.QueryEscape(tok), http.StatusFound)
+}
+
+// handleSettingsDeleteToken revokes one of the current user's API tokens
+// (DeleteAPIToken is scoped by the owner's id) and answers 204 No Content.
+func (s *forgeServer) handleSettingsDeleteToken(w http.ResponseWriter, r *http.Request) {
+	cur := s.db.currentUser(r)
+	if cur == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+	var tokID int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &tokID); err != nil {
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	if err := s.db.DeleteAPIToken(tokID, cur.ID); err != nil {
+		http.Error(w, "delete token: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// srvRepoSettingsData feeds the srv_repo_settings.html template.
+type srvRepoSettingsData struct {
+	Repo          string
+	User          *User
+	Description   string
+	Visibility    string
+	Collaborators []CollabEntry
+	Error         string // inline form error, empty when none
+}
+
+// handleRepoSettingsPage shows the repo settings form (description,
+// visibility, collaborator list) to repo admins.
+func (s *forgeServer) handleRepoSettingsPage(w http.ResponseWriter, r *http.Request) {
+	rec, user, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	collaborators, err := s.db.ListCollaborators(rec.ID)
+	if err != nil {
+		http.Error(w, "list collaborators: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	data := srvRepoSettingsData{
+		Repo:          rec.Name,
+		User:          user,
+		Description:   rec.Description,
+		Visibility:    rec.Visibility,
+		Collaborators: collaborators,
+	}
+	s.render(w, "srv_repo_settings.html", data)
+}
+
+// handleRepoUpdateSettings applies description/visibility changes from
+// the settings form. On failure the form is re-rendered with the
+// submitted (not stored) values plus an error message.
+// NOTE(review): visibility is forwarded unvalidated — presumably
+// UpdateRepo restricts it to the allowed values; confirm.
+func (s *forgeServer) handleRepoUpdateSettings(w http.ResponseWriter, r *http.Request) {
+	rec, user, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	r.ParseForm() //nolint:errcheck
+	description := strings.TrimSpace(r.FormValue("description"))
+	visibility := r.FormValue("visibility")
+
+	if err := s.db.UpdateRepo(rec.Name, description, visibility); err != nil {
+		collabs, _ := s.db.ListCollaborators(rec.ID)
+		s.render(w, "srv_repo_settings.html", srvRepoSettingsData{
+			Repo:          rec.Name,
+			User:          user,
+			Description:   description,
+			Visibility:    visibility,
+			Collaborators: collabs,
+			Error:         "update failed: " + err.Error(),
+		})
+		return
+	}
+	http.Redirect(w, r, "/"+rec.Name+"/settings", http.StatusFound)
+}
+
+// handleRepoAddCollaborator grants a named user a role on the repo.
+// Unknown role values fall back to "read". Side effect: granting write
+// or admin revokes the repo's allow_shell_hooks flag (if set) and logs a
+// warning, so shell hooks cannot run on behalf of newly trusted writers.
+func (s *forgeServer) handleRepoAddCollaborator(w http.ResponseWriter, r *http.Request) {
+	rec, user, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	r.ParseForm() //nolint:errcheck
+	username := strings.TrimSpace(r.FormValue("username"))
+	role := r.FormValue("role")
+	// Whitelist the role; anything else degrades to read-only.
+	if role != "read" && role != "write" && role != "admin" {
+		role = "read"
+	}
+
+	// renderErr re-renders the settings page with the stored repo values
+	// and an inline error message.
+	renderErr := func(msg string) {
+		collabs, _ := s.db.ListCollaborators(rec.ID)
+		s.render(w, "srv_repo_settings.html", srvRepoSettingsData{
+			Repo:          rec.Name,
+			User:          user,
+			Description:   rec.Description,
+			Visibility:    rec.Visibility,
+			Collaborators: collabs,
+			Error:         msg,
+		})
+	}
+
+	if username == "" {
+		renderErr("username required")
+		return
+	}
+	target, _, err := s.db.GetUserByName(username)
+	if err != nil || target == nil {
+		renderErr("user not found: " + username)
+		return
+	}
+	if err := s.db.SetPermission(rec.ID, target.ID, role); err != nil {
+		renderErr("set permission: " + err.Error())
+		return
+	}
+
+	// Safety measure: widening the set of users who can push disables
+	// shell hooks for the repo until an admin re-enables them.
+	if role == "write" || role == "admin" {
+		allowed, _, _ := s.db.GetRepoHookConfig(rec.ID)
+		if allowed {
+			_ = s.db.SetRepoAllowShellHooks(rec.ID, false, "")
+			s.log.Warn("allow_shell_hooks revoked", "repo", rec.Name, "collaborator", username, "role", role)
+		}
+	}
+
+	http.Redirect(w, r, "/"+rec.Name+"/settings", http.StatusFound)
+}
+
+// handleRepoRemoveCollaborator drops a user's explicit permission on the
+// repo and answers 204 No Content.
+func (s *forgeServer) handleRepoRemoveCollaborator(w http.ResponseWriter, r *http.Request) {
+	rec, _, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	var collabID int64
+	if _, err := fmt.Sscan(r.PathValue("id"), &collabID); err != nil {
+		http.Error(w, "invalid id", http.StatusBadRequest)
+		return
+	}
+	if err := s.db.RemovePermission(rec.ID, collabID); err != nil {
+		http.Error(w, "remove permission: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+}
+
+// handleRepoDeleteRepo deletes the repository record and answers 204.
+// NOTE(review): only s.db.DeleteRepo is called here — confirm the
+// on-disk repository data is cleaned up elsewhere.
+func (s *forgeServer) handleRepoDeleteRepo(w http.ResponseWriter, r *http.Request) {
+	rec, _, ok := s.requireRepoAdmin(w, r)
+	if !ok {
+		return
+	}
+	if err := s.db.DeleteRepo(rec.Name); err != nil {
+		http.Error(w, "delete repo: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusNoContent)
+}

internal/archesrv/handlers_stack.go [A]
--- /dev/null
+++ b/internal/archesrv/handlers_stack.go
@@ -1,0 +1,347 @@
+package archesrv
+
+import (
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"sort"
+	"strings"
+	"time"
+
+	"arche/internal/diff"
+	"arche/internal/object"
+	"arche/internal/store"
+)
+
+// stackEntry is one change row on the stack detail page.
+type stackEntry struct {
+	ChangeID   string
+	HexID      string // full hex commit id
+	ShortHex   string // first 8 hex chars of the commit id
+	Author     string
+	Date       string // author timestamp, UTC, time.DateTime layout
+	Phase      string
+	PhaseClass string // lower-cased phase name, used as a CSS class
+	Message    string
+	DiffStats  string // "+a -r across n file(s)", empty for root commits
+	SigStatus  string
+	Review     string // review status, "open" when no record exists
+}
+
+// srvStackData feeds the srv_repo_stack.html template.
+type srvStackData struct {
+	Repo    string
+	User    *User
+	StackID string
+	Entries []stackEntry
+}
+
+// handleRepoStacks renders an overview of all stacks in a repo. A stack
+// is a group of bookmarks named "stack/*" linked by first-parent edges;
+// each group is summarized with its root, depth, and per-change review
+// state.
+func (s *forgeServer) handleRepoStacks(w http.ResponseWriter, r *http.Request) {
+	repoObj, rec, ok := s.requireRepoAccess(w, r)
+	if !ok {
+		return
+	}
+	defer repoObj.Close()
+
+	bms, err := repoObj.Store.ListBookmarks()
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// Collect stack/ bookmarks together with their commit objects;
+	// bookmarks whose commit cannot be read are silently skipped.
+	type stackBM struct {
+		bm store.Bookmark
+		c  *object.Commit
+	}
+	var stackBMs []stackBM
+	for _, bm := range bms {
+		if !strings.HasPrefix(bm.Name, "stack/") {
+			continue
+		}
+		c, err := repoObj.ReadCommit(bm.CommitID)
+		if err != nil {
+			continue
+		}
+		stackBMs = append(stackBMs, stackBM{bm, c})
+	}
+
+	// Index by commit id so first-parent links can be resolved back to
+	// stack bookmarks in O(1).
+	commitToStackBM := make(map[[32]byte]stackBM)
+	for _, sb := range stackBMs {
+		commitToStackBM[sb.bm.CommitID] = sb
+	}
+
+	type stackGroup struct {
+		root  stackBM
+		chain []stackBM
+	}
+
+	// Group bookmarks into stacks: from each bookmark walk first parents
+	// down to the deepest stack/ ancestor (the root), then walk forward
+	// collecting descendants so the chain reads oldest-first.
+	var groups []stackGroup
+	visited := make(map[[32]byte]bool)
+
+	for _, sb := range stackBMs {
+		if visited[sb.bm.CommitID] {
+			continue
+		}
+		cur := sb
+		for {
+			if len(cur.c.Parents) == 0 {
+				break
+			}
+			parent := cur.c.Parents[0]
+			if psb, ok := commitToStackBM[parent]; ok {
+				cur = psb
+			} else {
+				break
+			}
+		}
+		if visited[cur.bm.CommitID] {
+			continue
+		}
+		var chain []stackBM
+		walk := cur
+		for {
+			visited[walk.bm.CommitID] = true
+			chain = append(chain, walk)
+			// Linear scan for an unvisited child whose first parent is
+			// the current tip; fine for small numbers of stack bookmarks.
+			found := false
+			for _, sb2 := range stackBMs {
+				if len(sb2.c.Parents) > 0 && sb2.c.Parents[0] == walk.bm.CommitID && !visited[sb2.bm.CommitID] {
+					walk = sb2
+					found = true
+					break
+				}
+			}
+			if !found {
+				break
+			}
+		}
+		groups = append(groups, stackGroup{root: cur, chain: chain})
+	}
+
+	reviews, _ := s.db.ListStackReviews(rec.ID)
+
+	type srvStackSummary struct {
+		StackID string
+		Root    string
+		Depth   int
+		Reviews map[string]string
+	}
+	var summaries []srvStackSummary
+	for _, g := range groups {
+		// Display name: "stack/ch-<id>" is shown as "ch:<id>".
+		root := "ch:" + strings.TrimPrefix(g.root.bm.Name, "stack/ch-")
+		stackID := strings.TrimPrefix(g.root.bm.Name, "stack/")
+		// Per-change review status, defaulting to "open" when no review
+		// record exists yet.
+		smap := make(map[string]string, len(g.chain))
+		for _, e := range g.chain {
+			cid := e.c.ChangeID
+			if st, ok := reviews[cid]; ok {
+				smap[cid] = st
+			} else {
+				smap[cid] = "open"
+			}
+		}
+		summaries = append(summaries, srvStackSummary{
+			StackID: stackID,
+			Root:    root,
+			Depth:   len(g.chain),
+			Reviews: smap,
+		})
+	}
+
+	type listData struct {
+		Repo   string
+		User   *User
+		Stacks []srvStackSummary
+	}
+	sort.Slice(summaries, func(i, j int) bool { return summaries[i].StackID < summaries[j].StackID })
+	s.render(w, "srv_repo_stacks.html", listData{
+		Repo:   rec.Name,
+		User:   s.db.currentUser(r),
+		Stacks: summaries,
+	})
+}
+
+// handleRepoStackDetail renders one stack of changes. Starting from the
+// bookmark "stack/<stackid>" it walks first-parent links backwards
+// through other stack/ bookmarks to find the root, then forwards to
+// collect descendants, and displays the chain oldest-first.
+func (s *forgeServer) handleRepoStackDetail(w http.ResponseWriter, r *http.Request) {
+	repoObj, rec, ok := s.requireRepoAccess(w, r)
+	if !ok {
+		return
+	}
+	defer repoObj.Close()
+
+	stackID := r.PathValue("stackid")
+	bmName := "stack/" + stackID
+
+	startBM, err := repoObj.Store.GetBookmark(bmName)
+	if err != nil || startBM == nil {
+		http.NotFound(w, r)
+		return
+	}
+
+	// Index every stack/ bookmark by the commit it points at so parent
+	// links can be resolved back to bookmarks in O(1).
+	bms, _ := repoObj.Store.ListBookmarks()
+	commitToStackBM := make(map[[32]byte]store.Bookmark)
+	for _, bm := range bms {
+		if strings.HasPrefix(bm.Name, "stack/") {
+			commitToStackBM[bm.CommitID] = bm
+		}
+	}
+
+	type walkEntry struct {
+		bm store.Bookmark
+		c  *object.Commit
+	}
+	var chain []walkEntry
+
+	cur := *startBM
+	curC, err := repoObj.ReadCommit(cur.CommitID)
+	if err != nil {
+		http.Error(w, "read commit: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// Walk towards the root: follow first parents while they are also
+	// stack/ bookmarks. upChain accumulates newest-first.
+	var upChain []walkEntry
+	for {
+		upChain = append(upChain, walkEntry{cur, curC})
+		if len(curC.Parents) == 0 {
+			break
+		}
+		if pbm, ok := commitToStackBM[curC.Parents[0]]; ok {
+			pc, err := repoObj.ReadCommit(pbm.CommitID)
+			if err != nil {
+				break
+			}
+			cur = pbm
+			curC = pc
+		} else {
+			break
+		}
+	}
+	// Reverse in place so the chain reads oldest-first.
+	for i, j := 0, len(upChain)-1; i < j; i, j = i+1, j-1 {
+		upChain[i], upChain[j] = upChain[j], upChain[i]
+	}
+	chain = append(chain, upChain...)
+
+	visited := make(map[[32]byte]bool)
+	for _, e := range chain {
+		visited[e.bm.CommitID] = true
+	}
+
+	// Extend past the starting bookmark: repeatedly look for an unvisited
+	// stack/ bookmark whose first parent is the current chain tip.
+	// (The original kept an unused copy of stackID and an unused bookmark
+	// map here; both were dead code and have been removed.)
+	for {
+		last := chain[len(chain)-1]
+		found := false
+		for _, bm := range bms {
+			if !strings.HasPrefix(bm.Name, "stack/") || visited[bm.CommitID] {
+				continue
+			}
+			c, err := repoObj.ReadCommit(bm.CommitID)
+			if err != nil {
+				continue
+			}
+			if len(c.Parents) > 0 && c.Parents[0] == last.bm.CommitID {
+				chain = append(chain, walkEntry{bm, c})
+				visited[bm.CommitID] = true
+				found = true
+				break
+			}
+		}
+		if !found {
+			break
+		}
+	}
+
+	reviews, _ := s.db.ListStackReviews(rec.ID)
+
+	var entries []stackEntry
+	for _, e := range chain {
+		commitHex := hex.EncodeToString(e.bm.CommitID[:])
+
+		// Summarize the diff against the first parent as
+		// "+a -r across n file(s)"; empty for roots or on read errors.
+		var diffStats string
+		if len(e.c.Parents) > 0 {
+			parent, err2 := repoObj.ReadCommit(e.c.Parents[0])
+			if err2 == nil {
+				fdiffs, err3 := diff.TreeDiff(repoObj, parent.TreeID, e.c.TreeID)
+				if err3 == nil {
+					added, removed := 0, 0
+					for _, fd := range fdiffs {
+						for _, line := range strings.Split(fd.Patch, "\n") {
+							if strings.HasPrefix(line, "+") && !strings.HasPrefix(line, "+++") {
+								added++
+							} else if strings.HasPrefix(line, "-") && !strings.HasPrefix(line, "---") {
+								removed++
+							}
+						}
+					}
+					diffStats = fmt.Sprintf("+%d -%d across %d file(s)", added, removed, len(fdiffs))
+				}
+			}
+		}
+
+		reviewStatus, ok := reviews[e.c.ChangeID]
+		if !ok {
+			reviewStatus = "open"
+		}
+
+		phase, _ := repoObj.Store.GetPhase(e.bm.CommitID)
+		phaseName := phase.String()
+		phaseClass := strings.ToLower(phaseName)
+
+		sigStatus := s.db.GetCommitSigStatus(e.bm.CommitID)
+
+		entries = append(entries, stackEntry{
+			ChangeID:   object.FormatChangeID(e.c.ChangeID),
+			HexID:      commitHex,
+			ShortHex:   commitHex[:8],
+			Author:     e.c.Author.Name,
+			Date:       e.c.Author.Timestamp.UTC().Format(time.DateTime),
+			Phase:      phaseName,
+			PhaseClass: phaseClass,
+			Message:    e.c.Message,
+			DiffStats:  diffStats,
+			SigStatus:  sigStatus,
+			Review:     reviewStatus,
+		})
+	}
+
+	s.render(w, "srv_repo_stack.html", srvStackData{
+		Repo:    rec.Name,
+		User:    s.db.currentUser(r),
+		StackID: stackID,
+		Entries: entries,
+	})
+}
+
+// handleStackSetReview records a review status for one change in a
+// stack. Any logged-in user with read access (via requireRepoAccess) may
+// set it; the status must be one of the four known values.
+func (s *forgeServer) handleStackSetReview(w http.ResponseWriter, r *http.Request) {
+	repoObj, rec, ok := s.requireRepoAccess(w, r)
+	if !ok {
+		return
+	}
+	defer repoObj.Close()
+
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+
+	stackID := r.PathValue("stackid")
+	changeID := r.PathValue("changeid")
+
+	if err := r.ParseForm(); err != nil {
+		http.Error(w, "bad form", http.StatusBadRequest)
+		return
+	}
+	// Whitelist of accepted review states; anything else is rejected.
+	status := r.FormValue("status")
+	switch status {
+	case "open", "reviewing", "approved", "needs-revision":
+	default:
+		http.Error(w, "invalid status", http.StatusBadRequest)
+		return
+	}
+
+	if err := s.db.SetStackReview(rec.ID, changeID, status, user.ID); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	http.Redirect(w, r, "/"+rec.Name+"/stacks/"+stackID, http.StatusSeeOther)
+}

internal/archesrv/handlers_wiki.go [A]
--- /dev/null
+++ b/internal/archesrv/handlers_wiki.go
@@ -1,0 +1,141 @@
+package archesrv
+
+import (
+	"html/template"
+	"net/http"
+	"strings"
+
+	"arche/internal/markdown"
+	"arche/internal/wiki"
+)
+
+// srvWikiListData feeds the srv_repo_wiki_list.html template.
+type srvWikiListData struct {
+	Repo  string
+	User  *User
+	Pages []wiki.Page
+}
+
+// srvWikiPageData feeds the srv_repo_wiki_page.html template.
+type srvWikiPageData struct {
+	Repo            string
+	User            *User
+	Title           string
+	RawContent      string        // raw page source, shown in the edit form
+	RenderedContent template.HTML // rendered markdown (view mode only)
+	Editing         bool          // true when showing the edit form
+}
+
+// handleRepoWikiList lists the wiki pages of a repo for any user with
+// read access.
+func (s *forgeServer) handleRepoWikiList(w http.ResponseWriter, r *http.Request) {
+	name := r.PathValue("repo")
+	rec, err := s.db.GetRepo(name)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	viewer := s.db.currentUser(r)
+	if !s.db.CanRead(rec, viewer) {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+	idb, err := s.openIssueDB(name)
+	if err != nil {
+		http.Error(w, "open wikidb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer idb.Close()
+	pages, err := idb.Wiki.List()
+	if err != nil {
+		http.Error(w, "list pages: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	s.render(w, "srv_repo_wiki_list.html", srvWikiListData{Repo: name, User: viewer, Pages: pages})
+}
+
+// handleRepoWikiPage shows a single wiki page (default "Home"), either
+// rendered as markdown or — with ?edit=1 and a logged-in user — as an
+// edit form pre-filled with the raw content.
+func (s *forgeServer) handleRepoWikiPage(w http.ResponseWriter, r *http.Request) {
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	user := s.db.currentUser(r)
+	if !s.db.CanRead(rec, user) {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	title := r.PathValue("title")
+	if title == "" {
+		title = "Home"
+	}
+
+	idb, err := s.openIssueDB(repoName)
+	if err != nil {
+		http.Error(w, "open wikidb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer idb.Close()
+
+	// Edit mode requires both the query flag and a logged-in user.
+	editing := r.URL.Query().Get("edit") == "1" && user != nil
+
+	page, err := idb.Wiki.Get(title)
+	if err != nil {
+		// NOTE(review): any Get error — not just "page missing" — is
+		// rendered as a blank new page (edit form for logged-in users);
+		// confirm real storage failures shouldn't be surfaced instead.
+		s.render(w, "srv_repo_wiki_page.html", srvWikiPageData{
+			Repo:    repoName,
+			User:    user,
+			Title:   title,
+			Editing: user != nil,
+		})
+		return
+	}
+
+	data := srvWikiPageData{
+		Repo:       repoName,
+		User:       user,
+		Title:      title,
+		RawContent: page.Content,
+		Editing:    editing,
+	}
+	// Only render markdown in view mode; the edit form uses RawContent.
+	if !editing && page.Content != "" {
+		data.RenderedContent = markdown.Render(page.Content)
+	}
+	s.render(w, "srv_repo_wiki_page.html", data)
+}
+
+// handleRepoWikiSave writes a wiki page (default "Home") with the posted
+// content, attributing the edit to the logged-in user, then redirects to
+// the page view.
+// NOTE(review): write access is gated only by login + CanRead — confirm
+// wiki edits are intentionally open to all readers rather than writers.
+// NOTE(review): title is interpolated into the redirect URL unescaped —
+// confirm titles are restricted to URL-safe characters.
+func (s *forgeServer) handleRepoWikiSave(w http.ResponseWriter, r *http.Request) {
+	repoName := r.PathValue("repo")
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		http.NotFound(w, r)
+		return
+	}
+	user := s.db.currentUser(r)
+	if user == nil {
+		http.Error(w, "login required", http.StatusUnauthorized)
+		return
+	}
+	if !s.db.CanRead(rec, user) {
+		http.Error(w, "Unauthorized", http.StatusUnauthorized)
+		return
+	}
+
+	title := r.PathValue("title")
+	if title == "" {
+		title = "Home"
+	}
+
+	r.ParseForm() //nolint:errcheck
+	// Strip trailing newlines the browser textarea tends to append.
+	content := strings.TrimRight(r.FormValue("content"), "\r\n")
+
+	idb, err := s.openIssueDB(repoName)
+	if err != nil {
+		http.Error(w, "open wikidb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer idb.Close()
+
+	if err := idb.Wiki.Set(title, content, user.Username); err != nil {
+		http.Error(w, "save page: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	http.Redirect(w, r, "/"+repoName+"/wiki/"+title, http.StatusFound)
+}

internal/archesrv/invites.go [A]
--- /dev/null
+++ b/internal/archesrv/invites.go
@@ -1,0 +1,112 @@
+package archesrv
+
+import (
+	"database/sql"
+	"time"
+)
+
+// InviteToken is one row of the invite_tokens table. UsedBy/UsedAt are
+// nil while the invite is still unspent.
+type InviteToken struct {
+	ID        int64
+	Token     string // opaque secret produced by generateToken
+	CreatedBy int64  // user id of the creator
+	UsedBy    *int64 // consumer's user id, nil if unused
+	CreatedAt time.Time
+	UsedAt    *time.Time // nil if unused
+}
+
+// CreateInvite generates a fresh random invite token owned by createdBy
+// and persists it. The returned record mirrors the stored row.
+func (d *DB) CreateInvite(createdBy int64) (*InviteToken, error) {
+	tok, err := generateToken()
+	if err != nil {
+		return nil, err
+	}
+	// Capture the timestamp once so the stored created_at and the
+	// returned CreatedAt always agree (the original called time.Now()
+	// twice and could disagree across a second boundary).
+	now := time.Now()
+	res, err := d.db.Exec(
+		"INSERT INTO invite_tokens(token,created_by,created_at) VALUES(?,?,?)",
+		tok, createdBy, now.Unix(),
+	)
+	if err != nil {
+		return nil, err
+	}
+	id, _ := res.LastInsertId()
+	return &InviteToken{ID: id, Token: tok, CreatedBy: createdBy, CreatedAt: now}, nil
+}
+
+// GetInvite looks up an invite by its secret token value. It returns
+// (nil, nil) when no such token exists.
+func (d *DB) GetInvite(token string) (*InviteToken, error) {
+	var inv InviteToken
+	var usedBy sql.NullInt64
+	var usedAt sql.NullInt64
+	var createdAt int64
+	err := d.db.QueryRow(
+		"SELECT id, token, created_by, used_by, created_at, used_at FROM invite_tokens WHERE token=?",
+		token,
+	).Scan(&inv.ID, &inv.Token, &inv.CreatedBy, &usedBy, &createdAt, &usedAt)
+	if err == sql.ErrNoRows {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	// Timestamps are stored as Unix seconds; convert back to time.Time,
+	// keeping the nullable used_* fields as nil pointers when unset.
+	inv.CreatedAt = time.Unix(createdAt, 0)
+	if usedBy.Valid {
+		v := usedBy.Int64
+		inv.UsedBy = &v
+	}
+	if usedAt.Valid {
+		t := time.Unix(usedAt.Int64, 0)
+		inv.UsedAt = &t
+	}
+	return &inv, nil
+}
+
+// UseInvite atomically marks an invite as consumed by userID. It
+// succeeds only when the token exists and has not been used before; a
+// missing or already-spent token yields sql.ErrNoRows.
+func (d *DB) UseInvite(token string, userID int64) error {
+	res, err := d.db.Exec(
+		"UPDATE invite_tokens SET used_by=?, used_at=? WHERE token=? AND used_by IS NULL",
+		userID, time.Now().Unix(), token,
+	)
+	if err != nil {
+		return err
+	}
+	if n, _ := res.RowsAffected(); n == 0 {
+		return sql.ErrNoRows
+	}
+	return nil
+}
+
+// ListInvites returns every invite created by the given user, newest
+// first (ordered by descending id).
+func (d *DB) ListInvites(createdBy int64) ([]InviteToken, error) {
+	rows, err := d.db.Query(
+		`SELECT id, token, created_by, used_by, created_at, used_at
+		 FROM invite_tokens WHERE created_by=? ORDER BY id DESC`,
+		createdBy,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []InviteToken
+	for rows.Next() {
+		var inv InviteToken
+		var usedBy sql.NullInt64
+		var usedAt sql.NullInt64
+		var createdAt int64
+		if err := rows.Scan(&inv.ID, &inv.Token, &inv.CreatedBy, &usedBy, &createdAt, &usedAt); err != nil {
+			return nil, err
+		}
+		// Unix-seconds → time.Time; nullable used_* stay nil when unset.
+		inv.CreatedAt = time.Unix(createdAt, 0)
+		if usedBy.Valid {
+			v := usedBy.Int64
+			inv.UsedBy = &v
+		}
+		if usedAt.Valid {
+			t := time.Unix(usedAt.Int64, 0)
+			inv.UsedAt = &t
+		}
+		out = append(out, inv)
+	}
+	return out, rows.Err()
+}
+
+// DeleteInvite removes an unused invite by id; already-used invites are
+// never deleted (the WHERE clause requires used_by IS NULL).
+// NOTE(review): the adminID parameter is accepted but ignored — deletion
+// is not scoped to the invite's creator. Confirm whether the query
+// should also filter on created_by=adminID.
+func (d *DB) DeleteInvite(id, adminID int64) error {
+	_, err := d.db.Exec("DELETE FROM invite_tokens WHERE id=? AND used_by IS NULL", id)
+	return err
+}

internal/archesrv/issues_test.go [A]
--- /dev/null
+++ b/internal/archesrv/issues_test.go
@@ -1,0 +1,207 @@
+package archesrv
+
+import (
+	"io"
+	"net/http"
+	"strings"
+	"testing"
+)
+
+// TestForgeServer_Issues_CreateAndList: an admin creates an issue via
+// POST and the issue listing page subsequently contains its title.
+func TestForgeServer_Issues_CreateAndList(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/issues", map[string][]string{
+		"title": {"First issue"},
+		"body":  {"some body text"},
+	})
+	if err != nil {
+		t.Fatalf("POST /myrepo/issues: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Fatalf("create issue: got %d", resp.StatusCode)
+	}
+
+	// The listing page should now include the new issue's title.
+	resp2, err := client.Get(ts.URL + "/myrepo/issues")
+	if err != nil {
+		t.Fatalf("GET /myrepo/issues: %v", err)
+	}
+	defer resp2.Body.Close()
+	if resp2.StatusCode != http.StatusOK {
+		t.Errorf("GET /myrepo/issues: want 200, got %d", resp2.StatusCode)
+	}
+	body, _ := io.ReadAll(resp2.Body)
+	if !strings.Contains(string(body), "First issue") {
+		t.Error("issue list page should contain the issue title")
+	}
+}
+
+// TestForgeServer_Issues_UnauthenticatedCannotCreate: an anonymous POST
+// to a public repo's issue endpoint must be rejected with 401.
+func TestForgeServer_Issues_UnauthenticatedCannotCreate(t *testing.T) {
+	s, ts := newTestServer(t)
+	setupRepoWithDisk(t, s, "myrepo", "public")
+
+	// http.PostForm uses the default client, which carries no session.
+	resp, err := http.PostForm(ts.URL+"/myrepo/issues", map[string][]string{
+		"title": {"sneaky issue"},
+	})
+	if err != nil {
+		t.Fatalf("POST /myrepo/issues: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Errorf("expected 401, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Issues_MissingTitleIsError: submitting an empty title
+// must be rejected with 400 Bad Request.
+func TestForgeServer_Issues_MissingTitleIsError(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/issues", map[string][]string{
+		"title": {""},
+	})
+	if err != nil {
+		t.Fatalf("POST: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusBadRequest {
+		t.Errorf("empty title should return 400, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Issues_AddComment verifies that a logged-in admin can
+// attach a comment to an existing issue.
+func TestForgeServer_Issues_AddComment(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	// Create the issue to comment on. The original ignored the error and
+	// would nil-deref on resp.Body.Close() if the POST failed.
+	resp, err := client.PostForm(ts.URL+"/myrepo/issues", map[string][]string{
+		"title": {"Bug report"},
+		"body":  {"details"},
+	})
+	if err != nil {
+		t.Fatalf("POST issue: %v", err)
+	}
+	resp.Body.Close()
+
+	issueID := extractIssueIDFromList(t, client, ts.URL+"/myrepo/issues")
+
+	resp2, err := client.PostForm(ts.URL+"/myrepo/issue/comment", map[string][]string{
+		"issue_id": {issueID},
+		"text":     {"looks good"},
+	})
+	if err != nil {
+		t.Fatalf("POST comment: %v", err)
+	}
+	resp2.Body.Close()
+	if resp2.StatusCode >= 400 {
+		t.Errorf("add comment: got %d", resp2.StatusCode)
+	}
+}
+
+// TestForgeServer_Issues_SetStatus verifies that an issue's status can be
+// changed (here: closed) via the status endpoint.
+func TestForgeServer_Issues_SetStatus(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	// Create an issue; the original discarded the response entirely,
+	// leaking the body and ignoring any transport error.
+	cresp, err := client.PostForm(ts.URL+"/myrepo/issues", map[string][]string{"title": {"Close me"}})
+	if err != nil {
+		t.Fatalf("POST issue: %v", err)
+	}
+	cresp.Body.Close()
+
+	issueID := extractIssueIDFromList(t, client, ts.URL+"/myrepo/issues")
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/issue/status", map[string][]string{
+		"issue_id": {issueID},
+		"status":   {"closed"},
+	})
+	if err != nil {
+		t.Fatalf("POST status: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("set status: got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Issues_ViewPage creates an issue, follows the creation
+// redirect's Location, and checks the view page shows title and body.
+// NOTE(review): the test skips when no Location header is present; if
+// the shared client follows redirects, the final response never carries
+// one and this test may always skip — confirm.
+func TestForgeServer_Issues_ViewPage(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/issues", map[string][]string{
+		"title": {"View me"},
+		"body":  {"body of the issue"},
+	})
+	if err != nil {
+		t.Fatalf("POST issue: %v", err)
+	}
+	resp.Body.Close()
+
+	loc := resp.Header.Get("Location")
+	if loc == "" {
+		t.Skip("no Location header on issue create; skipping view test")
+	}
+
+	resp2, err := client.Get(ts.URL + loc)
+	if err != nil {
+		t.Fatalf("GET issue page: %v", err)
+	}
+	defer resp2.Body.Close()
+	if resp2.StatusCode != http.StatusOK {
+		t.Errorf("issue view: want 200, got %d", resp2.StatusCode)
+	}
+	body, _ := io.ReadAll(resp2.Body)
+	if !strings.Contains(string(body), "View me") {
+		t.Error("issue page should show the issue title")
+	}
+	if !strings.Contains(string(body), "body of the issue") {
+		t.Error("issue page should show the issue body")
+	}
+}
+
+// TestForgeServer_Issues_ViewMissingIssue404: viewing a nonexistent
+// issue id must return 404.
+func TestForgeServer_Issues_ViewMissingIssue404(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.Get(ts.URL + "/myrepo/issue?id=nonexistentissue")
+	if err != nil {
+		t.Fatalf("GET: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusNotFound {
+		t.Errorf("missing issue: want 404, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Issues_ViewOnMissingRepo404: the issue listing of a
+// nonexistent repo must return 404 even for an authenticated admin.
+func TestForgeServer_Issues_ViewOnMissingRepo404(t *testing.T) {
+	s, ts := newTestServer(t)
+	// Create and log in a fresh admin directly (no repo is set up here).
+	s.db.CreateUser("admin", "adminpass", true) //nolint:errcheck
+	client := loginAs(t, ts, "admin", "adminpass")
+
+	resp, err := client.Get(ts.URL + "/ghost/issues")
+	if err != nil {
+		t.Fatalf("GET: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusNotFound {
+		t.Errorf("missing repo: want 404, got %d", resp.StatusCode)
+	}
+}
+
+// extractIssueIDFromList fetches the issue listing page and pulls the
+// first issue id out of a "?id=..." link, skipping the calling test when
+// no such link is present.
+func extractIssueIDFromList(t *testing.T, client *http.Client, listURL string) string {
+	t.Helper()
+	resp, err := client.Get(listURL)
+	if err != nil {
+		t.Fatalf("GET issue list: %v", err)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	resp.Body.Close()
+	page := string(body)
+
+	idx := strings.Index(page, "?id=")
+	if idx == -1 {
+		t.Skip("could not find issue ID in listing; skipping")
+	}
+	rest := page[idx+4:]
+	end := strings.IndexAny(rest, `"& `)
+	if end == -1 {
+		// No delimiter: take at most 64 chars. The original sliced
+		// page[idx+4 : idx+4+64] unconditionally, which panics when
+		// fewer than 64 bytes remain after the match.
+		end = 64
+		if end > len(rest) {
+			end = len(rest)
+		}
+	}
+	return rest[:end]
+}

internal/archesrv/mtls.go [A]
--- /dev/null
+++ b/internal/archesrv/mtls.go
@@ -1,0 +1,69 @@
+package archesrv
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"math/big"
+	"net/http"
+	"time"
+)
+
+func (s *forgeServer) RunMTLS(addr, certFile, keyFile string) error {
+	tlsCfg := tlsConfigMTLS()
+
+	if certFile != "" && keyFile != "" {
+		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+		if err != nil {
+			return fmt.Errorf("mTLS: load server cert: %w", err)
+		}
+		tlsCfg.Certificates = []tls.Certificate{cert}
+	} else {
+		cert, err := generateEphemeralCert()
+		if err != nil {
+			return fmt.Errorf("mTLS: generate self-signed cert: %w", err)
+		}
+		tlsCfg.Certificates = []tls.Certificate{cert}
+		s.log.Warn("mTLS using ephemeral self-signed cert", "hint", "set tls_cert/tls_key for a stable cert")
+	}
+
+	ln, err := tls.Listen("tcp", addr, tlsCfg)
+	if err != nil {
+		return fmt.Errorf("mTLS listen %s: %w", addr, err)
+	}
+	s.log.Info("mTLS listening", "addr", addr)
+
+	srv := &http.Server{Handler: s.routes()}
+	return srv.Serve(ln)
+}
+
+func generateEphemeralCert() (tls.Certificate, error) {
+	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		return tls.Certificate{}, err
+	}
+	tmpl := &x509.Certificate{
+		SerialNumber: big.NewInt(1),
+		Subject:      pkix.Name{CommonName: "arche-server"},
+		NotBefore:    time.Now().Add(-time.Minute),
+		NotAfter:     time.Now().Add(10 * 365 * 24 * time.Hour),
+		KeyUsage:     x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+	}
+	certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &priv.PublicKey, priv)
+	if err != nil {
+		return tls.Certificate{}, err
+	}
+	privDER, err := x509.MarshalECPrivateKey(priv)
+	if err != nil {
+		return tls.Certificate{}, err
+	}
+	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
+	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: privDER})
+	return tls.X509KeyPair(certPEM, keyPEM)
+}

internal/archesrv/mtlscerts.go [A]
--- /dev/null
+++ b/internal/archesrv/mtlscerts.go
@@ -1,0 +1,105 @@
+package archesrv
+
+import (
+	"crypto/sha256"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"time"
+)
+
// MTLSCert is a client certificate registered by a user for mTLS
// authentication, as stored in the mtls_certs table.
type MTLSCert struct {
	ID          int64     // row id (primary key)
	UserID      int64     // owning user's id
	Label       string    // user-chosen display label
	Fingerprint string    // hex-encoded SHA-256 of the certificate DER
	CertPEM     string    // full certificate, PEM-encoded
	AddedAt     time.Time // when the cert was registered
}
+
+func certFingerprint(cert *x509.Certificate) string {
+	sum := sha256.Sum256(cert.Raw)
+	return fmt.Sprintf("%x", sum)
+}
+
+func certFingerprintFromPEM(certPEM string) (string, *x509.Certificate, error) {
+	block, _ := pem.Decode([]byte(certPEM))
+	if block == nil || block.Type != "CERTIFICATE" {
+		return "", nil, fmt.Errorf("invalid certificate PEM (expected CERTIFICATE block)")
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		return "", nil, fmt.Errorf("parse certificate: %w", err)
+	}
+	return certFingerprint(cert), cert, nil
+}
+
+func (d *DB) AddMTLSCert(userID int64, label, certPEM string) (*MTLSCert, error) {
+	fp, _, err := certFingerprintFromPEM(certPEM)
+	if err != nil {
+		return nil, err
+	}
+	now := time.Now().Unix()
+	res, err := d.db.Exec(
+		"INSERT INTO mtls_certs(user_id,label,fingerprint,cert_pem,added_at) VALUES(?,?,?,?,?)",
+		userID, label, fp, certPEM, now,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("add mTLS cert: %w", err)
+	}
+	id, _ := res.LastInsertId()
+	return &MTLSCert{
+		ID:          id,
+		UserID:      userID,
+		Label:       label,
+		Fingerprint: fp,
+		CertPEM:     certPEM,
+		AddedAt:     time.Unix(now, 0),
+	}, nil
+}
+
+func (d *DB) ListMTLSCerts(userID int64) ([]MTLSCert, error) {
+	rows, err := d.db.Query(
+		"SELECT id, user_id, label, fingerprint, cert_pem, added_at FROM mtls_certs WHERE user_id=? ORDER BY id",
+		userID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []MTLSCert
+	for rows.Next() {
+		var c MTLSCert
+		var ts int64
+		if err := rows.Scan(&c.ID, &c.UserID, &c.Label, &c.Fingerprint, &c.CertPEM, &ts); err != nil {
+			return nil, err
+		}
+		c.AddedAt = time.Unix(ts, 0)
+		out = append(out, c)
+	}
+	return out, rows.Err()
+}
+
+func (d *DB) DeleteMTLSCert(certID, userID int64) error {
+	_, err := d.db.Exec("DELETE FROM mtls_certs WHERE id=? AND user_id=?", certID, userID)
+	return err
+}
+
+func (d *DB) AuthorizeMTLSCert(fingerprint string) (*User, error) {
+	var userID int64
+	err := d.db.QueryRow(
+		"SELECT user_id FROM mtls_certs WHERE fingerprint=?", fingerprint,
+	).Scan(&userID)
+	if err != nil {
+		return nil, nil //nolint:nilerr
+	}
+	return d.GetUserByID(userID)
+}
+
+func tlsConfigMTLS() *tls.Config {
+	return &tls.Config{
+		ClientAuth: tls.RequireAnyClientCert,
+		MinVersion: tls.VersionTLS12,
+	}
+}

internal/archesrv/pure_test.go [A]
--- /dev/null
+++ b/internal/archesrv/pure_test.go
@@ -1,0 +1,114 @@
+package archesrv
+
+import (
+	"testing"
+
+	"arche/internal/object"
+)
+
+func TestParseSrvDiffLines_AddRemoveContext(t *testing.T) {
+	patch := `+added line
+-removed line
+ context line`
+
+	lines := parseSrvDiffLines(patch)
+	if len(lines) != 3 {
+		t.Fatalf("want 3 lines, got %d", len(lines))
+	}
+	if lines[0].Class != "diff-add" {
+		t.Errorf("line 0 class: want diff-add, got %q", lines[0].Class)
+	}
+	if lines[1].Class != "diff-del" {
+		t.Errorf("line 1 class: want diff-del, got %q", lines[1].Class)
+	}
+	if lines[2].Class != "" {
+		t.Errorf("line 2 class: want empty, got %q", lines[2].Class)
+	}
+}
+
+func TestParseSrvDiffLines_Headers(t *testing.T) {
+	cases := []struct {
+		line      string
+		wantClass string
+	}{
+		{"--- a/foo.go", "diff-hdr"},
+		{"+++ b/foo.go", "diff-hdr"},
+		{"@@ -1,5 +1,6 @@", "diff-hdr"},
+		{"diff --git a/foo.go b/foo.go", "diff-hdr"},
+	}
+	for _, tc := range cases {
+		lines := parseSrvDiffLines(tc.line)
+		if len(lines) == 0 {
+			t.Fatalf("no lines parsed for %q", tc.line)
+		}
+		if lines[0].Class != tc.wantClass {
+			t.Errorf("line %q: want class %q, got %q", tc.line, tc.wantClass, lines[0].Class)
+		}
+	}
+}
+
+func TestParseSrvDiffLines_TextPreserved(t *testing.T) {
+	patch := "+hello world"
+	lines := parseSrvDiffLines(patch)
+	if len(lines) == 0 {
+		t.Fatal("expected at least one line")
+	}
+	if lines[0].Text != patch {
+		t.Errorf("text: want %q, got %q", patch, lines[0].Text)
+	}
+}
+
+func TestParseSrvDiffLines_EmptyInput(t *testing.T) {
+	lines := parseSrvDiffLines("")
+	if len(lines) != 1 {
+		t.Errorf("empty input: want 1 (empty) line, got %d", len(lines))
+	}
+	if lines[0].Text != "" || lines[0].Class != "" {
+		t.Errorf("unexpected non-empty line for empty input: %+v", lines[0])
+	}
+}
+
+func TestIsBinaryContent_NullByte(t *testing.T) {
+	if !isBinaryContent([]byte{0x68, 0x65, 0x00, 0x6c, 0x6f}) {
+		t.Error("expected binary when null byte present")
+	}
+}
+
+func TestIsBinaryContent_PlainText(t *testing.T) {
+	if isBinaryContent([]byte("hello, world\n")) {
+		t.Error("expected non-binary for plain text")
+	}
+}
+
+func TestIsBinaryContent_EmptySlice(t *testing.T) {
+	if isBinaryContent(nil) {
+		t.Error("nil slice should not be binary")
+	}
+	if isBinaryContent([]byte{}) {
+		t.Error("empty slice should not be binary")
+	}
+}
+
+func TestIsBinaryContent_AllFF(t *testing.T) {
+	if isBinaryContent([]byte{0xFF, 0xFE, 0xFD}) {
+		t.Error("0xFF without null should not be considered binary")
+	}
+}
+
+func TestModeStr(t *testing.T) {
+	cases := []struct {
+		mode object.EntryMode
+		want string
+	}{
+		{object.ModeFile, "file"},
+		{object.ModeExec, "exec"},
+		{object.ModeSymlink, "link"},
+		{object.ModeDir, "file"},
+	}
+	for _, tc := range cases {
+		got := modeStr(tc.mode)
+		if got != tc.want {
+			t.Errorf("modeStr(%v): want %q, got %q", tc.mode, tc.want, got)
+		}
+	}
+}

internal/archesrv/repo_access_test.go [A]
--- /dev/null
+++ b/internal/archesrv/repo_access_test.go
@@ -1,0 +1,130 @@
+package archesrv
+
+import (
+	"net/http"
+	"testing"
+)
+
+func TestForgeServer_Index_LoadsAfterSetup(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+
+	resp, err := client.Get(ts.URL + "/")
+	if err != nil {
+		t.Fatalf("GET /: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("index after setup: want 200, got %d", resp.StatusCode)
+	}
+}
+
+func TestForgeServer_Index_RedirectsToSetupWhenEmpty(t *testing.T) {
+	_, ts := newTestServer(t)
+
+	client := &http.Client{
+		CheckRedirect: func(req *http.Request, via []*http.Request) error {
+			return http.ErrUseLastResponse
+		},
+	}
+
+	resp, err := client.Get(ts.URL + "/")
+	if err != nil {
+		t.Fatalf("GET /: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusFound && resp.StatusCode != http.StatusMovedPermanently {
+		t.Errorf("empty DB: want redirect, got %d", resp.StatusCode)
+	}
+	loc := resp.Header.Get("Location")
+	if loc != "/setup" {
+		t.Errorf("redirect target: want /setup, got %q", loc)
+	}
+}
+
+func TestForgeServer_RepoHome_RedirectsToLog(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	noRedir := *client
+	noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+		return http.ErrUseLastResponse
+	}
+
+	resp, err := noRedir.Get(ts.URL + "/myrepo")
+	if err != nil {
+		t.Fatalf("GET /myrepo: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusFound && resp.StatusCode != http.StatusMovedPermanently {
+		t.Errorf("repo home: want redirect, got %d", resp.StatusCode)
+	}
+	loc := resp.Header.Get("Location")
+	if loc != "/myrepo/log" {
+		t.Errorf("redirect target: want /myrepo/log, got %q", loc)
+	}
+}
+
+func TestForgeServer_PrivateRepo_DeniesAnon(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, _ = loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	anon := &http.Client{}
+	for _, path := range []string{
+		"/myrepo/log",
+		"/myrepo/issues",
+		"/myrepo/wiki",
+	} {
+		resp, err := anon.Get(ts.URL + path)
+		if err != nil {
+			t.Fatalf("GET %s: %v", path, err)
+		}
+		resp.Body.Close()
+		if resp.StatusCode != http.StatusUnauthorized {
+			t.Errorf("%s anon on private repo: want 401, got %d", path, resp.StatusCode)
+		}
+	}
+}
+
+func TestForgeServer_PublicRepo_AllowsAnon(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, _ = loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "mypub", "public")
+
+	anon := &http.Client{}
+	for _, path := range []string{
+		"/mypub/issues",
+		"/mypub/wiki",
+	} {
+		resp, err := anon.Get(ts.URL + path)
+		if err != nil {
+			t.Fatalf("GET %s: %v", path, err)
+		}
+		resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			t.Errorf("%s anon on public repo: want 200, got %d", path, resp.StatusCode)
+		}
+	}
+}
+
+func TestForgeServer_MissingRepo_Returns404(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+
+	for _, path := range []string{
+		"/doesnotexist/issues",
+		"/doesnotexist/wiki",
+		"/doesnotexist/log",
+	} {
+		resp, err := client.Get(ts.URL + path)
+		if err != nil {
+			t.Fatalf("GET %s: %v", path, err)
+		}
+		resp.Body.Close()
+		if resp.StatusCode != http.StatusNotFound {
+			t.Errorf("%s: want 404, got %d", path, resp.StatusCode)
+		}
+	}
+}

internal/archesrv/repos.go [A]
--- /dev/null
+++ b/internal/archesrv/repos.go
@@ -1,0 +1,208 @@
+package archesrv
+
+import (
+	"database/sql"
+	"fmt"
+	"path/filepath"
+	"time"
+
+	"arche/internal/repo"
+)
+
// RepoRecord is a repository row from the repos table.
type RepoRecord struct {
	ID          int64     // primary key
	Name        string    // repo name; also used as the on-disk directory name
	Description string    // free-form description
	Visibility  string    // "public" or "private"
	CreatedAt   time.Time // creation time
}

// IsPublic reports whether the repo is readable without authentication.
func (r *RepoRecord) IsPublic() bool { return r.Visibility == "public" }
+
+func repoPath(dataDir, name string) string {
+	return filepath.Join(dataDir, name)
+}
+
+func (d *DB) CreateRepo(name, description, visibility string) (*RepoRecord, error) {
+	if visibility != "public" && visibility != "private" {
+		visibility = "private"
+	}
+	now := time.Now()
+	res, err := d.db.Exec(
+		"INSERT INTO repos(name,description,visibility,created_at) VALUES(?,?,?,?)",
+		name, description, visibility, now.Unix(),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("create repo: %w", err)
+	}
+	id, _ := res.LastInsertId()
+	return &RepoRecord{
+		ID:          id,
+		Name:        name,
+		Description: description,
+		Visibility:  visibility,
+		CreatedAt:   now,
+	}, nil
+}
+
+func (d *DB) GetRepo(name string) (*RepoRecord, error) {
+	r := &RepoRecord{}
+	var ts int64
+	err := d.db.QueryRow(
+		"SELECT id, name, description, visibility, created_at FROM repos WHERE name=?", name,
+	).Scan(&r.ID, &r.Name, &r.Description, &r.Visibility, &ts)
+	if err == sql.ErrNoRows {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	r.CreatedAt = time.Unix(ts, 0)
+	return r, nil
+}
+
+func (d *DB) ListRepos() ([]RepoRecord, error) {
+	rows, err := d.db.Query(
+		"SELECT id, name, description, visibility, created_at FROM repos ORDER BY name",
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []RepoRecord
+	for rows.Next() {
+		var r RepoRecord
+		var ts int64
+		if err := rows.Scan(&r.ID, &r.Name, &r.Description, &r.Visibility, &ts); err != nil {
+			return nil, err
+		}
+		r.CreatedAt = time.Unix(ts, 0)
+		out = append(out, r)
+	}
+	return out, rows.Err()
+}
+
+func (d *DB) DeleteRepo(name string) error {
+	_, err := d.db.Exec("DELETE FROM repos WHERE name=?", name)
+	return err
+}
+
+func (d *DB) CanRead(rec *RepoRecord, u *User) bool {
+	if rec.IsPublic() {
+		return true
+	}
+	if u == nil {
+		return false
+	}
+	if u.IsAdmin {
+		return true
+	}
+	return d.hasRole(rec.ID, u.ID, "read", "write", "admin")
+}
+
+func (d *DB) CanWrite(rec *RepoRecord, u *User) bool {
+	if u == nil {
+		return false
+	}
+	if u.IsAdmin {
+		return true
+	}
+	return d.hasRole(rec.ID, u.ID, "write", "admin")
+}
+
+func (d *DB) hasRole(repoID, userID int64, roles ...string) bool {
+	for _, role := range roles {
+		var count int
+		d.db.QueryRow( //nolint:errcheck
+			"SELECT COUNT(*) FROM repo_permissions WHERE repo_id=? AND user_id=? AND role=?",
+			repoID, userID, role,
+		).Scan(&count)
+		if count > 0 {
+			return true
+		}
+	}
+	return false
+}
+
// SetPermission grants or updates userID's role on repoID; the ON
// CONFLICT clause makes this an upsert keyed on (repo_id, user_id).
func (d *DB) SetPermission(repoID, userID int64, role string) error {
	_, err := d.db.Exec(
		`INSERT INTO repo_permissions(repo_id,user_id,role) VALUES(?,?,?)
		 ON CONFLICT(repo_id,user_id) DO UPDATE SET role=excluded.role`,
		repoID, userID, role,
	)
	return err
}
+
+func (d *DB) RemovePermission(repoID, userID int64) error {
+	_, err := d.db.Exec("DELETE FROM repo_permissions WHERE repo_id=? AND user_id=?", repoID, userID)
+	return err
+}
+
// CollabEntry is one collaborator row joined with its user record, as
// shown on a repo's settings page.
type CollabEntry struct {
	UserID   int64  // collaborator's user id
	Username string // collaborator's login name
	Role     string // granted role (e.g. "read", "write", "admin")
}
+
+func (d *DB) ListCollaborators(repoID int64) ([]CollabEntry, error) {
+	rows, err := d.db.Query(
+		`SELECT u.id, u.username, rp.role
+		 FROM repo_permissions rp
+		 JOIN users u ON u.id = rp.user_id
+		 WHERE rp.repo_id = ?
+		 ORDER BY u.username`,
+		repoID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []CollabEntry
+	for rows.Next() {
+		var e CollabEntry
+		if err := rows.Scan(&e.UserID, &e.Username, &e.Role); err != nil {
+			return nil, err
+		}
+		out = append(out, e)
+	}
+	return out, rows.Err()
+}
+
+func (d *DB) UpdateRepo(name, description, visibility string) error {
+	if visibility != "public" && visibility != "private" {
+		visibility = "private"
+	}
+	_, err := d.db.Exec(
+		"UPDATE repos SET description=?, visibility=? WHERE name=?",
+		description, visibility, name,
+	)
+	return err
+}
+
+func (d *DB) GetRepoHookConfig(repoID int64) (allowShellHooks bool, postReceiveScript string, err error) {
+	err = d.db.QueryRow(
+		"SELECT allow_shell_hooks, post_receive_script FROM repos WHERE id=?", repoID,
+	).Scan(&allowShellHooks, &postReceiveScript)
+	return
+}
+
+func (d *DB) SetRepoAllowShellHooks(repoID int64, allow bool, scriptPath string) error {
+	_, err := d.db.Exec(
+		"UPDATE repos SET allow_shell_hooks=?, post_receive_script=? WHERE id=?",
+		allow, scriptPath, repoID,
+	)
+	return err
+}
+
+func (d *DB) hasWriteCollaborator(repoID int64) bool {
+	var count int
+	d.db.QueryRow( //nolint:errcheck
+		`SELECT COUNT(*) FROM repo_permissions WHERE repo_id=? AND (role='write' OR role='admin')`,
+		repoID,
+	).Scan(&count)
+	return count > 0
+}
+
// openRepo opens the on-disk repository named name under dataDir.
func openRepo(dataDir, name string) (*repo.Repo, error) {
	return repo.Open(repoPath(dataDir, name))
}

internal/archesrv/server.go [A]
--- /dev/null
+++ b/internal/archesrv/server.go
@@ -1,0 +1,229 @@
+package archesrv
+
+import (
+	"embed"
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"html/template"
+	"log/slog"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime/debug"
+	"strings"
+
+	"arche/internal/markdown"
+	"arche/internal/object"
+	"arche/internal/repo"
+)
+
// tmplFS embeds all server-side HTML templates at build time.
//
//go:embed templates/*.html
var tmplFS embed.FS

// forgeServer is the web forge: HTTP handlers plus shared state.
type forgeServer struct {
	db  *DB          // server database (users, repos, permissions, …)
	cfg Config       // loaded server configuration
	log *slog.Logger // structured logger
}

// dataDir is the root directory holding repos and the server database.
func (s *forgeServer) dataDir() string { return s.cfg.Storage.DataDir }
+
// Run is the arche-server entry point: parse flags, load the config,
// open the database, apply per-repo hook config, start the optional SSH
// and mTLS listeners, then serve HTTP until that fails.
func Run() error {
	configPath := flag.String("config", "server.toml", "path to server.toml")
	flag.Parse()

	cfg, err := LoadConfig(*configPath)
	if err != nil {
		return err
	}

	// The data dir holds repos and the server database; create it first.
	if err := os.MkdirAll(cfg.Storage.DataDir, 0o755); err != nil {
		return fmt.Errorf("create data dir: %w", err)
	}

	dbPath := filepath.Join(cfg.Storage.DataDir, "server.db")
	db, err := openDB(dbPath)
	if err != nil {
		return err
	}
	defer db.Close()

	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	s := &forgeServer{db: db, cfg: cfg, log: logger}

	// Push per-repo hook settings from the config file into the DB.
	// Repos named in the config but absent from the DB are skipped
	// silently (errors are deliberately ignored here).
	for repoName, repoCfg := range cfg.Repo {
		rec, err := db.GetRepo(repoName)
		if err != nil || rec == nil {
			continue
		}
		_ = db.SetRepoAllowShellHooks(rec.ID, repoCfg.AllowShellHooks, repoCfg.PostReceive)
	}
	addr := cfg.Server.ListenHTTP
	if addr == "" {
		addr = ":8080"
	}

	// Optional SSH listener; its failure is logged but does not take
	// down the HTTP server.
	if sshAddr := cfg.Server.ListenSSH; sshAddr != "" {
		go func() {
			if err := s.RunSSH(sshAddr); err != nil {
				s.log.Error("SSH listener failed", "err", err)
			}
		}()
	}

	// Optional mTLS listener, same failure policy as SSH.
	if mtlsAddr := cfg.Server.ListenMTLS; mtlsAddr != "" {
		go func() {
			if err := s.RunMTLS(mtlsAddr, cfg.Server.TLSCert, cfg.Server.TLSKey); err != nil {
				s.log.Error("mTLS listener failed", "err", err)
			}
		}()
	}

	s.log.Info("listening", "addr", "http://"+addr)
	return http.ListenAndServe(addr, s.routes())
}
+
// routes builds the HTTP handler: a method+pattern ServeMux wrapped in
// panic-recovery and request-logging middleware. Auth/permission checks
// happen inside the individual handlers, not here.
func (s *forgeServer) routes() http.Handler {
	mux := http.NewServeMux()

	// First-run setup.
	mux.HandleFunc("GET /setup", s.handleSetup)
	mux.HandleFunc("POST /setup", s.handleSetupPost)

	// Session login/logout.
	mux.HandleFunc("GET /login", s.handleLoginPage)
	mux.HandleFunc("POST /login", s.handleLoginPost)
	mux.HandleFunc("GET /logout", s.handleLogout)

	// Self-registration routes exist only when enabled in config.
	if s.cfg.Auth.Registration == "open" || s.cfg.Auth.Registration == "invite" {
		mux.HandleFunc("GET /register", s.handleRegisterPage)
		mux.HandleFunc("POST /register", s.handleRegisterPost)
	}

	// Per-user settings: SSH keys, mTLS certs, API tokens.
	mux.HandleFunc("GET /settings/keys", s.handleSettingsKeys)
	mux.HandleFunc("POST /settings/keys", s.handleSettingsAddKey)
	mux.HandleFunc("DELETE /settings/keys/{id}", s.handleSettingsDeleteKey)

	mux.HandleFunc("GET /settings/mtls", s.handleSettingsMTLSCerts)
	mux.HandleFunc("POST /settings/mtls", s.handleSettingsAddMTLSCert)
	mux.HandleFunc("DELETE /settings/mtls/{id}", s.handleSettingsDeleteMTLSCert)

	// NOTE(review): GET uses /settings/token (singular) while POST and
	// DELETE use /settings/tokens — confirm this asymmetry is intended.
	mux.HandleFunc("GET /settings/token", s.handleSettingsToken)
	mux.HandleFunc("POST /settings/tokens", s.handleSettingsCreateToken)
	mux.HandleFunc("DELETE /settings/tokens/{id}", s.handleSettingsDeleteToken)

	// Admin: repos, users, invites.
	mux.HandleFunc("POST /admin/repos", s.handleAdminCreateRepo)
	mux.HandleFunc("DELETE /admin/repos/{name}", s.handleAdminDeleteRepo)

	mux.HandleFunc("GET /admin/users", s.handleAdminUsers)
	mux.HandleFunc("POST /admin/users", s.handleAdminCreateUser)
	mux.HandleFunc("DELETE /admin/users/{id}", s.handleAdminDeleteUser)

	mux.HandleFunc("GET /admin/invites", s.handleAdminInvites)
	mux.HandleFunc("POST /admin/invites", s.handleAdminCreateInvite)
	mux.HandleFunc("DELETE /admin/invites/{id}", s.handleAdminDeleteInvite)

	// Sync protocol endpoints (all methods) proxied per repo.
	mux.HandleFunc("/{repo}/arche/v1/", s.handleSyncProxy)

	// Issues.
	mux.HandleFunc("GET /{repo}/issues", s.handleRepoIssues)
	mux.HandleFunc("POST /{repo}/issues", s.handleRepoCreateIssue)
	mux.HandleFunc("GET /{repo}/issue", s.handleRepoIssue)
	mux.HandleFunc("POST /{repo}/issue/comment", s.handleRepoAddComment)
	mux.HandleFunc("POST /{repo}/issue/status", s.handleRepoSetStatus)

	// Wiki.
	mux.HandleFunc("GET /{repo}/wiki", s.handleRepoWikiList)
	mux.HandleFunc("GET /{repo}/wiki/{title}", s.handleRepoWikiPage)
	mux.HandleFunc("POST /{repo}/wiki/{title}", s.handleRepoWikiSave)

	// Repo settings, collaborators, webhooks.
	mux.HandleFunc("GET /{repo}/settings", s.handleRepoSettingsPage)
	mux.HandleFunc("POST /{repo}/settings", s.handleRepoUpdateSettings)
	mux.HandleFunc("POST /{repo}/settings/collaborators", s.handleRepoAddCollaborator)
	mux.HandleFunc("DELETE /{repo}/settings/collaborators/{id}", s.handleRepoRemoveCollaborator)
	mux.HandleFunc("POST /{repo}/settings/delete", s.handleRepoDeleteRepo)

	mux.HandleFunc("GET /{repo}/settings/webhooks", s.handleRepoWebhooks)
	mux.HandleFunc("POST /{repo}/settings/webhooks", s.handleRepoCreateWebhook)
	mux.HandleFunc("DELETE /{repo}/settings/webhooks/{id}", s.handleRepoDeleteWebhook)
	mux.HandleFunc("GET /{repo}/settings/webhooks/{id}/deliveries", s.handleWebhookDeliveries)
	mux.HandleFunc("POST /{repo}/settings/webhooks/{id}/deliveries/{delivery}/replay", s.handleWebhookReplay)
	// Repo browsing: log, commits, tree, files, stacks.
	mux.HandleFunc("GET /{repo}/log", s.handleRepoLog)
	mux.HandleFunc("GET /{repo}/commit", s.handleRepoCommit)
	mux.HandleFunc("GET /{repo}/tree", s.handleRepoTree)
	mux.HandleFunc("GET /{repo}/file", s.handleRepoFile)
	mux.HandleFunc("GET /{repo}/stacks", s.handleRepoStacks)
	mux.HandleFunc("GET /{repo}/stacks/{stackid}", s.handleRepoStackDetail)
	mux.HandleFunc("POST /{repo}/stacks/{stackid}/reviews/{changeid}", s.handleStackSetReview)
	mux.HandleFunc("GET /{repo}", s.handleRepoHome)

	// Exact match on "/" only (Go 1.22 {$} pattern).
	mux.HandleFunc("GET /{$}", s.handleIndex)

	return s.recoverMiddleware(s.logMiddleware(mux))
}
+
// statusRecorder wraps an http.ResponseWriter to capture the status
// code written by a handler, for request logging.
type statusRecorder struct {
	http.ResponseWriter
	code int // last status passed to WriteHeader (callers default it to 200)
}

// WriteHeader records the status code before delegating to the wrapped
// writer.
func (r *statusRecorder) WriteHeader(code int) {
	r.code = code
	r.ResponseWriter.WriteHeader(code)
}
+
+func (s *forgeServer) logMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		rec := &statusRecorder{ResponseWriter: w, code: http.StatusOK}
+		next.ServeHTTP(rec, r)
+		s.log.Info("request", "method", r.Method, "path", r.URL.Path, "status", rec.code)
+	})
+}
+
+func (s *forgeServer) recoverMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer func() {
+			if rec := recover(); rec != nil {
+				s.log.Error("handler panic",
+					"panic", rec,
+					"method", r.Method,
+					"path", r.URL.Path,
+					"stack", string(debug.Stack()),
+				)
+				http.Error(w, "internal server error", http.StatusInternalServerError)
+			}
+		}()
+		next.ServeHTTP(w, r)
+	})
+}
+
+func (s *forgeServer) render(w http.ResponseWriter, page string, data any) {
+	funcs := template.FuncMap{
+		"markdown":         markdown.Render,
+		"registrationOpen": func() bool { return s.cfg.Auth.Registration == "open" },
+	}
+	t, err := template.New("").Funcs(funcs).ParseFS(tmplFS,
+		"templates/srv_base.html",
+		"templates/"+page,
+	)
+	if err != nil {
+		http.Error(w, "template: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "text/html; charset=utf-8")
+	if err := t.ExecuteTemplate(w, page, data); err != nil {
+		s.log.Error("render failed", "page", page, "err", err)
+	}
+}
+
// fullHex renders a 32-byte object ID as its full 64-char hex string.
func fullHex(id [32]byte) string { return hex.EncodeToString(id[:]) }

// shortHex renders the abbreviated 8-char hex prefix of an object ID.
func shortHex(id [32]byte) string { return hex.EncodeToString(id[:4]) }
+func phaseClass(p object.Phase) string { return strings.ToLower(p.String()) }
+
+func bookmarkMap(r *repo.Repo) map[string][]string {
+	bms, _ := r.Store.ListBookmarks()
+	m := make(map[string][]string, len(bms))
+	for _, b := range bms {
+		k := fullHex(b.CommitID)
+		m[k] = append(m[k], b.Name)
+	}
+	return m
+}

internal/archesrv/server_test.go [A]
--- /dev/null
+++ b/internal/archesrv/server_test.go
@@ -1,0 +1,282 @@
+package archesrv
+
+import (
+	"io"
+	"log/slog"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/syncpkg"
+	"arche/internal/wc"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+func newTestServer(t *testing.T) (*forgeServer, *httptest.Server) {
+	t.Helper()
+	dir := t.TempDir()
+	db, err := openDB(filepath.Join(dir, "server.db"))
+	if err != nil {
+		t.Fatalf("openDB: %v", err)
+	}
+	t.Cleanup(func() { db.Close() })
+
+	cfg := DefaultConfig()
+	cfg.Storage.DataDir = dir
+
+	s := &forgeServer{
+		db:  db,
+		cfg: cfg,
+		log: slog.New(slog.NewTextHandler(io.Discard, nil)),
+	}
+	ts := httptest.NewServer(s.routes())
+	t.Cleanup(ts.Close)
+	return s, ts
+}
+
// makeLocalRepoWithCommit initialises a throwaway repo containing one
// file, snaps an initial commit, and points a "main" bookmark at it.
func makeLocalRepoWithCommit(t *testing.T) *repo.Repo {
	t.Helper()
	dir := t.TempDir()
	r, err := repo.Init(dir)
	if err != nil {
		t.Fatalf("repo.Init local: %v", err)
	}
	t.Cleanup(func() { r.Close() })

	if err := os.WriteFile(filepath.Join(dir, "hello.txt"), []byte("hello arche\n"), 0o644); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}

	w := wc.New(r)
	_, commitID, err := w.Snap("initial commit")
	if err != nil {
		t.Fatalf("Snap: %v", err)
	}

	// Bookmark "main" is set inside a store transaction; roll back on
	// failure so the store isn't left with a dangling transaction.
	tx, err := r.Store.Begin()
	if err != nil {
		t.Fatalf("Store.Begin: %v", err)
	}
	if err := r.Store.SetBookmark(tx, store.Bookmark{Name: "main", CommitID: commitID}); err != nil {
		r.Store.Rollback(tx)
		t.Fatalf("SetBookmark: %v", err)
	}
	if err := r.Store.Commit(tx); err != nil {
		t.Fatalf("Store.Commit: %v", err)
	}
	return r
}
+
+func TestForgeServer_SetupPage(t *testing.T) {
+	_, ts := newTestServer(t)
+	resp, err := http.Get(ts.URL + "/setup")
+	if err != nil {
+		t.Fatalf("GET /setup: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("GET /setup: want 200, got %d", resp.StatusCode)
+	}
+}
+
// TestForgeServer_SyncPushPull pushes a local repo to the server over
// the sync API with a valid token, pulls it into a second fresh repo,
// and verifies every bookmark round-trips with identical commit IDs.
func TestForgeServer_SyncPushPull(t *testing.T) {
	s, ts := newTestServer(t)

	admin, err := s.db.CreateUser("admin", "adminpass", true)
	if err != nil {
		t.Fatalf("CreateUser: %v", err)
	}
	token, err := s.db.CreateAPIToken(admin.ID, "test")
	if err != nil {
		t.Fatalf("CreateAPIToken: %v", err)
	}

	// Server-side repo: both the DB record and the on-disk store.
	if _, err := s.db.CreateRepo("testrepo", "test repo", "private"); err != nil {
		t.Fatalf("CreateRepo: %v", err)
	}
	if _, err := repo.Init(filepath.Join(s.dataDir(), "testrepo")); err != nil {
		t.Fatalf("repo.Init server side: %v", err)
	}

	localRepo := makeLocalRepoWithCommit(t)

	remoteURL := ts.URL + "/arche/v1/testrepo"

	pushClient := syncpkg.NewClient(localRepo, remoteURL, token)
	if err := pushClient.Push(); err != nil {
		t.Fatalf("Push: %v", err)
	}

	// Pull into a brand-new repo and compare bookmarks with the source.
	pullDir := t.TempDir()
	pullRepo, err := repo.Init(pullDir)
	if err != nil {
		t.Fatalf("repo.Init pull: %v", err)
	}
	defer pullRepo.Close()

	pullClient := syncpkg.NewClient(pullRepo, remoteURL, token)
	if err := pullClient.Pull(); err != nil {
		t.Fatalf("Pull: %v", err)
	}

	localBMs, err := localRepo.Store.ListBookmarks()
	if err != nil {
		t.Fatalf("ListBookmarks local: %v", err)
	}
	pullBMs, err := pullRepo.Store.ListBookmarks()
	if err != nil {
		t.Fatalf("ListBookmarks pulled: %v", err)
	}
	if len(pullBMs) == 0 {
		t.Fatal("pulled repo has no bookmarks after Pull()")
	}
	// Every local bookmark must appear in the pulled repo with the same
	// name and commit ID.
	for _, lb := range localBMs {
		found := false
		for _, pb := range pullBMs {
			if lb.Name == pb.Name && lb.CommitID == pb.CommitID {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("bookmark %q (commitID %x) missing from pulled repo", lb.Name, lb.CommitID[:8])
		}
	}

	// NOTE(review): purpose of this trailing sleep is unclear from here —
	// presumably it lets some async server-side work settle before
	// cleanup; confirm whether it is needed and remove if not.
	time.Sleep(50 * time.Millisecond)
}
+
+func TestForgeServer_SyncUnauthorized(t *testing.T) {
+	s, ts := newTestServer(t)
+
+	admin, _ := s.db.CreateUser("admin", "adminpass", true)
+	_, _ = s.db.CreateAPIToken(admin.ID, "test")
+	_, _ = s.db.CreateRepo("privaterepo", "", "private")
+	_, _ = repo.Init(filepath.Join(s.dataDir(), "privaterepo"))
+
+	resp, err := http.Get(ts.URL + "/arche/v1/privaterepo/arche/v1/info")
+	if err != nil {
+		t.Fatalf("GET info: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Errorf("expected 401 for unauthenticated read, got %d", resp.StatusCode)
+	}
+}
+
+func TestForgeServer_SyncWrongToken(t *testing.T) {
+	s, ts := newTestServer(t)
+
+	admin, _ := s.db.CreateUser("admin", "adminpass", true)
+	_, _ = s.db.CreateAPIToken(admin.ID, "test")
+	_, _ = s.db.CreateRepo("privaterepo", "", "private")
+	_, _ = repo.Init(filepath.Join(s.dataDir(), "privaterepo"))
+
+	req, _ := http.NewRequest(http.MethodGet, ts.URL+"/arche/v1/privaterepo/arche/v1/info", nil)
+	req.Header.Set("Authorization", "Bearer wrongtoken")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		t.Fatalf("GET info: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Errorf("expected 401 for wrong token, got %d", resp.StatusCode)
+	}
+}
+
// TestForgeServer_AdminCRUD logs in as the admin and exercises the
// admin creation endpoints: creating a repo and creating a user, then
// verifying both landed in the database.
func TestForgeServer_AdminCRUD(t *testing.T) {
	s, ts := newTestServer(t)

	_, err := s.db.CreateUser("admin", "adminpass", true)
	if err != nil {
		t.Fatalf("CreateUser: %v", err)
	}

	// Cookie-carrying client that does not follow redirects, so the
	// login response's status can be checked directly.
	jar := newCookieJar()
	client := &http.Client{
		Jar: jar,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}

	// Log in; a successful login redirects (302/303).
	loginResp, err := client.PostForm(ts.URL+"/login", map[string][]string{
		"username": {"admin"},
		"password": {"adminpass"},
	})
	if err != nil {
		t.Fatalf("POST /login: %v", err)
	}
	loginResp.Body.Close()
	if loginResp.StatusCode != http.StatusFound && loginResp.StatusCode != http.StatusSeeOther {
		t.Fatalf("POST /login: want redirect (302/303), got %d", loginResp.StatusCode)
	}

	// Create a repo via the admin endpoint.
	repoResp, err := client.PostForm(ts.URL+"/admin/repos", map[string][]string{
		"name":        {"myrepo"},
		"description": {"a test repo"},
		"visibility":  {"private"},
	})
	if err != nil {
		t.Fatalf("POST /admin/repos: %v", err)
	}
	repoResp.Body.Close()
	if repoResp.StatusCode >= 400 {
		t.Errorf("POST /admin/repos: got %d", repoResp.StatusCode)
	}

	// The repo record must now exist with the submitted description.
	rec, err := s.db.GetRepo("myrepo")
	if err != nil || rec == nil {
		t.Fatalf("GetRepo: repo not found after create: %v", err)
	}
	if rec.Description != "a test repo" {
		t.Errorf("Description: want %q, got %q", "a test repo", rec.Description)
	}

	// Create a second user via the admin endpoint.
	userResp, err := client.PostForm(ts.URL+"/admin/users", map[string][]string{
		"username": {"bob"},
		"password": {"bobpass"},
	})
	if err != nil {
		t.Fatalf("POST /admin/users: %v", err)
	}
	userResp.Body.Close()
	if userResp.StatusCode >= 400 {
		t.Errorf("POST /admin/users: got %d", userResp.StatusCode)
	}

	// And the new user must show up in the listing.
	users, err := s.db.ListUsers()
	if err != nil {
		t.Fatalf("ListUsers: %v", err)
	}
	found := false
	for _, u := range users {
		if u.Username == "bob" {
			found = true
		}
	}
	if !found {
		t.Error("user 'bob' not found after admin create")
	}
}
+
+type simpleCookieJar struct {
+	cookies []*http.Cookie
+}
+
+func newCookieJar() *simpleCookieJar { return &simpleCookieJar{} }
+
+func (j *simpleCookieJar) SetCookies(_ *url.URL, cookies []*http.Cookie) {
+	j.cookies = append(j.cookies, cookies...)
+}
+
+func (j *simpleCookieJar) Cookies(_ *url.URL) []*http.Cookie {
+	return j.cookies
+}

internal/archesrv/serverhooks.go [A]
--- /dev/null
+++ b/internal/archesrv/serverhooks.go
@@ -1,0 +1,65 @@
+package archesrv
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+	"os"
+	"os/exec"
+	"strings"
+	"time"
+)
+
+// runPreReceiveHook executes a pre-receive/update hook script and blocks
+// until it exits. A non-nil return rejects the ref update. An empty
+// scriptPath is a no-op.
+//
+// The triplet "old new ref" is written to the hook's stdin (one line,
+// git-style) and the same values are also passed as argv (ref, old, new).
+// Hook output goes straight to the server's stdout/stderr.
+func runPreReceiveHook(scriptPath, refName, oldHex, newHex string, timeoutSec int) error {
+	if scriptPath == "" {
+		return nil
+	}
+
+	// Default to 30s when the configured timeout is missing or invalid.
+	if timeoutSec <= 0 {
+		timeoutSec = 30
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutSec)*time.Second)
+	defer cancel()
+
+	input := fmt.Sprintf("%s %s %s\n", oldHex, newHex, refName)
+	c := exec.CommandContext(ctx, scriptPath, refName, oldHex, newHex)
+	c.Stdin = strings.NewReader(input)
+	c.Stdout = os.Stdout
+	c.Stderr = os.Stderr
+	if err := c.Run(); err != nil {
+		// Any context error at this point is reported as a timeout.
+		if ctx.Err() != nil {
+			return fmt.Errorf("hook %s timed out after %ds", scriptPath, timeoutSec)
+		}
+		return fmt.Errorf("hook %s: %w", scriptPath, err)
+	}
+	return nil
+}
+
+// runPostReceiveHook runs a post-receive hook best-effort: failures and
+// timeouts are logged, never propagated, so a misbehaving hook cannot
+// fail a push that has already been applied. An empty scriptPath is a
+// no-op. Stdin/argv conventions match runPreReceiveHook.
+func runPostReceiveHook(scriptPath, refName, oldHex, newHex string, timeoutSec int) {
+	if scriptPath == "" {
+		return
+	}
+	if timeoutSec <= 0 {
+		timeoutSec = 30
+	}
+	input := fmt.Sprintf("%s %s %s\n", oldHex, newHex, refName)
+	c := exec.Command(scriptPath, refName, oldHex, newHex)
+	c.Stdin = strings.NewReader(input)
+	c.Stdout = os.Stdout
+	c.Stderr = os.Stderr
+
+	// Run in a goroutine so the timeout can be enforced here; after a
+	// kill, the goroutine's c.Run() still waits and reaps the child.
+	done := make(chan error, 1)
+	go func() { done <- c.Run() }()
+
+	select {
+	case err := <-done:
+		if err != nil {
+			slog.Error("post-receive hook failed", "script", scriptPath, "err", err)
+		}
+	case <-time.After(time.Duration(timeoutSec) * time.Second):
+		if c.Process != nil {
+			c.Process.Kill() //nolint:errcheck
+		}
+		slog.Error("post-receive hook timed out", "script", scriptPath, "timeout_sec", timeoutSec)
+	}
+}

internal/archesrv/settings_test.go [A]
--- /dev/null
+++ b/internal/archesrv/settings_test.go
@@ -1,0 +1,279 @@
+package archesrv
+
+import (
+	"crypto/ed25519"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// TestForgeServer_Settings_TokenCRUD creates an API token via the
+// settings form, confirms it is persisted, then deletes it over HTTP and
+// confirms it is gone.
+func TestForgeServer_Settings_TokenCRUD(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+
+	resp, err := client.PostForm(ts.URL+"/settings/tokens", map[string][]string{
+		"label": {"ci"},
+	})
+	if err != nil {
+		t.Fatalf("POST /settings/tokens: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("create token: got %d", resp.StatusCode)
+	}
+
+	// The token must have landed in the database for the admin user.
+	admin, _, _ := s.db.GetUserByName("admin")
+	tokens, err := s.db.ListAPITokens(admin.ID)
+	if err != nil {
+		t.Fatalf("ListAPITokens: %v", err)
+	}
+	if len(tokens) == 0 {
+		t.Fatal("expected at least one token")
+	}
+
+	// Delete the first token through the HTTP endpoint.
+	req, _ := http.NewRequest(http.MethodDelete,
+		fmt.Sprintf("%s/settings/tokens/%d", ts.URL, tokens[0].ID), nil)
+	resp2, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("DELETE token: %v", err)
+	}
+	resp2.Body.Close()
+	if resp2.StatusCode >= 400 {
+		t.Errorf("delete token: got %d", resp2.StatusCode)
+	}
+
+	tokens2, _ := s.db.ListAPITokens(admin.ID)
+	if len(tokens2) != 0 {
+		t.Error("token should be gone after delete")
+	}
+}
+
+// TestForgeServer_Settings_TokenPage verifies the token settings page
+// renders with a 200 for an authenticated admin.
+func TestForgeServer_Settings_TokenPage(t *testing.T) {
+	srv, ts := newTestServer(t)
+	_, httpClient := loginAsAdmin(t, srv, ts)
+
+	res, err := httpClient.Get(ts.URL + "/settings/token")
+	if err != nil {
+		t.Fatalf("GET /settings/token: %v", err)
+	}
+	defer res.Body.Close()
+	if got := res.StatusCode; got != http.StatusOK {
+		t.Errorf("token settings page: want 200, got %d", got)
+	}
+}
+
+// TestForgeServer_Settings_SSHKey_CRUD uploads a freshly generated SSH
+// public key through the settings form, verifies it is stored with its
+// label, deletes it over HTTP, and verifies it is gone.
+func TestForgeServer_Settings_SSHKey_CRUD(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+
+	pubKeyStr := genTestSSHPubKey(t)
+
+	resp, err := client.PostForm(ts.URL+"/settings/keys", map[string][]string{
+		"label":      {"laptop"},
+		"public_key": {pubKeyStr},
+	})
+	if err != nil {
+		t.Fatalf("POST /settings/keys: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("add SSH key: got %d", resp.StatusCode)
+	}
+
+	// The key must be persisted for the admin user with its label.
+	admin, _, _ := s.db.GetUserByName("admin")
+	keys, err := s.db.ListSSHKeys(admin.ID)
+	if err != nil {
+		t.Fatalf("ListSSHKeys: %v", err)
+	}
+	if len(keys) == 0 {
+		t.Fatal("expected at least one key")
+	}
+	if keys[0].Label != "laptop" {
+		t.Errorf("key label: want laptop, got %q", keys[0].Label)
+	}
+
+	req, _ := http.NewRequest(http.MethodDelete,
+		fmt.Sprintf("%s/settings/keys/%d", ts.URL, keys[0].ID), nil)
+	resp2, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("DELETE key: %v", err)
+	}
+	resp2.Body.Close()
+	if resp2.StatusCode >= 400 {
+		t.Errorf("delete SSH key: got %d", resp2.StatusCode)
+	}
+
+	keys2, _ := s.db.ListSSHKeys(admin.ID)
+	if len(keys2) != 0 {
+		t.Error("SSH key should be gone after deletion")
+	}
+}
+
+// TestForgeServer_Settings_SSHKey_InvalidKeyRejected verifies that a
+// syntactically invalid public key is rejected: nothing is stored and
+// the server responds with something other than a 5xx (error form or 4xx).
+func TestForgeServer_Settings_SSHKey_InvalidKeyRejected(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+
+	resp, err := client.PostForm(ts.URL+"/settings/keys", map[string][]string{
+		"label":      {"bad"},
+		"public_key": {"not-a-real-ssh-key"},
+	})
+	if err != nil {
+		t.Fatalf("POST: %v", err)
+	}
+	// Drain and close so the underlying connection can be reused.
+	// (Previously the body was read into a variable and discarded with
+	// `_ = body` — dead code.)
+	io.Copy(io.Discard, resp.Body) //nolint:errcheck
+	resp.Body.Close()
+
+	admin, _, _ := s.db.GetUserByName("admin")
+	keys, _ := s.db.ListSSHKeys(admin.ID)
+	if len(keys) != 0 {
+		t.Error("invalid key should not be stored")
+	}
+	if resp.StatusCode >= 500 {
+		t.Errorf("want 200 (error form) or 4xx, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Settings_KeysPage verifies the SSH keys settings page
+// renders with a 200 for an authenticated admin.
+func TestForgeServer_Settings_KeysPage(t *testing.T) {
+	srv, ts := newTestServer(t)
+	_, httpClient := loginAsAdmin(t, srv, ts)
+
+	res, err := httpClient.Get(ts.URL + "/settings/keys")
+	if err != nil {
+		t.Fatalf("GET /settings/keys: %v", err)
+	}
+	defer res.Body.Close()
+	if got := res.StatusCode; got != http.StatusOK {
+		t.Errorf("keys page: want 200, got %d", got)
+	}
+}
+
+// TestForgeServer_Settings_RepoSettingsPage verifies the per-repo
+// settings page renders with a 200 for an authenticated admin.
+func TestForgeServer_Settings_RepoSettingsPage(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.Get(ts.URL + "/myrepo/settings")
+	if err != nil {
+		t.Fatalf("GET /myrepo/settings: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("repo settings page: want 200, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Settings_UpdateRepoDescription updates a repo's
+// description via the settings form and verifies the change persists.
+func TestForgeServer_Settings_UpdateRepoDescription(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/settings", map[string][]string{
+		"description": {"my updated description"},
+		"visibility":  {"private"},
+	})
+	if err != nil {
+		t.Fatalf("POST settings: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("update settings: got %d", resp.StatusCode)
+	}
+
+	// Fail cleanly instead of nil-dereferencing if the lookup fails
+	// (previously the error was discarded and rec used unchecked).
+	rec, err := s.db.GetRepo("myrepo")
+	if err != nil || rec == nil {
+		t.Fatalf("GetRepo: %v", err)
+	}
+	if rec.Description != "my updated description" {
+		t.Errorf("description: want 'my updated description', got %q", rec.Description)
+	}
+}
+
+// TestForgeServer_Settings_UpdateRepoVisibility flips a private repo to
+// public via the settings form and verifies anonymous access transitions
+// from 401 to 200.
+func TestForgeServer_Settings_UpdateRepoVisibility(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	// Anonymous requests must be rejected while the repo is private.
+	// (Previously the http.Get errors were discarded and the responses
+	// dereferenced unchecked — a nil-pointer panic on any network error.)
+	r0, err := http.Get(ts.URL + "/myrepo/issues")
+	if err != nil {
+		t.Fatalf("GET /myrepo/issues: %v", err)
+	}
+	r0.Body.Close()
+	if r0.StatusCode != http.StatusUnauthorized {
+		t.Fatalf("expected 401 for private repo, got %d", r0.StatusCode)
+	}
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/settings", map[string][]string{
+		"description": {""},
+		"visibility":  {"public"},
+	})
+	if err != nil {
+		t.Fatalf("POST settings: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Fatalf("update settings: got %d", resp.StatusCode)
+	}
+
+	// After the change, the same anonymous request should succeed.
+	r2, err := http.Get(ts.URL + "/myrepo/issues")
+	if err != nil {
+		t.Fatalf("GET /myrepo/issues: %v", err)
+	}
+	r2.Body.Close()
+	if r2.StatusCode != http.StatusOK {
+		t.Errorf("after making public, anon should get 200, got %d", r2.StatusCode)
+	}
+}
+
+// TestForgeServer_Settings_DeleteRepo deletes a repo through the
+// settings endpoint and verifies its database record is gone.
+func TestForgeServer_Settings_DeleteRepo(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/settings/delete", nil)
+	if err != nil {
+		t.Fatalf("POST /myrepo/settings/delete: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("repo delete: got %d", resp.StatusCode)
+	}
+
+	// A nil record (regardless of error) means the delete took effect.
+	rec, _ := s.db.GetRepo("myrepo")
+	if rec != nil {
+		t.Error("repo should not exist after deletion")
+	}
+}
+
+// TestForgeServer_Settings_NonAdminCannotDeleteRepo verifies that a
+// collaborator with write access (but no admin rights) cannot delete a
+// repository through the settings endpoint.
+func TestForgeServer_Settings_NonAdminCannotDeleteRepo(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "adminpass", true) //nolint:errcheck
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	alice, _ := s.db.CreateUser("alice", "pass", false)
+	rec, _ := s.db.GetRepo("myrepo")
+	s.db.SetPermission(rec.ID, alice.ID, "write") //nolint:errcheck
+
+	aliceClient := loginAs(t, ts, "alice", "pass")
+	resp, err := aliceClient.PostForm(ts.URL+"/myrepo/settings/delete", nil)
+	if err != nil {
+		t.Fatalf("POST: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode < 400 {
+		t.Errorf("non-admin should not delete repo, got %d", resp.StatusCode)
+	}
+
+	// The repo must still exist after the rejected delete. (A previous
+	// empty `if err != nil {}` no-op statement here has been removed.)
+	rec2, _ := s.db.GetRepo("myrepo")
+	if rec2 == nil {
+		t.Error("repo should still exist — non-admin should not have deleted it")
+	}
+}
+
+// genTestSSHPubKey returns a freshly generated ed25519 public key in
+// authorized_keys format, for exercising the key-upload endpoints.
+func genTestSSHPubKey(t *testing.T) string {
+	t.Helper()
+	pubKey, _, err := ed25519.GenerateKey(rand.Reader)
+	if err != nil {
+		t.Fatalf("generate ed25519 key: %v", err)
+	}
+	wireKey, err := ssh.NewPublicKey(pubKey)
+	if err != nil {
+		t.Fatalf("ssh.NewPublicKey: %v", err)
+	}
+	line := ssh.MarshalAuthorizedKey(wireKey)
+	return strings.TrimSpace(string(line))
+}

internal/archesrv/signatures.go [A]
--- /dev/null
+++ b/internal/archesrv/signatures.go
@@ -1,0 +1,116 @@
+package archesrv
+
+import (
+	"bytes"
+	"encoding/hex"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// CommitSigStatus is a row from the commit_signatures table: the cached
+// verification result for one commit.
+type CommitSigStatus struct {
+	CommitID   [32]byte
+	Status     string // "verified" | "unknown_key" | "invalid" | "unsigned"
+	KeyID      string // SHA256 fingerprint of the verifying key, "" otherwise
+	VerifiedAt time.Time
+}
+
+// RecordCommitSignature verifies a commit's SSH signature against the
+// pushing user's registered keys and caches the result as one row in
+// commit_signatures. Already-recorded commits are skipped.
+// NOTE(review): the r parameter is unused in this function — confirm
+// whether it can be dropped at the call sites or is reserved for later.
+func (d *DB) RecordCommitSignature(r *repo.Repo, commitID [32]byte, c *object.Commit, userID int64) error {
+	// Skip the (potentially expensive) verification below when a row for
+	// this commit already exists; scan error is deliberately ignored.
+	var existing int
+	_ = d.db.QueryRow("SELECT COUNT(*) FROM commit_signatures WHERE commit_id=?", commitID[:]).Scan(&existing)
+	if existing > 0 {
+		return nil
+	}
+
+	status := "unsigned"
+	keyID := ""
+
+	if len(c.CommitSig) > 0 {
+		body := object.CommitBodyForSigning(c)
+
+		// Try every key registered by the pushing user; first match wins.
+		keys, err := d.ListSSHKeys(userID)
+		if err == nil {
+			for _, k := range keys {
+				pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k.PublicKey))
+				if err != nil {
+					continue
+				}
+				if err := object.VerifyCommitSig(body, c.CommitSig, pub); err == nil {
+					status = "verified"
+					keyID = ssh.FingerprintSHA256(pub)
+					break
+				}
+			}
+		}
+
+		// Signed but not verified by a known key: distinguish a
+		// well-formed signature ("unknown_key") from garbage ("invalid").
+		if status == "unsigned" {
+			var sig ssh.Signature
+			if err := ssh.Unmarshal(c.CommitSig, &sig); err == nil {
+				status = "unknown_key"
+			} else {
+				status = "invalid"
+			}
+		}
+	}
+
+	// INSERT OR IGNORE guards against a concurrent writer that recorded
+	// this commit between the existence check above and this insert.
+	_, err := d.db.Exec(
+		"INSERT OR IGNORE INTO commit_signatures (commit_id, status, key_id, verified_at) VALUES (?,?,?,?)",
+		commitID[:], status, keyID, time.Now().Unix(),
+	)
+	return err
+}
+
+// GetCommitSigStatus returns the recorded signature status for a commit,
+// or "unsigned" when no row exists (or the query fails).
+func (d *DB) GetCommitSigStatus(commitID [32]byte) string {
+	row := d.db.QueryRow(
+		"SELECT status FROM commit_signatures WHERE commit_id=?", commitID[:],
+	)
+	var status string
+	err := row.Scan(&status)
+	if err != nil {
+		return "unsigned"
+	}
+	return status
+}
+
+// collectNewCommitIDs walks the commit graph breadth-first from newHex
+// and returns every reachable commit, stopping at oldHex (the previous
+// tip). Both arguments are 64-char hex encodings of 32-byte hashes; an
+// unparsable newHex yields nil, and an unparsable/absent oldHex leaves
+// oldID zero so nothing is excluded.
+// NOTE(review): the walk only stops *at* oldID — commits reachable via
+// other parents of already-pushed history may be over-reported; confirm
+// callers tolerate that.
+func collectNewCommitIDs(r *repo.Repo, oldHex, newHex string) [][32]byte {
+	if len(newHex) != 64 {
+		return nil
+	}
+	newBytes, err := hex.DecodeString(newHex)
+	if err != nil || len(newBytes) != 32 {
+		return nil
+	}
+	var newID [32]byte
+	copy(newID[:], newBytes)
+
+	var oldID [32]byte
+	if len(oldHex) == 64 {
+		if b, err := hex.DecodeString(oldHex); err == nil && len(b) == 32 {
+			copy(oldID[:], b)
+		}
+	}
+
+	// BFS from the new tip.
+	seen := make(map[[32]byte]bool)
+	queue := [][32]byte{newID}
+	var result [][32]byte
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		if seen[id] || bytes.Equal(id[:], oldID[:]) {
+			continue
+		}
+		seen[id] = true
+		result = append(result, id)
+		// Unreadable commits are skipped silently; their parents are
+		// simply not explored.
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			continue
+		}
+		for _, p := range c.Parents {
+			if !seen[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+	return result
+}

internal/archesrv/sql/001_initial.sql [A]
--- /dev/null
+++ b/internal/archesrv/sql/001_initial.sql
@@ -1,0 +1,42 @@
+-- Baseline schema for the arche forge server.
+
+-- Tracks which numbered migration files have been applied.
+CREATE TABLE IF NOT EXISTS schema_migrations (
+    version    INTEGER PRIMARY KEY,
+    applied_at INTEGER NOT NULL  -- unix seconds
+);
+
+-- One row per hosted repository.
+CREATE TABLE IF NOT EXISTS repos (
+    id          INTEGER PRIMARY KEY,
+    name        TEXT    NOT NULL UNIQUE,
+    description TEXT    NOT NULL DEFAULT '',
+    visibility  TEXT    NOT NULL DEFAULT 'private', -- 'public' | 'private'
+    created_at  INTEGER NOT NULL
+);
+
+-- Accounts; is_admin grants server-wide administration.
+CREATE TABLE IF NOT EXISTS users (
+    id            INTEGER PRIMARY KEY,
+    username      TEXT    NOT NULL UNIQUE,
+    password_hash TEXT    NOT NULL,
+    is_admin      INTEGER NOT NULL DEFAULT 0,
+    created_at    INTEGER NOT NULL
+);
+
+-- Long-lived API tokens; only the hash is stored.
+CREATE TABLE IF NOT EXISTS api_tokens (
+    id         INTEGER PRIMARY KEY,
+    user_id    INTEGER NOT NULL REFERENCES users(id),
+    token_hash TEXT    NOT NULL UNIQUE,
+    label      TEXT    NOT NULL DEFAULT '',
+    created_at INTEGER NOT NULL
+);
+
+-- Per-user access level on a repo.
+CREATE TABLE IF NOT EXISTS repo_permissions (
+    repo_id INTEGER NOT NULL REFERENCES repos(id),
+    user_id INTEGER NOT NULL REFERENCES users(id),
+    role    TEXT    NOT NULL, -- 'read' | 'write' | 'admin'
+    PRIMARY KEY (repo_id, user_id)
+);
+
+-- Browser login sessions, expired by timestamp.
+CREATE TABLE IF NOT EXISTS sessions (
+    id         INTEGER PRIMARY KEY,
+    user_id    INTEGER NOT NULL REFERENCES users(id),
+    token      TEXT    NOT NULL UNIQUE,
+    expires_at INTEGER NOT NULL
+);

internal/archesrv/sql/002_ssh_webhooks_invites.sql [A]
--- /dev/null
+++ b/internal/archesrv/sql/002_ssh_webhooks_invites.sql
@@ -1,0 +1,37 @@
+-- Migration 002: SSH keys, webhooks (with delivery log), invite tokens.
+
+-- Public keys users may authenticate with over SSH.
+CREATE TABLE IF NOT EXISTS ssh_keys (
+    id         INTEGER PRIMARY KEY,
+    user_id    INTEGER NOT NULL REFERENCES users(id),
+    label      TEXT    NOT NULL DEFAULT '',
+    public_key TEXT    NOT NULL UNIQUE,
+    added_at   INTEGER NOT NULL
+);
+
+-- Outbound webhooks registered per repository.
+CREATE TABLE IF NOT EXISTS webhooks (
+    id         INTEGER PRIMARY KEY,
+    repo_id    INTEGER NOT NULL REFERENCES repos(id),
+    url        TEXT    NOT NULL,
+    secret     TEXT    NOT NULL,
+    events     TEXT    NOT NULL DEFAULT 'push',
+    active     INTEGER NOT NULL DEFAULT 1,
+    created_at INTEGER NOT NULL
+);
+
+-- One row per delivery attempt, for debugging endpoint failures.
+CREATE TABLE IF NOT EXISTS webhook_deliveries (
+    id            INTEGER PRIMARY KEY,
+    webhook_id    INTEGER NOT NULL REFERENCES webhooks(id),
+    event         TEXT    NOT NULL,
+    payload       TEXT    NOT NULL,
+    response_code INTEGER,
+    response_body TEXT,
+    error         TEXT,
+    delivered_at  INTEGER NOT NULL
+);
+
+-- Single-use registration links created by admins.
+CREATE TABLE IF NOT EXISTS invite_tokens (
+    id         INTEGER PRIMARY KEY,
+    token      TEXT    NOT NULL UNIQUE,
+    created_by INTEGER NOT NULL REFERENCES users(id),
+    used_by    INTEGER REFERENCES users(id),
+    created_at INTEGER NOT NULL,
+    used_at    INTEGER
+);

internal/archesrv/sql/003_per_repo_hooks.sql [A]
--- /dev/null
+++ b/internal/archesrv/sql/003_per_repo_hooks.sql
@@ -1,0 +1,5 @@
+-- Per-repo shell hook opt-in
+-- allow_shell_hooks is revoked automatically when a collaborator with
+-- write or admin access is added (enforced in application logic).
+ALTER TABLE repos ADD COLUMN allow_shell_hooks INTEGER NOT NULL DEFAULT 0;
+-- Path of the repo-local post-receive script to run (empty = none).
+ALTER TABLE repos ADD COLUMN post_receive_script TEXT NOT NULL DEFAULT '';

internal/archesrv/sql/004_mtls_certs.sql [A]
--- /dev/null
+++ b/internal/archesrv/sql/004_mtls_certs.sql
@@ -1,0 +1,8 @@
+-- Client certificates for mTLS authentication, keyed by fingerprint.
+CREATE TABLE IF NOT EXISTS mtls_certs (
+    id          INTEGER PRIMARY KEY,
+    user_id     INTEGER NOT NULL REFERENCES users(id),
+    label       TEXT    NOT NULL DEFAULT '',
+    fingerprint TEXT    NOT NULL UNIQUE,
+    cert_pem    TEXT    NOT NULL,
+    added_at    INTEGER NOT NULL
+);

internal/archesrv/sql/005_commit_signatures.sql [A]
--- /dev/null
+++ b/internal/archesrv/sql/005_commit_signatures.sql
@@ -1,0 +1,6 @@
+-- Cached signature-verification results, one row per commit.
+CREATE TABLE IF NOT EXISTS commit_signatures (
+    commit_id   BLOB    PRIMARY KEY, -- 32-byte commit hash
+    status      TEXT    NOT NULL, -- 'verified' | 'unknown_key' | 'invalid' | 'unsigned'
+    key_id      TEXT    NOT NULL DEFAULT '',
+    verified_at INTEGER NOT NULL DEFAULT 0
+);

internal/archesrv/sql/006_stack_reviews.sql [A]
--- /dev/null
+++ b/internal/archesrv/sql/006_stack_reviews.sql
@@ -1,0 +1,8 @@
+-- Review state per change within a repo's stack.
+CREATE TABLE IF NOT EXISTS stack_reviews (
+    repo_id     INTEGER NOT NULL,
+    change_id   TEXT    NOT NULL,
+    status      TEXT    NOT NULL DEFAULT 'open', -- 'open' | 'reviewing' | 'approved' | 'needs-revision'
+    reviewer_id INTEGER,
+    updated_at  INTEGER NOT NULL DEFAULT 0,
+    PRIMARY KEY (repo_id, change_id)
+);

internal/archesrv/ssh.go [A]
--- /dev/null
+++ b/internal/archesrv/ssh.go
@@ -1,0 +1,247 @@
+package archesrv
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"encoding/hex"
+	"encoding/pem"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"arche/internal/syncpkg"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// RunSSH starts the SSH front door on listenAddr and serves connections
+// until the listener fails. Authentication is public-key only, checked
+// against keys stored in the database.
+func (s *forgeServer) RunSSH(listenAddr string) error {
+	hostKey, err := s.loadOrCreateHostKey()
+	if err != nil {
+		return fmt.Errorf("host key: %w", err)
+	}
+
+	cfg := &ssh.ServerConfig{
+		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
+			user, err := s.db.AuthorizeSSHKey(key)
+			if err != nil || user == nil {
+				// Generic error: does not reveal why the key was rejected.
+				return nil, fmt.Errorf("unauthorized")
+			}
+			// NOTE(review): this is the hex of the full wire-format key,
+			// not an SSH fingerprint — consider ssh.FingerprintSHA256 if
+			// consumers of "key-fp" expect one.
+			fp := hex.EncodeToString(key.Marshal())
+			// Identity travels with the connection via permission extensions.
+			return &ssh.Permissions{
+				Extensions: map[string]string{
+					"user-id":  fmt.Sprintf("%d", user.ID),
+					"username": user.Username,
+					"is-admin": fmt.Sprintf("%v", user.IsAdmin),
+					"key-fp":   fp,
+				},
+			}, nil
+		},
+	}
+	cfg.AddHostKey(hostKey)
+
+	ln, err := net.Listen("tcp", listenAddr)
+	if err != nil {
+		return fmt.Errorf("ssh listen %s: %w", listenAddr, err)
+	}
+	defer ln.Close()
+	s.log.Info("SSH listening", "addr", listenAddr)
+
+	// Accept loop: one goroutine per inbound connection.
+	for {
+		conn, err := ln.Accept()
+		if err != nil {
+			return err
+		}
+		go s.handleSSHConn(conn, cfg)
+	}
+}
+
+// handleSSHConn performs the SSH handshake on a raw TCP connection and
+// dispatches each "session" channel to its own goroutine. Channels of
+// any other type are rejected.
+func (s *forgeServer) handleSSHConn(netConn net.Conn, cfg *ssh.ServerConfig) {
+	defer netConn.Close()
+
+	sshConn, chans, reqs, err := ssh.NewServerConn(netConn, cfg)
+	if err != nil {
+		// Handshake or auth failure; nothing to clean up beyond the close.
+		return
+	}
+	defer sshConn.Close()
+	// Global (connection-level) requests are not used by this server.
+	go ssh.DiscardRequests(reqs)
+
+	for newChan := range chans {
+		if newChan.ChannelType() != "session" {
+			newChan.Reject(ssh.UnknownChannelType, "unknown channel type") //nolint:errcheck
+			continue
+		}
+		ch, requests, err := newChan.Accept()
+		if err != nil {
+			return
+		}
+		go s.handleSSHSession(sshConn, ch, requests)
+	}
+}
+
+// handleSSHSession services one "session" channel. Only "exec" requests
+// are honoured; the first exec is run and then the session is closed.
+func (s *forgeServer) handleSSHSession(conn *ssh.ServerConn, ch ssh.Channel, requests <-chan *ssh.Request) {
+	defer ch.Close()
+
+	for req := range requests {
+		if req.Type != "exec" {
+			if req.WantReply {
+				req.Reply(false, nil) //nolint:errcheck
+			}
+			continue
+		}
+
+		// Exec payload: big-endian uint32 command length, then the command.
+		if len(req.Payload) < 4 {
+			req.Reply(false, nil) //nolint:errcheck
+			return
+		}
+		cmdLen := int(req.Payload[0])<<24 | int(req.Payload[1])<<16 | int(req.Payload[2])<<8 | int(req.Payload[3])
+		if len(req.Payload) < 4+cmdLen {
+			req.Reply(false, nil) //nolint:errcheck
+			return
+		}
+		cmd := string(req.Payload[4 : 4+cmdLen])
+		req.Reply(true, nil) //nolint:errcheck
+
+		// Run the command, then report its exit code via "exit-status".
+		exitCode := s.execSSHCommand(conn, ch, cmd)
+		exitPayload := []byte{0, 0, 0, byte(exitCode)}
+		ch.SendRequest("exit-status", false, exitPayload) //nolint:errcheck
+		return
+	}
+}
+
+// execSSHCommand runs one "arche-sync <repo>" command for an
+// authenticated SSH session: it resolves the repo, checks permissions
+// from the connection's auth extensions, wires pre/post-receive hooks
+// and webhooks, and serves the sync protocol over the channel.
+// Returns the process-style exit code for the session (0 on success).
+func (s *forgeServer) execSSHCommand(conn *ssh.ServerConn, ch ssh.Channel, cmd string) int {
+	const prefix = "arche-sync "
+	if !strings.HasPrefix(cmd, prefix) {
+		fmt.Fprintf(ch.Stderr(), "arche-server: unknown command %q\n", cmd)
+		return 1
+	}
+	// Tolerate shell-style quoting and leading/trailing slashes in the name.
+	repoName := strings.TrimSpace(strings.TrimPrefix(cmd, prefix))
+	repoName = strings.Trim(repoName, "/'\"")
+
+	rec, err := s.db.GetRepo(repoName)
+	if err != nil || rec == nil {
+		fmt.Fprintf(ch.Stderr(), "arche-server: repository %q not found\n", repoName)
+		return 1
+	}
+
+	// Identity was stashed in the permission extensions at auth time.
+	username := conn.Permissions.Extensions["username"]
+	isAdmin := conn.Permissions.Extensions["is-admin"] == "true"
+	var user *User
+	if username != "" {
+		user = &User{Username: username, IsAdmin: isAdmin}
+	}
+	canRead := s.db.CanRead(rec, user)
+	canWrite := s.db.CanWrite(rec, user)
+	if !canRead {
+		fmt.Fprintf(ch.Stderr(), "arche-server: access denied\n")
+		return 1
+	}
+
+	repoObj, err := openRepo(s.dataDir(), repoName)
+	if err != nil {
+		fmt.Fprintf(ch.Stderr(), "arche-server: open repo: %v\n", err)
+		return 1
+	}
+	defer repoObj.Close()
+
+	// Write access is enforced inside the sync server via canWrite.
+	srv := syncpkg.NewServerAuth(repoObj, canWrite)
+	if s.cfg.Hooks.PreReceive != "" || s.cfg.Hooks.Update != "" {
+		// Pre-receive runs first; update only runs if it passed.
+		srv.PreUpdateHook = func(bm, oldHex, newHex string) error {
+			if err := runPreReceiveHook(s.cfg.Hooks.PreReceive, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec); err != nil {
+				return err
+			}
+			return runPreReceiveHook(s.cfg.Hooks.Update, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec)
+		}
+	}
+	srv.OnBookmarkUpdated = func(bm, oldHex, newHex string) {
+		s.db.FirePushWebhooks(repoName, username, bm, oldHex, newHex, collectPushCommits(repoObj, oldHex, newHex))
+		runPostReceiveHook(s.cfg.Hooks.PostReceive, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec)
+
+		// Repo-local shell hooks run only when opted in AND the repo has
+		// no write/admin collaborators (see migration 003's comment).
+		if allowed, script, _ := s.db.GetRepoHookConfig(rec.ID); allowed && script != "" {
+			if !s.db.hasWriteCollaborator(rec.ID) {
+				runPostReceiveHook(script, bm, oldHex, newHex, s.cfg.Hooks.TimeoutSec)
+			}
+		}
+	}
+
+	serveHTTPOverSSH(ch, srv.Handler())
+	return 0
+}
+
+// serveHTTPOverSSH bridges the SSH channel to an in-process HTTP server:
+// bytes are shuttled between the channel and one end of a net.Pipe, and
+// the other end is handed to http.Server.Serve as a single connection.
+// Returns when the HTTP exchange over the channel is finished.
+func serveHTTPOverSSH(ch ssh.Channel, handler http.Handler) {
+	local, remote := net.Pipe()
+	defer local.Close()
+
+	go func() {
+		io.Copy(local, ch) //nolint:errcheck
+		local.Close()
+	}()
+	go func() {
+		io.Copy(ch, local) //nolint:errcheck
+		// Half-close our side so the client sees EOF on its reads.
+		ch.CloseWrite() //nolint:errcheck
+	}()
+
+	// Serve exactly one connection; Serve returns once Accept reports EOF.
+	l := &singleConnListener{conn: remote}
+	httpSrv := &http.Server{Handler: handler}
+	httpSrv.Serve(l) //nolint:errcheck
+}
+
+// singleConnListener is a net.Listener that yields exactly one
+// pre-established connection: the first Accept returns conn, every later
+// Accept blocks until Close and then returns io.EOF, which makes
+// http.Server.Serve terminate cleanly.
+// NOTE(review): done is created lazily in Accept without locking, so this
+// assumes a single Accept goroutine (true for http.Server.Serve) — it is
+// not safe for concurrent Accept/Close.
+type singleConnListener struct {
+	conn net.Conn
+	done chan struct{}
+}
+
+// Accept hands out the wrapped connection once, then blocks until Close.
+func (l *singleConnListener) Accept() (net.Conn, error) {
+	if l.done == nil {
+		l.done = make(chan struct{})
+		return l.conn, nil
+	}
+	<-l.done
+	return nil, io.EOF
+}
+
+// Close unblocks any pending Accept; safe to call more than once.
+func (l *singleConnListener) Close() error {
+	if l.done != nil {
+		select {
+		case <-l.done:
+		default:
+			close(l.done)
+		}
+	}
+	return nil
+}
+
+// Addr reports the local address of the wrapped connection.
+func (l *singleConnListener) Addr() net.Addr { return l.conn.LocalAddr() }
+
+// loadOrCreateHostKey returns the server's SSH host key, generating and
+// persisting an ECDSA P-256 key on first run and reloading the saved key
+// on later starts so clients see a stable host identity.
+//
+// The original implementation wrote only the *public* key to keyPath and
+// regenerated a fresh private key on every start (the "file exists"
+// branch never read the file), so the host key changed on each restart.
+func (s *forgeServer) loadOrCreateHostKey() (ssh.Signer, error) {
+	keyPath := filepath.Join(s.cfg.Storage.DataDir, "ssh_host_ecdsa_key")
+
+	if _, err := os.Stat(keyPath); os.IsNotExist(err) {
+		privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+		if err != nil {
+			return nil, fmt.Errorf("generate host key: %w", err)
+		}
+		der, err := x509.MarshalECPrivateKey(privKey)
+		if err != nil {
+			return nil, fmt.Errorf("encode host key: %w", err)
+		}
+		pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
+		// 0600: the private key must not be readable by other users.
+		if err := os.WriteFile(keyPath, pemBytes, 0o600); err != nil {
+			return nil, fmt.Errorf("save host key: %w", err)
+		}
+		return ssh.NewSignerFromKey(privKey)
+	}
+
+	// Reload the previously generated key.
+	pemBytes, err := os.ReadFile(keyPath)
+	if err != nil {
+		return nil, fmt.Errorf("read host key: %w", err)
+	}
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, fmt.Errorf("parse host key %s: no PEM block", keyPath)
+	}
+	privKey, err := x509.ParseECPrivateKey(block.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("parse host key: %w", err)
+	}
+	return ssh.NewSignerFromKey(privKey)
+}

internal/archesrv/sshkeys.go [A]
--- /dev/null
+++ b/internal/archesrv/sshkeys.go
@@ -1,0 +1,84 @@
+package archesrv
+
+import (
+	"bytes"
+	"time"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// SSHKey is a user-registered public key row from the ssh_keys table.
+type SSHKey struct {
+	ID        int64
+	UserID    int64
+	Label     string // user-chosen display label, may be empty
+	PublicKey string // authorized_keys format
+	AddedAt   time.Time
+}
+
+// AddSSHKey validates publicKey as an authorized_keys entry and stores it
+// for the given user, returning the stored record with its assigned ID.
+func (d *DB) AddSSHKey(userID int64, label, publicKey string) (*SSHKey, error) {
+	// Reject anything ssh cannot parse before touching the database.
+	if _, _, _, _, err := ssh.ParseAuthorizedKey([]byte(publicKey)); err != nil {
+		return nil, err
+	}
+	// Second precision matches what the row stores (and what ListSSHKeys
+	// reads back); previously the returned AddedAt was left at zero.
+	now := time.Unix(time.Now().Unix(), 0)
+	res, err := d.db.Exec(
+		"INSERT INTO ssh_keys(user_id,label,public_key,added_at) VALUES(?,?,?,?)",
+		userID, label, publicKey, now.Unix(),
+	)
+	if err != nil {
+		return nil, err
+	}
+	id, _ := res.LastInsertId()
+	return &SSHKey{ID: id, UserID: userID, Label: label, PublicKey: publicKey, AddedAt: now}, nil
+}
+
+// ListSSHKeys returns all keys registered by a user, ordered by insertion
+// (ascending id). Returns a nil slice when the user has no keys.
+func (d *DB) ListSSHKeys(userID int64) ([]SSHKey, error) {
+	rows, err := d.db.Query(
+		"SELECT id, user_id, label, public_key, added_at FROM ssh_keys WHERE user_id=? ORDER BY id",
+		userID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var keys []SSHKey
+	for rows.Next() {
+		var k SSHKey
+		var ts int64
+		if err := rows.Scan(&k.ID, &k.UserID, &k.Label, &k.PublicKey, &ts); err != nil {
+			return nil, err
+		}
+		// added_at is stored as unix seconds.
+		k.AddedAt = time.Unix(ts, 0)
+		keys = append(keys, k)
+	}
+	return keys, rows.Err()
+}
+
+// DeleteSSHKey removes a key, scoped to its owner so one user cannot
+// delete another user's key. Deleting a non-existent key is not an error.
+func (d *DB) DeleteSSHKey(keyID, userID int64) error {
+	_, err := d.db.Exec("DELETE FROM ssh_keys WHERE id=? AND user_id=?", keyID, userID)
+	return err
+}
+
+// AuthorizeSSHKey maps an offered public key to its owning user, or
+// (nil, nil) when no stored key matches. Keys are compared by canonical
+// wire format, so textual formatting of the stored entry doesn't matter.
+// NOTE(review): linear scan re-parsing every stored key per auth attempt
+// — fine for small installs, revisit if key counts grow.
+func (d *DB) AuthorizeSSHKey(pubKey ssh.PublicKey) (*User, error) {
+	keyBytes := pubKey.Marshal()
+
+	rows, err := d.db.Query("SELECT user_id, public_key FROM ssh_keys")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var userID int64
+		var keyStr string
+		if err := rows.Scan(&userID, &keyStr); err != nil {
+			continue
+		}
+		// Unparsable stored keys are skipped rather than failing auth.
+		stored, _, _, _, err := ssh.ParseAuthorizedKey([]byte(keyStr))
+		if err != nil {
+			continue
+		}
+		if bytes.Equal(stored.Marshal(), keyBytes) {
+			return d.GetUserByID(userID)
+		}
+	}
+	return nil, rows.Err()
+}

internal/archesrv/templates/srv_admin_invites.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_admin_invites.html
@@ -1,0 +1,73 @@
+{{ define "title" }}Invites — admin{{ end }} {{ define "navextra" }}<a href="/admin/users">users</a>
+<a href="/admin/invites">invites</a>{{ end }} {{ define "srv_admin_invites.html" }}{{ template "head" . }}
+{{/* Admin page: generate one-time invite links, list them, revoke unused ones. */}}
+<div class="container">
+    <h2 style="margin-bottom: 16px">Invite links</h2>
+    {{ if .Link }}
+    <div style="background: #e8f5e9;
+                border: 1px solid #a5d6a7;
+                border-radius: 3px;
+                padding: 12px 16px;
+                margin-bottom: 20px">
+        <p style="font-size: 13px; margin-bottom: 6px; color: #2e7d32">
+            <strong>New invite link created:</strong>
+        </p>
+        <code style="font-size: 13px; word-break: break-all">{{.Link}}</code>
+        <p style="font-size: 12px; color: #666; margin-top: 6px">
+            Share this link with the person you want to invite. It can only be used
+            once.
+        </p>
+    </div>
+    {{ end }} {{ if .Invites }}
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>Token</th>
+                <th>Created</th>
+                <th>Status</th>
+                <th></th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Invites }}
+            <tr>
+                <td style="font-family: monospace; font-size: 12px">{{.Token}}</td>
+                <td style="font-size: 12px; color: #888">{{.CreatedAt.Format "2006-01-02 15:04"}}</td>
+                <td>
+                    {{ if .UsedBy }}
+                    <span class="badge badge-secret">used</span>
+                {{ else }}
+                    <span class="badge badge-public">unused</span>
+                    {{ end }}
+                </td>
+                <td>
+                    {{/* Only unused invites can be revoked. */}}
+                    {{ if not .UsedBy }}
+                    <button onclick="deleteInvite({{.ID}})"
+                            style="background: #c0392b;
+                                   font-size: 12px;
+                                   padding: 2px 8px">Revoke</button>
+                    {{ end }}
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-bottom: 24px">No invites yet.</p>
+    {{ end }}
+    <form method="post" action="/admin/invites">
+        <button type="submit">Generate invite link</button>
+    </form>
+</div>
+<script>
+  // Revoke an unused invite via DELETE and reload the list on success.
+  function deleteInvite(id) {
+    if (!confirm("Revoke this invite?")) return;
+    fetch("/admin/invites/" + id, { method: "DELETE" }).then(function (r) {
+      if (r.ok) {
+        location.reload();
+      } else {
+        r.text().then(alert);
+      }
+    });
+  }
+</script>
+{{ template "foot" . }} {{ end }}

internal/archesrv/templates/srv_admin_users.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_admin_users.html
@@ -1,0 +1,71 @@
+{{ define "title" }}Users — admin{{ end }}
+{{ define "navextra" }}<a href="/admin/users">users</a> <a href="/admin/invites">invites</a>{{ end }}
+{{ define "srv_admin_users.html" }}{{ template "head" . }}
+{{/* Admin page: list users, create users, delete users other than yourself. */}}
+<div class="container">
+    <h2 style="margin-bottom: 16px">User management</h2>
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>ID</th>
+                <th>Username</th>
+                <th>Role</th>
+                <th></th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Users }}
+            <tr>
+                <td style="color: #aaa; font-size: 12px">{{.ID}}</td>
+                <td>{{.Username}}</td>
+                <td>
+                    {{ if .IsAdmin }}<span class="badge badge-private">admin</span>{{ else }}<span class="badge badge-public">user</span>{{ end }}
+                </td>
+                <td>
+                    {{/* No delete button for the signed-in admin's own row. */}}
+                    {{ if ne .ID $.User.ID }}
+                    <button onclick="deleteUser({{.ID}}, '{{.Username}}')"
+                            style="background: #c0392b;
+                                   font-size: 12px;
+                                   padding: 2px 8px">Delete</button>
+                {{ else }}
+                    <span style="font-size: 12px; color: #aaa">(you)</span>
+                    {{ end }}
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+    <details>
+        <summary style="cursor: pointer; font-size: 13px; color: #555">Create user</summary>
+        <form method="post" action="/admin/users" style="margin-top: 12px">
+            <div class="field">
+                <label>Username</label>
+                <input type="text" name="username" required />
+            </div>
+            <div class="field">
+                <label>Password</label>
+                <input type="password" name="password" required />
+            </div>
+            <div class="field">
+                <label style="display: flex; align-items: center; gap: 6px; cursor: pointer">
+                    <input type="checkbox" name="is_admin" value="1" />
+                    Admin
+                </label>
+            </div>
+            <button type="submit">Create user</button>
+        </form>
+    </details>
+</div>
+<script>
+// Delete a user via DELETE and reload on success; self-delete is never offered.
+function deleteUser(id, name) {
+	if (!confirm('Delete user "' + name + '"? This cannot be undone.')) return;
+	fetch("/admin/users/" + id, { method: "DELETE" }).then(function(r) {
+		if (r.ok) {
+			location.reload();
+		} else {
+			r.text().then(alert);
+		}
+	});
+}
+</script>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_base.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_base.html
@@ -1,0 +1,291 @@
+{{ define "head" }}
+<!DOCTYPE html>
+<html lang="en">
+    <head>
+        <meta charset="utf-8" />
+        <meta name="viewport" content="width=device-width, initial-scale=1" />
+        <title>{{ block "title" . }}arche forge{{ end }}</title>
+        <style>
+      /* Reset & base */
+      *,
+      *::before,
+      *::after {
+        box-sizing: border-box;
+        margin: 0;
+        padding: 0;
+      }
+      body {
+        font-family: monospace;
+        font-size: 14px;
+        background: #fff;
+        color: #111;
+      }
+      a {
+        color: #0645ad;
+        text-decoration: none;
+      }
+      a:hover {
+        text-decoration: underline;
+      }
+      pre,
+      code {
+        font-family: monospace;
+      }
+
+      /* Layout */
+      nav {
+        background: #2b2b2b;
+        color: #ddd;
+        padding: 6px 16px;
+        display: flex;
+        align-items: center;
+        gap: 20px;
+      }
+      nav a {
+        color: #ccc;
+        font-size: 13px;
+      }
+      nav a:hover {
+        color: #fff;
+        text-decoration: none;
+      }
+      nav .brand {
+        color: #fff;
+        font-weight: bold;
+        font-size: 15px;
+      }
+      nav .spacer {
+        flex: 1;
+      }
+      .container {
+        max-width: 960px;
+        margin: 0 auto;
+        padding: 20px 16px;
+      }
+
+      /* Tables */
+      table {
+        width: 100%;
+        border-collapse: collapse;
+      }
+      th {
+        text-align: left;
+        background: #f4f4f4;
+        padding: 6px 8px;
+        border-bottom: 1px solid #ddd;
+        font-size: 12px;
+        text-transform: uppercase;
+        color: #666;
+      }
+      td {
+        padding: 6px 8px;
+        border-bottom: 1px solid #eee;
+        vertical-align: top;
+      }
+      tr:last-child td {
+        border-bottom: none;
+      }
+
+      /* Badges */
+      .badge {
+        display: inline-block;
+        padding: 1px 6px;
+        border-radius: 3px;
+        font-size: 11px;
+        font-weight: bold;
+      }
+      .badge-public {
+        background: #d4edda;
+        color: #155724;
+      }
+      .badge-private {
+        background: #f8d7da;
+        color: #721c24;
+      }
+      .badge-draft {
+        background: #fff3cd;
+        color: #856404;
+      }
+      .badge-public-phase {
+        background: #cfe2ff;
+        color: #084298;
+      }
+      .badge-secret {
+        background: #e2e3e5;
+        color: #383d41;
+      }
+
+      /* Phase classes for commit rows */
+      .phase-draft {
+        border-left: 3px solid #ffc107;
+      }
+      .phase-public {
+        border-left: 3px solid #0d6efd;
+      }
+      .phase-secret {
+        border-left: 3px solid #6c757d;
+      }
+
+      /* Diff */
+      .diff-block {
+        background: #f8f8f8;
+        border: 1px solid #ddd;
+        border-radius: 3px;
+        margin: 8px 0;
+        overflow-x: auto;
+      }
+      .diff-block pre {
+        padding: 8px;
+        line-height: 1.4;
+        white-space: pre;
+      }
+      .diff-add {
+        background: #e6ffed;
+        color: #22863a;
+      }
+      .diff-del {
+        background: #ffeef0;
+        color: #cb2431;
+      }
+      .diff-hdr {
+        color: #6a737d;
+      }
+
+      /* Forms */
+      form label {
+        display: block;
+        margin-bottom: 4px;
+        font-size: 13px;
+        color: #333;
+      }
+      form input[type="text"],
+      form input[type="password"] {
+        width: 280px;
+        padding: 6px 8px;
+        border: 1px solid #ccc;
+        border-radius: 3px;
+        font-size: 13px;
+      }
+      form .field {
+        margin-bottom: 12px;
+      }
+      button,
+      input[type="submit"] {
+        padding: 6px 14px;
+        background: #2b2b2b;
+        color: #fff;
+        border: none;
+        border-radius: 3px;
+        font-size: 13px;
+        cursor: pointer;
+      }
+      button:hover {
+        background: #444;
+      }
+      .error {
+        color: #c00;
+        margin-bottom: 8px;
+        font-size: 13px;
+      }
+
+      /* Tree */
+      .tree-path {
+        font-size: 13px;
+        margin-bottom: 8px;
+        color: #555;
+      }
+      .tree-path a {
+        color: #0645ad;
+      }
+      .entry-dir a {
+        color: #0645ad;
+        font-weight: bold;
+      }
+      .entry-file a {
+        color: #111;
+      }
+
+      /* Repo header */
+      h1.repo-name {
+        font-size: 20px;
+        margin-bottom: 4px;
+      }
+      .repo-meta {
+        font-size: 12px;
+        color: #666;
+        margin-bottom: 12px;
+      }
+
+      /* Commit log */
+      .log-row {
+        display: flex;
+        align-items: baseline;
+        gap: 8px;
+        padding: 5px 0;
+        border-bottom: 1px solid #f0f0f0;
+      }
+      .log-sha {
+        font-size: 12px;
+        color: #666;
+        font-family: monospace;
+        min-width: 70px;
+      }
+      .log-msg {
+        flex: 1;
+      }
+      .log-author {
+        font-size: 12px;
+        color: #888;
+        min-width: 100px;
+        text-align: right;
+      }
+      .log-date {
+        font-size: 12px;
+        color: #aaa;
+        min-width: 120px;
+        text-align: right;
+      }
+      .bookmark-tag {
+        background: #f0f4ff;
+        border: 1px solid #b0c0ff;
+        border-radius: 3px;
+        padding: 0 4px;
+        font-size: 11px;
+        color: #0645ad;
+        margin-left: 4px;
+      }
+      .head-tag {
+        background: #ffe082;
+        border: 1px solid #fbc02d;
+        border-radius: 3px;
+        padding: 0 4px;
+        font-size: 11px;
+      }
+        </style>
+    </head>
+    <body>
+        <nav>
+            <a class="brand" href="/">
+                <svg width="18"
+                     height="18"
+                     viewBox="0 0 128 128"
+                     xmlns="http://www.w3.org/2000/svg"
+                     style="vertical-align: middle;
+                            margin-right: 6px">
+                    <circle cx="64" cy="64" r="48" stroke="white" stroke-width="8" fill="none" />
+                    <circle cx="64" cy="64" r="10" fill="white" />
+                </svg>
+            arche</a>
+            {{ block "navextra" . }}{{ end }}
+            <span class="spacer"></span>
+            {{ if .User }}<a href="/settings/keys">settings</a>
+            <a href="/settings/token">token</a>{{ if .User.IsAdmin }}
+            <a href="/admin/users">admin</a>{{ end }}
+            <span style="color: #aaa; font-size: 12px">{{.User.Username}}</span>
+            <a href="/logout">logout</a>{{ else }}<a href="/login">login</a>{{ if
+            registrationOpen }} <a href="/register">register</a>{{ end }}{{ end }}
+        </nav>
+        {{ end }} {{ define "foot" }}
+    </body>
+</html>
+{{ end }}

internal/archesrv/templates/srv_login.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_login.html
@@ -1,0 +1,21 @@
+{{ define "title" }}Login — arche forge{{ end }}
+{{ define "srv_login.html" }}{{ template "head" . }}
+<div class="container" style="max-width: 360px; margin-top: 60px">
+    <h2 style="margin-bottom: 16px">Sign in</h2>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }}
+    <form method="post" action="/login">
+        <div class="field">
+            <label>Username</label>
+            <input type="text" name="username" autofocus autocomplete="username" />
+        </div>
+        <div class="field">
+            <label>Password</label>
+            <input type="password" name="password" autocomplete="current-password" />
+        </div>
+        <button type="submit">Sign in</button>
+    </form>
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_register.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_register.html
@@ -1,0 +1,43 @@
+{{ define "title" }}Create account — arche forge{{ end }}
+{{ define "srv_register.html" }}{{ template "head" . }}
+<div class="container" style="max-width: 360px; margin-top: 60px">
+    <h2 style="margin-bottom: 16px">Create account</h2>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }}
+    <form method="post" action="/register">
+        {{ if .InviteRequired }}
+        <div class="field">
+            <label>Invite token</label>
+            <input type="text"
+                   name="invite_token"
+                   autofocus
+                   autocomplete="off"
+                   placeholder="paste your invite token" />
+        </div>
+        {{ end }}
+        <div class="field">
+            <label>Username</label>
+            <input
+                type="text"
+                name="username"
+                {{if
+                not
+                .InviteRequired}}autofocus
+                {{ end }}autocomplete="username"
+                />
+            </div>
+            <div class="field">
+                <label>Password</label>
+                <input type="password" name="password" autocomplete="new-password" />
+            </div>
+            <div class="field">
+                <label>Confirm password</label>
+                <input type="password" name="confirm" autocomplete="new-password" />
+            </div>
+            <button type="submit">Register</button>
+            <a href="/login" style="margin-left: 12px; font-size: 13px">Sign in instead</a>
+        </form>
+    </div>
+    {{ template "foot" . }}
+    {{ end }}

internal/archesrv/templates/srv_repo_commit.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_commit.html
@@ -1,0 +1,88 @@
+{{ define "title" }}{{.Repo}} — commit {{.ShortHex}}{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_commit.html" }}{{ template "head" . }}
+<div class="container">
+    <h1 class="repo-name">{{.Repo}} / commit</h1>
+    <table style="width: auto; margin-bottom: 16px">
+        <tr>
+            <th style="width: 90px">commit</th>
+            <td>
+                <code>{{.HexID}}</code>
+            </td>
+        </tr>
+        <tr>
+            <th>change</th>
+            <td>
+                <code>{{.ChangeID}}</code>
+            </td>
+        </tr>
+        <tr>
+            <th>author</th>
+            <td>{{.Author}}</td>
+        </tr>
+        <tr>
+            <th>committer</th>
+            <td>{{.Committer}}</td>
+        </tr>
+        <tr>
+            <th>date</th>
+            <td>{{.Date}}</td>
+        </tr>
+        <tr>
+            <th>phase</th>
+            <td>
+                <span class="badge badge-{{.PhaseClass}}">{{.Phase}}</span>
+            </td>
+        </tr>
+        {{ if .Bookmarks }}
+        <tr>
+            <th>bookmarks</th>
+            <td>
+                {{ range .Bookmarks }}<span class="bookmark-tag">{{.}}</span> {{ end }}
+            </td>
+        </tr>
+        {{ end }} {{ if .Parents }}
+        <tr>
+            <th>parents</th>
+            <td>
+                {{ range .Parents }}<a href="/{{$.Repo}}/commit?id={{.HexID}}"><code>{{.ShortHex}}</code></a>
+                {{ end }}
+            </td>
+        </tr>
+        {{ end }}
+        <tr>
+            <th>signature</th>
+            <td>
+                {{ if eq .SigStatus "verified" }}
+                <span class="badge badge-public" title="Signed by a registered key">&#10003; Verified</span>
+                {{ else if eq .SigStatus "unknown_key" }}
+                <span class="badge badge-draft"
+                      title="Signature present but key not registered">Signed (unknown key)</span>
+                {{ else if eq .SigStatus "invalid" }}
+                <span class="badge"
+                      style="background:#cc0000;
+                             color:#fff"
+                      title="Signature verification failed">&#x26A0; Invalid signature</span>
+            {{ else }}
+                <span style="color:#888">Unsigned</span>
+                {{ end }}
+            </td>
+        </tr>
+    </table>
+    <pre style="background: #f9f9f9; border: 1px solid #ddd; padding: 10px; border-radius: 3px; white-space: pre-wrap; margin-bottom: 16px;">
+{{.Message}}</pre>
+    {{ range .Diffs }}
+    <div style="margin-bottom: 12px">
+        <div style="font-size: 13px; padding: 4px 0; color: #333">
+            <strong>{{.Path}}</strong>
+            <span style="color: #888; font-size: 12px; margin-left: 6px">[{{.Status}}]</span>
+        </div>
+        <div class="diff-block">
+            <pre>{{range .Lines}}<span class="{{.Class}}">{{.Text}}
+</span>{{end}}</pre>
+        </div>
+    </div>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_file.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_file.html
@@ -1,0 +1,28 @@
+{{ define "title" }}{{.Repo}} — {{.FilePath}}{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_file.html" }}{{ template "head" . }}
+<div class="container">
+    <h1 class="repo-name">
+        {{.Repo}} / <code style="font-size: 18px">{{.FilePath}}</code>
+    </h1>
+    <div class="repo-meta">
+        commit
+        <a href="/{{.Repo}}/commit?id={{.CommitHex}}"><code>{{.ShortHex}}</code></a>
+    </div>
+    {{ if .IsBinary }}
+    <p style="color: #888; font-style: italic">Binary file — cannot display.</p>
+    {{ else if .Highlighted }}
+    <div style="margin-top: 12px;
+                border: 1px solid #ddd;
+                border-radius: 3px;
+                overflow-x: auto;
+                font-size: 13px">{{.Highlighted}}</div>
+{{ else }}
+    <div class="diff-block" style="margin-top: 12px">
+        <pre style="padding: 10px; white-space: pre-wrap; word-break: break-all">
+{{.Content}}</pre>
+    </div>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_issue.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_issue.html
@@ -1,0 +1,94 @@
+{{ define "title" }}{{.Repo}} — issue {{.ID}}{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_issue.html" }}{{ template "head" . }}
+<div class="container">
+    <div style="margin-bottom: 8px">
+        <a href="/{{.Repo}}/issues" style="font-size: 13px; color: #666">← issues</a>
+    </div>
+    <h1 class="repo-name" style="margin-bottom: 6px">{{.Title}}</h1>
+    <div class="repo-meta"
+         style="display: flex;
+                align-items: center;
+                gap: 10px;
+                margin-bottom: 16px">
+        <span>
+            {{ if eq .Status "open" }}
+            <span class="badge badge-public">open</span>
+            {{ else if eq .Status "closed" }}
+            <span class="badge badge-private">closed</span>
+        {{ else }}
+            <span class="badge badge-secret">{{.Status}}</span>
+            {{ end }}
+        </span>
+        <span style="color: #888">{{.ID}}</span>
+        {{ if .Labels }} {{ range .Labels }}<span class="bookmark-tag">{{.}}</span>{{ end }} {{ end }} {{ if .User }}
+        <form method="post" action="/{{.Repo}}/issue/status" style="margin: 0">
+            <input type="hidden" name="issue_id" value="{{.ID}}" />
+            {{ if eq .Status "open" }}
+            <input type="hidden" name="status" value="closed" />
+            <button type="submit"
+                    style="background: #c0392b;
+                           font-size: 12px;
+                           padding: 2px 10px">Close</button>
+        {{ else }}
+            <input type="hidden" name="status" value="open" />
+            <button type="submit"
+                    style="background: #27ae60;
+                           font-size: 12px;
+                           padding: 2px 10px">Reopen</button>
+            {{ end }}
+        </form>
+        {{ end }}
+    </div>
+    {{ if .Body }}
+    <div style="background: #f8f8f8;
+                border: 1px solid #ddd;
+                border-radius: 3px;
+                padding: 12px 16px;
+                margin-bottom: 24px;
+                white-space: pre-wrap;
+                font-size: 13px;
+                line-height: 1.5">{{.Body}}</div>
+    {{ end }} {{ if .Comments }}
+    <h3 style="font-size: 14px; color: #555; margin-bottom: 10px">Comments</h3>
+    {{ range .Comments }}
+    <div style="border: 1px solid #e0e0e0;
+                border-radius: 3px;
+                margin-bottom: 12px">
+        <div style="background: #f4f4f4;
+                    padding: 6px 12px;
+                    font-size: 12px;
+                    color: #555;
+                    border-bottom: 1px solid #e0e0e0">
+            <strong>{{.Author}}</strong>
+        </div>
+        <div style="padding: 10px 12px;
+                    white-space: pre-wrap;
+                    font-size: 13px;
+                    line-height: 1.5">{{.Text}}</div>
+    </div>
+    {{ end }} {{ end }} {{ if .User }}
+    <h3 style="font-size: 14px;
+               color: #555;
+               margin-bottom: 10px;
+               margin-top: 24px">Add comment</h3>
+    <form method="post" action="/{{.Repo}}/issue/comment">
+        <input type="hidden" name="issue_id" value="{{.ID}}" />
+        <div class="field">
+            <textarea name="text"
+                      rows="4"
+                      required
+                      style="width: 500px;
+                             padding: 6px 8px;
+                             border: 1px solid #ccc;
+                             border-radius: 3px;
+                             font-family: monospace;
+                             font-size: 13px"
+                      placeholder="Leave a comment…"></textarea>
+        </div>
+        <button type="submit">Comment</button>
+    </form>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_issues.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_issues.html
@@ -1,0 +1,64 @@
+{{ define "title" }}{{.Repo}} — issues{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_issues.html" }}{{ template "head" . }}
+<div class="container">
+    <h1 class="repo-name">{{.Repo}} / issues</h1>
+    {{ if .Issues }}
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>ID</th>
+                <th>Title</th>
+                <th>Status</th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Issues }}
+            <tr>
+                <td style="color: #aaa; font-size: 12px; min-width: 80px">
+                    <a href="/{{$.Repo}}/issue?id={{.ID}}">{{.ID}}</a>
+                </td>
+                <td>
+                    <a href="/{{$.Repo}}/issue?id={{.ID}}">{{.Title}}</a>
+                </td>
+                <td>
+                    {{ if eq .Status "open" }}
+                    <span class="badge badge-public">open</span>
+                    {{ else if eq .Status "closed" }}
+                    <span class="badge badge-private">closed</span>
+                {{ else }}
+                    <span class="badge badge-secret">{{.Status}}</span>
+                    {{ end }}
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-top: 12px; margin-bottom: 24px">No issues yet.</p>
+    {{ end }} {{ if .User }}
+    <details>
+        <summary style="cursor: pointer; font-size: 13px; color: #555">Create issue</summary>
+        <form method="post" action="/{{.Repo}}/issues" style="margin-top: 12px">
+            <div class="field">
+                <label>Title</label>
+                <input type="text" name="title" required style="width: 400px" />
+            </div>
+            <div class="field">
+                <label>Description</label>
+                <textarea name="body"
+                          rows="6"
+                          style="width: 400px;
+                                 padding: 6px 8px;
+                                 border: 1px solid #ccc;
+                                 border-radius: 3px;
+                                 font-family: monospace;
+                                 font-size: 13px"></textarea>
+            </div>
+            <button type="submit">Create issue</button>
+        </form>
+    </details>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_list.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_list.html
@@ -1,0 +1,85 @@
+{{ define "title" }}Repositories — arche forge{{ end }}
+{{ define "srv_repo_list.html" }}{{ template "head" . }}
+<div class="container">
+    <h2 style="margin-bottom: 16px">Repositories</h2>
+    {{ if .Repos }}
+    <table>
+        <thead>
+            <tr>
+                <th>Name</th>
+                <th>Description</th>
+                <th>Visibility</th>
+                <th>Last commit</th>
+                <th>Created</th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Repos }}
+            <tr>
+                <td>
+                    <a href="/{{.Name}}">{{.Name}}</a>
+                </td>
+                <td>{{.Description}}</td>
+                <td>
+                    <span class="badge badge-{{.Visibility}}">{{.Visibility}}</span>
+                </td>
+                <td style="color: #888; font-size: 12px">{{.LastCommit}}</td>
+                <td style="color: #aaa; font-size: 12px">{{.CreatedAt}}</td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888">No repositories yet.</p>
+    {{ end }} {{ if .User }}{{ if .User.IsAdmin }}
+    <details style="margin-top: 24px">
+        <summary style="cursor: pointer; font-size: 13px; color: #555">Create repository</summary>
+        <form method="post"
+              action="/admin/repos"
+              style="margin-top: 10px"
+              id="create-repo-form">
+            <div class="field">
+                <label>Name</label>
+                <input type="text"
+                       name="name"
+                       id="repo-name"
+                       pattern="[a-zA-Z0-9._-]+"
+                       required />
+            </div>
+            <div class="field">
+                <label>Description</label>
+                <input type="text" name="description" id="repo-desc" />
+            </div>
+            <div class="field">
+                <label>Visibility</label>
+                <select name="visibility"
+                        id="repo-vis"
+                        style="padding: 5px;
+                               border: 1px solid #ccc;
+                               border-radius: 3px">
+                    <option value="private">private</option>
+                    <option value="public">public</option>
+                </select>
+            </div>
+            <button type="button" onclick="submitCreate()">Create</button>
+        </form>
+    </details>
+    <script>
+    function submitCreate() {
+      var form = document.getElementById("create-repo-form");
+      var data = new FormData(form);
+      fetch("/admin/repos", {
+        method: "POST",
+        body: new URLSearchParams(data),
+      }).then(function (r) {
+        if (r.ok) {
+          location.reload();
+        } else {
+          r.text().then(alert);
+        }
+      });
+    }
+    </script>
+    {{ end }}{{ end }}
+</div>
+{{ template "foot" . }} {{ end }}

internal/archesrv/templates/srv_repo_log.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_log.html
@@ -1,0 +1,62 @@
+{{ define "title" }}{{.Repo}} — log{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_log.html" }}{{ template "head" . }}
+<div class="container">
+    <h1 class="repo-name">{{.Repo}} / log</h1>
+    <form method="get"
+          style="display:flex;
+                 gap:8px;
+                 margin-bottom:16px;
+                 align-items:center">
+        <input name="where"
+               value="{{.WhereExpr}}"
+               placeholder='filter: author(alice) or draft()'
+               style="flex:1;
+                      padding:6px 10px;
+                      font-family:monospace;
+                      font-size:13px;
+                      background:#1e1e1e;
+                      color:#ccc;
+                      border:1px solid #444;
+                      border-radius:4px;
+                      outline:none">
+        <button type="submit"
+                style="padding:6px 14px;
+                       background:#2a6;
+                       color:#fff;
+                       border:none;
+                       border-radius:4px;
+                       cursor:pointer;
+                       font-size:13px">Filter</button>
+        {{ if .WhereExpr }}<a href="?where=" style="color:#888; font-size:12px; white-space:nowrap">✕ clear</a>{{ end }}
+    </form>
+    {{ if .WhereErr }}
+    <p style="color:#e55;
+              font-family:monospace;
+              font-size:12px;
+              margin:-8px 0 12px">parse error: {{.WhereErr}}</p>
+    {{ end }}
+    {{ if .WhereExpr }}
+    <p style="color:#888; font-size:12px; margin:-4px 0 10px">
+        Showing {{ len .Commits }} commit(s) matching <code style="color:#adf">{{.WhereExpr}}</code>
+    </p>
+    {{ end }}
+    {{ if .Commits }} {{ range .Commits }}
+    <div class="log-row phase-{{.PhaseClass}}">
+        <span class="log-sha"><a href="/{{$.Repo}}/commit?id={{.HexID}}">{{.ShortHex}}</a></span>
+        <span class="log-msg">
+            <a href="/{{$.Repo}}/commit?id={{.HexID}}">{{.Message}}</a>
+            {{ if .IsHead }}<span class="head-tag">HEAD</span>{{ end }} {{ range .Bookmarks }}<span class="bookmark-tag">{{.}}</span>{{ end }}
+        </span>
+        <span class="log-author">{{.Author}}</span>
+        <span class="log-date">{{.Date}}</span>
+        <span class="badge badge-{{.PhaseClass}}">{{.Phase}}</span>
+    </div>
+{{ end }} {{ else }}
+    <p style="color: #888; margin-top: 12px">
+    {{ if .WhereExpr }}No commits match this filter.{{ else }}No commits yet.{{ end }}
+    </p>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_settings.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_settings.html
@@ -1,0 +1,138 @@
+{{ define "title" }}{{.Repo}} — settings{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a> <a href="/{{.Repo}}/settings">settings</a>{{ end }}
+{{ define "srv_repo_settings.html" }}{{ template "head" . }}
+<div class="container">
+    <h2 style="margin-bottom: 16px">{{.Repo}} / settings</h2>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }}
+    <section style="margin-bottom: 32px">
+        <h3 style="margin-bottom: 12px; font-size: 15px">General</h3>
+        <form method="post" action="/{{.Repo}}/settings">
+            <div class="field">
+                <label>Description</label>
+                <input type="text"
+                       name="description"
+                       value="{{.Description}}"
+                       style="width: 400px" />
+            </div>
+            <div class="field">
+                <label>Visibility</label>
+                <select name="visibility"
+                        style="padding: 6px 8px;
+                               border: 1px solid #ccc;
+                               border-radius: 3px;
+                               font-size: 13px">
+                    <option value="public" {{ if eq .Visibility "public" }}selected{{ end }}>Public
+                    </option>
+                    <option value="private" {{ if eq .Visibility "private" }}selected{{ end }}>Private
+                    </option>
+                </select>
+            </div>
+            <button type="submit">Save</button>
+        </form>
+    </section>
+    <section style="margin-bottom: 32px">
+        <h3 style="margin-bottom: 12px; font-size: 15px">Collaborators</h3>
+        {{ if .Collaborators }}
+        <table style="margin-bottom: 16px">
+            <thead>
+                <tr>
+                    <th>Username</th>
+                    <th>Role</th>
+                    <th></th>
+                </tr>
+            </thead>
+            <tbody>
+                {{ range .Collaborators }}
+                <tr>
+                    <td>{{.Username}}</td>
+                    <td style="font-size: 12px; color: #666">{{.Role}}</td>
+                    <td>
+                        <button onclick="removeCollab({{.UserID}})"
+                                style="background: #c0392b;
+                                       font-size: 12px;
+                                       padding: 2px 8px">Remove</button>
+                    </td>
+                </tr>
+                {{ end }}
+            </tbody>
+        </table>
+    {{ else }}
+        <p style="color: #888; margin-bottom: 16px">No collaborators yet.</p>
+        {{ end }}
+        <details>
+            <summary style="cursor: pointer; font-size: 13px; color: #555">Add collaborator</summary>
+            <form method="post"
+                  action="/{{.Repo}}/settings/collaborators"
+                  style="margin-top: 12px">
+                <div class="field">
+                    <label>Username</label>
+                    <input type="text"
+                           name="username"
+                           required
+                           style="width: 280px"
+                           placeholder="username" />
+                </div>
+                <div class="field">
+                    <label>Role</label>
+                    <select name="role"
+                            style="padding: 6px 8px;
+                                   border: 1px solid #ccc;
+                                   border-radius: 3px;
+                                   font-size: 13px">
+                        <option value="read">read</option>
+                        <option value="write">write</option>
+                        <option value="admin">admin</option>
+                    </select>
+                </div>
+                <button type="submit">Add</button>
+            </form>
+        </details>
+    </section>
+    <section style="border: 1px solid #f5c6cb;
+                    border-radius: 4px;
+                    padding: 16px;
+                    background: #fff8f8">
+        <h3 style="margin-bottom: 12px; font-size: 15px; color: #721c24">Danger zone</h3>
+        <p style="font-size: 13px; color: #555; margin-bottom: 12px">
+            Deleting this repository is permanent. All data will be lost.
+        </p>
+        <button onclick="deleteRepo()" style="background: #c0392b; font-size: 13px">Delete repository</button>
+    </section>
+</div>
+<script>
+  function removeCollab(userID) {
+    if (!confirm("Remove this collaborator?")) return;
+    fetch(
+      "/{{.Repo}}/settings/collaborators/" + userID,
+      { method: "DELETE" },
+    ).then(function (r) {
+      if (r.ok) {
+        location.reload();
+      } else {
+        r.text().then(alert);
+      }
+    });
+  }
+
+  function deleteRepo() {
+    if (
+      !confirm(
+        "Permanently delete {{.Repo}}? This cannot be undone.",
+      )
+    )
+      return;
+    fetch("/{{.Repo}}/settings/delete", { method: "POST" }).then(
+      function (r) {
+        if (r.ok) {
+          window.location.href = "/";
+        } else {
+          r.text().then(alert);
+        }
+      },
+    );
+  }
+</script>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_stack.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_stack.html
@@ -1,0 +1,84 @@
+{{ define "title" }}{{.Repo}} — stack {{.StackID}}{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_stack.html" }}{{ template "head" . }}
+<div class="container">
+    <h2 class="repo-name">
+        {{.Repo}} / stack: <code>{{.StackID}}</code>
+    </h2>
+    <p>Showing {{ len .Entries }} change(s) in dependency order (oldest ancestor first).</p>
+    {{ range .Entries }}
+    <div class="commit-block"
+         id="{{.ChangeID}}"
+         style="border: 1px solid #333;
+                border-radius: 4px;
+                margin-bottom: 24px;
+                padding: 16px">
+        <div style="display: flex;
+                    justify-content: space-between;
+                    align-items: flex-start">
+            <div>
+                <code><a href="/{{$.Repo}}/commit/{{.HexID}}">{{.ShortHex}}</a></code>
+                &nbsp;
+                <span class="phase-badge phase-{{.PhaseClass}}">{{.Phase}}</span>
+                {{ if .SigStatus }}
+                <span class="sig-badge">{{.SigStatus}}</span>
+                {{ end }}
+                &nbsp;
+                <strong>{{.Message}}</strong>
+            </div>
+            <div style="font-size: 12px; color: #888;">{{.Author}} &middot; {{.Date}}</div>
+        </div>
+        <div style="margin: 8px 0; font-size: 12px; color: #aaa;">
+            <code>ch:{{.ChangeID}}</code>
+            {{ if .DiffStats }}
+            &middot; {{ .DiffStats }}
+            {{ end }}
+        </div>
+        <div style="margin-top: 12px;
+                    display: flex;
+                    align-items: center;
+                    gap: 12px">
+            <strong style="font-size: 13px;">Review:
+            <span class="badge badge-{{ if eq .Review "approved" }}success{{ else if eq .Review "needs-revision" }}danger{{ else if eq .Review "reviewing" }}warning{{ else }}default{{ end }}">
+                {{.Review}}
+            </span>
+        </strong>
+        {{ if $.User }}
+        <form method="POST"
+              action="/{{$.Repo}}/stacks/{{$.StackID}}/reviews/{{.ChangeID}}"
+              style="display: inline-flex;
+                     gap: 6px;
+                     align-items: center">
+            <select name="status" style="font-size: 12px; padding: 2px 6px;">
+                <option value="open" {{ if eq .Review "open" }}selected{{ end }}>open
+                </option>
+                <option value="reviewing" {{ if eq .Review "reviewing" }}selected{{ end }}>reviewing
+                </option>
+                <option value="approved" {{ if eq .Review "approved" }}selected{{ end }}>approved
+                </option>
+                <option value="needs-revision"
+                        {{ if eq .Review "needs-revision" }}
+                        selected
+                        {{ end }}>needs-revision
+                </option>
+            </select>
+            <button type="submit" style="font-size: 12px; padding: 2px 8px;">Update</button>
+        </form>
+        {{ end }}
+    </div>
+    {{ if .DiffStats }}
+    <details style="margin-top: 12px;">
+        <summary style="font-size: 12px; color: #888; cursor: pointer;">Show diff</summary>
+        <pre style="margin-top: 8px;
+                    background: #1a1a1a;
+                    padding: 12px;
+                    border-radius: 4px;
+                    font-size: 12px;
+                    overflow-x: auto">{{.DiffStats}}</pre>
+    </details>
+    {{ end }}
+</div>
+{{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_stacks.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_stacks.html
@@ -1,0 +1,40 @@
+{{ define "title" }}{{.Repo}} — stacks{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_stacks.html" }}{{ template "head" . }}
+<div class="container">
+    <h1 class="repo-name">{{.Repo}} / stacks</h1>
+    {{ if .Stacks }}
+    <table>
+        <thead>
+            <tr>
+                <th>Stack</th>
+                <th>Depth</th>
+                <th>Status</th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Stacks }}
+            <tr>
+                <td>
+                    <a href="/{{$.Repo}}/stacks/{{.StackID}}"><code>{{.Root}}</code></a>
+                </td>
+                <td>{{.Depth}} change(s)</td>
+                <td>
+                    {{ range $cid, $st := .Reviews }}
+                <span class="badge badge-{{ if eq $st "approved" }}success{{ else if eq $st "needs-revision" }}danger{{ else if eq $st "reviewing" }}warning{{ else }}default{{ end }}">
+                    {{$cid}}: {{$st}}
+                </span>
+                {{ end }}
+            </td>
+        </tr>
+        {{ end }}
+    </tbody>
+</table>
+{{ else }}
+<p style="color: #888">
+    No stacks found. Use <code>arche stack push</code> to publish a stack for review.
+</p>
+{{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_tree.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_tree.html
@@ -1,0 +1,34 @@
+{{ define "title" }}{{.Repo}} — tree{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_tree.html" }}{{ template "head" . }}
+<div class="container">
+    <h1 class="repo-name">{{.Repo}} / tree</h1>
+    <div class="repo-meta">
+        commit
+        <a href="/{{.Repo}}/commit?id={{.CommitHex}}"><code>{{.ShortHex}}</code></a>
+    </div>
+    <div class="tree-path">
+        <a href="/{{.Repo}}/tree?id={{.CommitHex}}">/</a>
+        {{ range .PathParts }} / <a href="{{.Link}}">{{.Name}}</a>{{ end }}
+    </div>
+    <table>
+        <thead>
+            <tr>
+                <th>Name</th>
+                <th>Type</th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Entries }}
+        <tr class="{{ if .IsDir }}entry-dir{{ else }}entry-file{{ end }}">
+            <td>
+                <a href="{{.Link}}">{{ if .IsDir }}📁 {{ end }}{{.Name}}</a>
+            </td>
+            <td style="color: #888; font-size: 12px">{{ if .IsDir }}dir{{ else }}{{.Mode}}{{ end }}</td>
+        </tr>
+        {{ end }}
+    </tbody>
+</table>
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_webhooks.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_webhooks.html
@@ -1,0 +1,92 @@
+{{ define "title" }}{{.Repo}} — webhooks{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a> <a href="/{{.Repo}}/settings">settings</a>{{ end }}
+{{ define "srv_repo_webhooks.html" }}{{ template "head" . }}
+<div class="container">
+    <h2 style="margin-bottom: 16px">{{.Repo}} / webhooks</h2>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }} {{ if .Webhooks }}
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>URL</th>
+                <th>Events</th>
+                <th>Active</th>
+                <th>Deliveries</th>
+                <th></th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Webhooks }}
+            <tr>
+                <td style="font-family: monospace;
+                           font-size: 12px;
+                           max-width: 300px;
+                           overflow: hidden;
+                           text-overflow: ellipsis">{{.URL}}</td>
+                <td style="font-size: 12px; color: #666">{{.Events}}</td>
+                <td>
+                    {{ if .Active }}
+                    <span class="badge badge-public">active</span>
+                {{ else }}
+                    <span class="badge badge-private">inactive</span>
+                    {{ end }}
+                </td>
+                <td>
+                    <a href="/{{$.Repo}}/settings/webhooks/{{.ID}}/deliveries"
+                       style="font-size: 12px">history</a>
+                </td>
+                <td>
+                    <button onclick="deleteHook({{.ID}})"
+                            style="background: #c0392b;
+                                   font-size: 12px;
+                                   padding: 2px 8px">Delete</button>
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-bottom: 24px">No webhooks configured.</p>
+    {{ end }}
+    <details>
+        <summary style="cursor: pointer; font-size: 13px; color: #555">Add webhook</summary>
+        <form method="post"
+              action="/{{.Repo}}/settings/webhooks"
+              style="margin-top: 12px">
+            <div class="field">
+                <label>Payload URL</label>
+                <input type="text"
+                       name="url"
+                       required
+                       style="width: 400px"
+                       placeholder="https://example.com/hooks/arche" />
+            </div>
+            <div class="field">
+                <label>Secret (optional, used for HMAC-SHA256 signature)</label>
+                <input type="password" name="secret" style="width: 280px" />
+            </div>
+            <div class="field">
+                <label>Events</label>
+                <input type="text" name="events" value="push" style="width: 200px" />
+            </div>
+            <button type="submit">Add webhook</button>
+        </form>
+    </details>
+</div>
+<script>
+function deleteHook(id) {
+	if (!confirm("Delete this webhook?")) return;
+	fetch("/{{.Repo}}/settings/webhooks/" + id, { method: "DELETE" }).then(
+		function(r) {
+			if (r.ok) {
+				location.reload();
+			} else {
+				r.text().then(alert);
+			}
+		},
+	);
+}
+</script>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_wiki_list.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_wiki_list.html
@@ -1,0 +1,32 @@
+{{ define "title" }}{{.Repo}} — wiki{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_wiki_list.html" }}{{ template "head" . }}
+<div class="container">
+    <h1 class="repo-name">{{.Repo}} / wiki</h1>
+    {{ if .Pages }}
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>Page</th>
+                <th>Last edited by</th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Pages }}
+            <tr>
+                <td>
+                    <a href="/{{$.Repo}}/wiki/{{.Title}}">{{.Title}}</a>
+                </td>
+                <td style="font-size: 12px; color: #666">{{.Author}}</td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-top: 12px; margin-bottom: 24px">No wiki pages yet.</p>
+    {{ end }} {{ if .User }}
+    <a href="/{{.Repo}}/wiki/Home?edit=1" style="font-size: 13px">+ New page (Home)</a>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_repo_wiki_page.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_repo_wiki_page.html
@@ -1,0 +1,32 @@
+{{ define "title" }}{{.Repo}} — wiki — {{.Title}}{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a>{{ end }}
+{{ define "srv_repo_wiki_page.html" }}{{ template "head" . }}
+<div class="container">
+    <div style="display: flex;
+                align-items: baseline;
+                gap: 12px;
+                margin-bottom: 16px">
+        <h1 class="repo-name" style="margin-bottom: 0">{{.Repo}} / wiki / {{.Title}}</h1>
+        {{ if .User }}
+        <a href="?edit=1" style="font-size: 12px">edit</a>
+        {{ end }}
+    </div>
+    {{ if .Editing }}
+    <form method="post" action="/{{.Repo}}/wiki/{{.Title}}">
+        <div class="field">
+            <textarea name="content" rows="20" style="width: 100%; max-width: 800px; padding: 8px; border: 1px solid #ccc; border-radius: 3px; font-family: monospace; font-size: 13px;">
+{{.RawContent}}</textarea>
+        </div>
+        <button type="submit" style="margin-right: 8px">Save</button>
+        <a href="/{{.Repo}}/wiki/{{.Title}}" style="font-size: 13px">cancel</a>
+    </form>
+    {{ else if .RenderedContent }}
+    <div style="max-width: 800px; line-height: 1.6; font-size: 14px">{{.RenderedContent}}</div>
+{{ else }}
+    <p style="color: #888; font-style: italic">
+        This page has no content yet. {{ if .User }}<a href="?edit=1">Create it.</a>{{ end }}
+    </p>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_settings_keys.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_settings_keys.html
@@ -1,0 +1,84 @@
+{{ define "title" }}SSH keys — settings{{ end }}
+{{ define "navextra" }}<a href="/settings/keys">SSH keys</a> <a href="/settings/mtls">mTLS</a> <a href="/settings/token">API tokens</a>{{ end }}
+{{ define "srv_settings_keys.html" }}{{ template "head" . }}
+<div class="container">
+    <h2 style="margin-bottom: 16px">SSH keys</h2>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }} {{ if .Keys }}
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>Label</th>
+                <th>Key (truncated)</th>
+                <th>Added</th>
+                <th></th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Keys }}
+            <tr>
+                <td>
+                    {{ if .Label }}{{.Label}}{{ else }}<span style="color: #aaa">—</span>{{ end }}
+                </td>
+                <td style="font-family: monospace;
+                           font-size: 12px;
+                           max-width: 240px;
+                           overflow: hidden;
+                           text-overflow: ellipsis;
+                           white-space: nowrap">{{.PublicKey | printf "%.60s"}}…</td>
+                <td style="font-size: 12px; color: #888">{{.AddedAt.Format "2006-01-02"}}</td>
+                <td>
+                    <button onclick="deleteKey({{.ID}})"
+                            style="background: #c0392b;
+                                   font-size: 12px;
+                                   padding: 2px 8px">Delete</button>
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-bottom: 24px">No SSH keys registered.</p>
+    {{ end }}
+    <details>
+        <summary style="cursor: pointer; font-size: 13px; color: #555">Add SSH key</summary>
+        <form method="post" action="/settings/keys" style="margin-top: 12px">
+            <div class="field">
+                <label>Label (optional)</label>
+                <input type="text"
+                       name="label"
+                       style="width: 280px"
+                       placeholder="e.g. work laptop" />
+            </div>
+            <div class="field">
+                <label>Public key</label>
+                <textarea name="public_key"
+                          rows="3"
+                          required
+                          style="width: 500px;
+                                 padding: 6px 8px;
+                                 border: 1px solid #ccc;
+                                 border-radius: 3px;
+                                 font-family: monospace;
+                                 font-size: 12px"
+                          placeholder="ssh-ed25519 AAAA..."></textarea>
+            </div>
+            <button type="submit">Add key</button>
+        </form>
+    </details>
+</div>
+<script>
+function deleteKey(id) {
+	if (!confirm("Delete this SSH key?")) return;
+	fetch("/settings/keys/" + id, { method: "DELETE" }).then(function(r) {
+		if (r.ok) {
+			location.reload();
+		} else {
+			r.text().then(alert);
+		}
+	});
+}
+</script>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_settings_mtls.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_settings_mtls.html
@@ -1,0 +1,100 @@
+{{ define "title" }}mTLS certificates — settings{{ end }}
+{{ define "navextra" }}<a href="/settings/keys">SSH keys</a> <a href="/settings/mtls">mTLS</a> <a href="/settings/token">API tokens</a>{{ end }}
+{{ define "srv_settings_mtls.html" }}{{ template "head" . }}
+<div class="container">
+    <h2 style="margin-bottom: 4px">mTLS client certificates</h2>
+    <p style="font-size: 13px; color: #666; margin-bottom: 16px">
+        Register client TLS certificates to authenticate with
+        <code>arche+mtls://</code> remotes without a bearer token. The server
+        identifies you by the SHA-256 fingerprint of the certificate.
+    </p>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }} {{ if .Certs }}
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>Label</th>
+                <th>Fingerprint (SHA-256)</th>
+                <th>Added</th>
+                <th></th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Certs }}
+            <tr>
+                <td>
+                    {{ if .Label }}{{.Label}}{{ else }}<span style="color: #aaa">—</span>{{ end }}
+                </td>
+                <td style="font-family: monospace;
+                           font-size: 11px;
+                           max-width: 300px;
+                           overflow: hidden;
+                           text-overflow: ellipsis;
+                           white-space: nowrap">{{.Fingerprint}}</td>
+                <td style="font-size: 12px; color: #888">{{.AddedAt.Format "2006-01-02"}}</td>
+                <td>
+                    <button onclick="deleteCert({{.ID}})"
+                            style="background: #c0392b;
+                                   font-size: 12px;
+                                   padding: 2px 8px">Delete</button>
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-bottom: 24px">No mTLS certificates registered.</p>
+    {{ end }}
+    <details>
+        <summary style="cursor: pointer; font-size: 13px; color: #555">Add certificate</summary>
+        <p style="font-size: 12px; color: #666; margin: 8px 0">
+            Generate a self-signed certificate with:
+            <br />
+            <code style="background: #f4f4f4; padding: 2px 6px; border-radius: 2px">
+                openssl req -x509 -newkey ec -pkeyopt ec_paramgen_curve:P-256 -keyout
+            client.key -out client.crt -days 3650 -nodes -subj "/CN=arche-client" </code>
+            <br />
+            Then paste the contents of <code>client.crt</code> below and store
+            <code>client.crt</code> and <code>client.key</code> at
+            <code>~/.config/arche/mtls/</code>.
+        </p>
+        <form method="post" action="/settings/mtls" style="margin-top: 12px">
+            <div class="field">
+                <label>Label (optional)</label>
+                <input type="text"
+                       name="label"
+                       style="width: 280px"
+                       placeholder="e.g. work laptop" />
+            </div>
+            <div class="field">
+                <label>Certificate (PEM — paste contents of client.crt)</label>
+                <textarea name="cert_pem"
+                          rows="6"
+                          required
+                          style="width: 500px;
+                                 padding: 6px 8px;
+                                 border: 1px solid #ccc;
+                                 border-radius: 3px;
+                                 font-family: monospace;
+                                 font-size: 12px"
+                          placeholder="-----BEGIN CERTIFICATE-----&#10;...&#10;-----END CERTIFICATE-----"></textarea>
+            </div>
+            <button type="submit">Add certificate</button>
+        </form>
+    </details>
+</div>
+<script>
+  function deleteCert(id) {
+    if (!confirm("Delete this mTLS certificate?")) return;
+    fetch("/settings/mtls/" + id, { method: "DELETE" }).then(function (r) {
+      if (r.ok) {
+        location.reload();
+      } else {
+        alert("Delete failed");
+      }
+    });
+  }
+</script>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_settings_token.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_settings_token.html
@@ -1,0 +1,87 @@
+{{ define "title" }}API tokens — settings{{ end }} {{ define "navextra" }}<a href="/settings/keys">SSH keys</a>
+<a href="/settings/mtls">mTLS</a> <a href="/settings/token">API tokens</a>{{ end
+}} {{ define "srv_settings_token.html" }}{{ template "head" . }}
+<div class="container">
+    <h2 style="margin-bottom: 4px">API tokens</h2>
+    <p style="font-size: 13px; color: #666; margin-bottom: 16px">
+        API tokens let <code>arche</code> authenticate to this server without a
+        password. Use them in <code>~/.config/arche/config.toml</code> as the
+        <code>remote.token</code> value, or pass via
+        <code>Authorization: Bearer &lt;token&gt;</code>.
+    </p>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }} {{ if .NewToken }}
+    <div style="background: #e8f5e9;
+                border: 1px solid #a5d6a7;
+                border-radius: 3px;
+                padding: 12px 16px;
+                margin-bottom: 20px">
+        <p style="font-size: 13px; color: #2e7d32; margin-bottom: 6px">
+            <strong>New token created — copy it now, it won't be shown again:</strong>
+        </p>
+        <code style="font-size: 13px;
+                     word-break: break-all;
+                     background: #fff;
+                     display: block;
+                     padding: 8px;
+                     border-radius: 2px;
+                     border: 1px solid #c8e6c9">{{.NewToken}}</code>
+    </div>
+    {{ end }} {{ if .Tokens }}
+    <table style="margin-bottom: 24px">
+        <thead>
+            <tr>
+                <th>Label</th>
+                <th>Created</th>
+                <th></th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Tokens }}
+            <tr>
+                <td>
+                    {{ if .Label }}{{.Label}}{{ else }}<span style="color: #aaa">—</span>{{ end }}
+                </td>
+                <td style="font-size: 12px; color: #888">{{.CreatedAt}}</td>
+                <td>
+                    <button onclick="deleteToken({{.ID}})"
+                            style="background: #c0392b;
+                                   font-size: 12px;
+                                   padding: 2px 8px">Revoke</button>
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-bottom: 24px">No API tokens yet.</p>
+    {{ end }}
+    <details>
+        <summary style="cursor: pointer; font-size: 13px; color: #555">Create new token</summary>
+        <form method="post" action="/settings/tokens" style="margin-top: 12px">
+            <div class="field">
+                <label>Label (optional)</label>
+                <input type="text"
+                       name="label"
+                       style="width: 280px"
+                       placeholder="e.g. laptop, CI" />
+            </div>
+            <button type="submit">Create token</button>
+        </form>
+    </details>
+</div>
+<script>
+  function deleteToken(id) {
+    if (!confirm("Revoke this token? Any scripts using it will stop working."))
+      return;
+    fetch("/settings/tokens/" + id, { method: "DELETE" }).then(function (r) {
+      if (r.ok) {
+        location.reload();
+      } else {
+        r.text().then(alert);
+      }
+    });
+  }
+</script>
+{{ template "foot" . }} {{ end }}

internal/archesrv/templates/srv_setup.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_setup.html
@@ -1,0 +1,22 @@
+{{ define "title" }}Setup — arche forge{{ end }}
+{{ define "srv_setup.html" }}{{ template "head" . }}
+<div class="container" style="max-width: 400px; margin-top: 60px">
+    <h2 style="margin-bottom: 8px">First-run setup</h2>
+    <p style="margin-bottom: 16px; color: #555; font-size: 13px">No users exist yet. Create the admin account.</p>
+    {{ if .Error }}
+    <p class="error">{{.Error}}</p>
+    {{ end }}
+    <form method="post" action="/setup">
+        <div class="field">
+            <label>Admin username</label>
+            <input type="text" name="username" autofocus autocomplete="username" />
+        </div>
+        <div class="field">
+            <label>Password</label>
+            <input type="password" name="password" autocomplete="new-password" />
+        </div>
+        <button type="submit">Create admin account</button>
+    </form>
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/templates/srv_webhook_deliveries.html [A]
--- /dev/null
+++ b/internal/archesrv/templates/srv_webhook_deliveries.html
@@ -1,0 +1,62 @@
+{{ define "title" }}{{.Repo}} — webhook deliveries{{ end }}
+{{ define "navextra" }}<a href="/{{.Repo}}">{{.Repo}}</a> <a href="/{{.Repo}}/log">log</a> <a href="/{{.Repo}}/tree">tree</a> <a href="/{{.Repo}}/issues">issues</a> <a href="/{{.Repo}}/stacks">stacks</a> <a href="/{{.Repo}}/wiki">wiki</a> <a href="/{{.Repo}}/settings">settings</a>{{ end }}
+{{ define "srv_webhook_deliveries.html" }}{{ template "head" . }}
+<div class="container">
+    <div style="margin-bottom: 8px">
+        <a href="/{{.Repo}}/settings/webhooks"
+           style="font-size: 13px;
+                  color: #666">← webhooks</a>
+    </div>
+    <h2 style="margin-bottom: 4px">Delivery history</h2>
+    <p style="font-size: 13px;
+              color: #666;
+              margin-bottom: 16px;
+              font-family: monospace">{{.WebhookURL}}</p>
+    {{ if .Deliveries }}
+    <table>
+        <thead>
+            <tr>
+                <th>ID</th>
+                <th>Event</th>
+                <th>Status</th>
+                <th>Delivered at</th>
+                <th>Error</th>
+                <th></th>
+            </tr>
+        </thead>
+        <tbody>
+            {{ range .Deliveries }}
+            <tr>
+                <td style="color: #aaa; font-size: 12px">{{.ID}}</td>
+                <td style="font-size: 12px">{{.Event}}</td>
+                <td>
+                    {{ if .Error }}
+                    <span class="badge badge-private">error</span>
+                    {{ else if ge .ResponseCode 200 }}{{ if lt .ResponseCode 300 }}
+                    <span class="badge badge-public">{{.ResponseCode}}</span>
+                {{ else }}
+                    <span class="badge badge-draft">{{.ResponseCode}}</span>
+                    {{ end }}{{ end }}
+                </td>
+                <td style="font-size: 12px; color: #888">{{.DeliveredAt.Format "2006-01-02 15:04:05"}}</td>
+                <td style="font-size: 12px; color: #c0392b">{{.Error}}</td>
+                <td>
+                    <form method="POST"
+                          action="/{{$.Repo}}/settings/webhooks/{{$.WebhookID}}/deliveries/{{.ID}}/replay"
+                          style="display:inline">
+                        <button type="submit"
+                                style="font-size: 11px;
+                                       padding: 2px 8px;
+                                       cursor: pointer">Replay</button>
+                    </form>
+                </td>
+            </tr>
+            {{ end }}
+        </tbody>
+    </table>
+{{ else }}
+    <p style="color: #888; margin-top: 12px">No deliveries yet.</p>
+    {{ end }}
+</div>
+{{ template "foot" . }}
+{{ end }}

internal/archesrv/testhelpers_test.go [A]
--- /dev/null
+++ b/internal/archesrv/testhelpers_test.go
@@ -1,0 +1,82 @@
+package archesrv
+
+import (
+	"io"
+	"log/slog"
+	"net/http"
+	"net/http/httptest"
+	"path/filepath"
+	"testing"
+
+	"arche/internal/repo"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// newTestServerWith builds an isolated forgeServer for tests: a fresh SQLite
+// database and data directory under t.TempDir(), the default config mutated by
+// the optional fn hook, and an httptest.Server serving s.routes(). The DB and
+// the HTTP server are both torn down via t.Cleanup.
+func newTestServerWith(t *testing.T, fn func(cfg *Config)) (*forgeServer, *httptest.Server) {
+	t.Helper()
+	dir := t.TempDir()
+	db, err := openDB(filepath.Join(dir, "server.db"))
+	if err != nil {
+		t.Fatalf("openDB: %v", err)
+	}
+	t.Cleanup(func() { db.Close() })
+
+	cfg := DefaultConfig()
+	cfg.Storage.DataDir = dir
+	if fn != nil {
+		// Let the caller tweak config defaults before the server is constructed.
+		fn(&cfg)
+	}
+	s := &forgeServer{
+		db:  db,
+		cfg: cfg,
+		// Discard log output so test runs stay quiet.
+		log: slog.New(slog.NewTextHandler(io.Discard, nil)),
+	}
+	ts := httptest.NewServer(s.routes())
+	t.Cleanup(ts.Close)
+	return s, ts
+}
+
+// loginAsAdmin creates a user named "admin" with the admin flag set and
+// returns the user record together with an HTTP client holding a logged-in
+// session cookie for that user (via loginAs).
+func loginAsAdmin(t *testing.T, s *forgeServer, ts *httptest.Server) (*User, *http.Client) {
+	t.Helper()
+	admin, err := s.db.CreateUser("admin", "adminpass", true)
+	if err != nil {
+		t.Fatalf("CreateUser admin: %v", err)
+	}
+	return admin, loginAs(t, ts, "admin", "adminpass")
+}
+
+// loginAs performs a form POST to /login for the given credentials and returns
+// an *http.Client whose cookie jar carries the resulting session. The client
+// never follows redirects (CheckRedirect returns ErrUseLastResponse) so tests
+// can assert on the raw redirect responses the server emits.
+func loginAs(t *testing.T, ts *httptest.Server, username, password string) *http.Client {
+	t.Helper()
+	jar := newCookieJar()
+	client := &http.Client{
+		Jar: jar,
+		CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
+			return http.ErrUseLastResponse
+		},
+	}
+	resp, err := client.PostForm(ts.URL+"/login", map[string][]string{
+		"username": {username},
+		"password": {password},
+	})
+	if err != nil {
+		t.Fatalf("POST /login: %v", err)
+	}
+	resp.Body.Close()
+	// A successful login redirects (302 or 303); anything else is a failure.
+	if resp.StatusCode != http.StatusFound && resp.StatusCode != http.StatusSeeOther {
+		t.Fatalf("login %q: want redirect, got %d", username, resp.StatusCode)
+	}
+	return client
+}
+
+// setupRepoWithDisk creates a repository both in the database (CreateRepo with
+// the given name and visibility, empty description) and on disk (repo.Init
+// under the server's data directory), returning the DB record.
+func setupRepoWithDisk(t *testing.T, s *forgeServer, name, vis string) *RepoRecord {
+	t.Helper()
+	rec, err := s.db.CreateRepo(name, "", vis)
+	if err != nil {
+		t.Fatalf("CreateRepo %q: %v", name, err)
+	}
+	if _, err := repo.Init(filepath.Join(s.dataDir(), name)); err != nil {
+		t.Fatalf("repo.Init %q: %v", name, err)
+	}
+	return rec
+}

internal/archesrv/webhooks.go [A]
--- /dev/null
+++ b/internal/archesrv/webhooks.go
@@ -1,0 +1,263 @@
+package archesrv
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"net/http"
+	"time"
+
+	"github.com/google/uuid"
+)
+
+// WebhookRecord is one row of the webhooks table: an outbound HTTP endpoint
+// registered for a repository.
+type WebhookRecord struct {
+	ID        int64
+	RepoID    int64
+	URL       string
+	Secret    string // HMAC key; empty means deliveries are unsigned-key (still sent)
+	Events    string // event filter as stored; presumably a comma/space list — TODO confirm against settings handler
+	Active    bool
+	CreatedAt time.Time
+}
+
+// WebhookDelivery records the final outcome of one delivery attempt series
+// for a webhook (response code/body on completion, Error on terminal failure).
+type WebhookDelivery struct {
+	ID           int64
+	WebhookID    int64
+	Event        string
+	Payload      string
+	ResponseCode int
+	ResponseBody string
+	Error        string
+	DeliveredAt  time.Time
+}
+
+// PushPayload is the JSON body sent to webhook endpoints for a "push" event.
+type PushPayload struct {
+	Event     string      `json:"event"` // always "push" for this payload
+	Repo      string      `json:"repo"`
+	Pusher    string      `json:"pusher"`
+	PushID    string      `json:"push_id"` // unique per fire, see newPushID
+	Bookmark  string      `json:"bookmark"`
+	OldCommit string      `json:"old_commit"`
+	NewCommit string      `json:"new_commit"`
+	Commits   []CommitRef `json:"commits"`
+}
+
+// newPushID returns a fresh UUIDv7 (time-ordered) string identifying one
+// webhook fire; panics only if the system entropy source fails.
+func newPushID() string {
+	return uuid.Must(uuid.NewV7()).String()
+}
+
+// CommitRef is a lightweight commit summary embedded in push payloads.
+type CommitRef struct {
+	ID       string `json:"id"`
+	ChangeID string `json:"change_id"` // stable change ID, survives rebase/amend
+	Message  string `json:"message"`
+	Author   string `json:"author"`
+}
+
+// CreateWebhook inserts an active webhook row for repoID and returns the
+// in-memory record. The returned record's CreatedAt is left zero; callers
+// that need it should re-read via GetWebhook.
+func (d *DB) CreateWebhook(repoID int64, url, secret, events string) (*WebhookRecord, error) {
+	res, err := d.db.Exec(
+		"INSERT INTO webhooks(repo_id,url,secret,events,active,created_at) VALUES(?,?,?,?,1,?)",
+		repoID, url, secret, events, time.Now().Unix(),
+	)
+	if err != nil {
+		return nil, err
+	}
+	// LastInsertId cannot fail for SQLite after a successful INSERT.
+	id, _ := res.LastInsertId()
+	return &WebhookRecord{ID: id, RepoID: repoID, URL: url, Secret: secret, Events: events, Active: true}, nil
+}
+
+// ListWebhooks returns all webhooks for repoID (active and inactive),
+// ordered by insertion id.
+func (d *DB) ListWebhooks(repoID int64) ([]WebhookRecord, error) {
+	rows, err := d.db.Query(
+		"SELECT id, repo_id, url, secret, events, active, created_at FROM webhooks WHERE repo_id=? ORDER BY id",
+		repoID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []WebhookRecord
+	for rows.Next() {
+		var w WebhookRecord
+		var active int // stored as 0/1 integer in SQLite
+		var ts int64   // unix seconds
+		if err := rows.Scan(&w.ID, &w.RepoID, &w.URL, &w.Secret, &w.Events, &active, &ts); err != nil {
+			return nil, err
+		}
+		w.Active = active == 1
+		w.CreatedAt = time.Unix(ts, 0)
+		out = append(out, w)
+	}
+	return out, rows.Err()
+}
+
+// GetWebhook fetches one webhook by primary key; returns sql.ErrNoRows-wrapped
+// error from QueryRow.Scan when absent.
+func (d *DB) GetWebhook(id int64) (*WebhookRecord, error) {
+	var w WebhookRecord
+	var active int
+	var ts int64
+	err := d.db.QueryRow(
+		"SELECT id, repo_id, url, secret, events, active, created_at FROM webhooks WHERE id=?", id,
+	).Scan(&w.ID, &w.RepoID, &w.URL, &w.Secret, &w.Events, &active, &ts)
+	if err != nil {
+		return nil, err
+	}
+	w.Active = active == 1
+	w.CreatedAt = time.Unix(ts, 0)
+	return &w, nil
+}
+
+// DeleteWebhook removes the webhook row; deleting a non-existent id is not an
+// error. NOTE(review): delivery rows are not cascaded here — confirm schema
+// handles orphaned webhook_deliveries.
+func (d *DB) DeleteWebhook(id int64) error {
+	_, err := d.db.Exec("DELETE FROM webhooks WHERE id=?", id)
+	return err
+}
+
+// ListDeliveries returns up to the 50 most recent delivery records for the
+// given webhook, newest first. Nullable response columns are COALESCEd to
+// zero values so partially-populated rows scan cleanly.
+func (d *DB) ListDeliveries(webhookID int64) ([]WebhookDelivery, error) {
+	rows, err := d.db.Query(
+		`SELECT id, webhook_id, event, payload, COALESCE(response_code,0),
+		 COALESCE(response_body,''), COALESCE(error,''), delivered_at
+		 FROM webhook_deliveries WHERE webhook_id=? ORDER BY id DESC LIMIT 50`,
+		webhookID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []WebhookDelivery
+	for rows.Next() {
+		// Named del (not d) to avoid shadowing the *DB receiver.
+		var del WebhookDelivery
+		var ts int64
+		if err := rows.Scan(&del.ID, &del.WebhookID, &del.Event, &del.Payload,
+			&del.ResponseCode, &del.ResponseBody, &del.Error, &ts); err != nil {
+			return nil, err
+		}
+		del.DeliveredAt = time.Unix(ts, 0)
+		out = append(out, del)
+	}
+	return out, rows.Err()
+}
+
+// recordDelivery best-effort inserts a delivery outcome row; insert errors
+// are deliberately ignored since delivery bookkeeping must never fail a push.
+func (d *DB) recordDelivery(webhookID int64, event, payload string, code int, body, errStr string) {
+	d.db.Exec( //nolint:errcheck
+		`INSERT INTO webhook_deliveries(webhook_id,event,payload,response_code,response_body,error,delivered_at)
+		 VALUES(?,?,?,?,?,?,?)`,
+		webhookID, event, payload, code, body, errStr, time.Now().Unix(),
+	)
+}
+
+// FirePushWebhooks delivers a "push" event to every active webhook registered
+// for repoName. Delivery happens in background goroutines, so this never
+// blocks on the network; database errors silently abort (webhooks are
+// best-effort by design).
+func (d *DB) FirePushWebhooks(repoName, pusher, bookmark, oldCommit, newCommit string, commits []CommitRef) {
+	hooks, err := d.db.Query(
+		"SELECT id, url, secret FROM webhooks WHERE repo_id=(SELECT id FROM repos WHERE name=?) AND active=1",
+		repoName,
+	)
+	if err != nil {
+		return
+	}
+	type hook struct {
+		id     int64
+		url    string
+		secret string
+	}
+	var hs []hook
+	for hooks.Next() {
+		var h hook
+		// Skip rows that fail to scan instead of firing a half-filled hook.
+		if err := hooks.Scan(&h.id, &h.url, &h.secret); err != nil {
+			continue
+		}
+		hs = append(hs, h)
+	}
+	hooks.Close()
+
+	if len(hs) == 0 {
+		return
+	}
+
+	// Normalize nil so the JSON payload carries "commits": [] rather than null.
+	if commits == nil {
+		commits = []CommitRef{}
+	}
+
+	payload := PushPayload{
+		Event:     "push",
+		Repo:      repoName,
+		Pusher:    pusher,
+		PushID:    newPushID(),
+		Bookmark:  bookmark,
+		OldCommit: oldCommit,
+		NewCommit: newCommit,
+		Commits:   commits,
+	}
+	payloadBytes, _ := json.Marshal(payload)
+
+	// One goroutine per hook. Loop variables are per-iteration since Go 1.22
+	// (module builds on 1.25), so no h := h shadow copy is needed.
+	for _, h := range hs {
+		go d.deliverWebhook(h.id, h.url, h.secret, "push", payloadBytes)
+	}
+}
+
+// deliverWebhook POSTs payload to url with an HMAC-SHA256 signature header
+// derived from secret. It retries up to 5 times with exponential backoff
+// (1s, 2s, 4s, 8s between attempts), treating any 2xx as success, and records
+// the final outcome via recordDelivery. Intended to run in its own goroutine.
+func (d *DB) deliverWebhook(webhookID int64, url, secret, event string, payload []byte) {
+	sig := computeHMAC(secret, payload)
+
+	// One client shared across attempts so keep-alive connections are reused
+	// instead of being thrown away on every retry.
+	client := &http.Client{Timeout: 10 * time.Second}
+
+	var lastCode int
+	var lastBody string
+	var lastErr string
+
+	for attempt := 0; attempt < 5; attempt++ {
+		if attempt > 0 {
+			time.Sleep(time.Duration(1<<uint(attempt-1)) * time.Second)
+		}
+
+		req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
+		if err != nil {
+			lastErr = err.Error()
+			continue
+		}
+		req.Header.Set("Content-Type", "application/json")
+		req.Header.Set("X-Arche-Event", event)
+		req.Header.Set("X-Arche-Signature", "sha256="+sig)
+
+		resp, err := client.Do(req)
+		if err != nil {
+			lastErr = err.Error()
+			continue
+		}
+
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(resp.Body) //nolint:errcheck
+		resp.Body.Close()
+
+		lastCode = resp.StatusCode
+		lastBody = buf.String()
+		lastErr = "" // an HTTP-level failure is reported via lastCode, not lastErr
+
+		if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+			d.recordDelivery(webhookID, event, string(payload), lastCode, lastBody, "")
+			return
+		}
+	}
+	d.recordDelivery(webhookID, event, string(payload), lastCode, lastBody, fmt.Sprintf("failed after 5 attempts: %s", lastErr))
+	slog.Warn("webhook delivery failed", "webhook_id", webhookID, "url", url, "event", event, "last_err", lastErr, "last_code", lastCode)
+}
+
+// computeHMAC returns the hex-encoded HMAC-SHA256 of payload under secret.
+// With an empty secret the MAC is still computed (empty key), matching the
+// "no secret still delivers" behavior tested elsewhere.
+func computeHMAC(secret string, payload []byte) string {
+	mac := hmac.New(sha256.New, []byte(secret))
+	mac.Write(payload)
+	return hex.EncodeToString(mac.Sum(nil))
+}
+
+// ReplayDelivery re-sends a previously recorded delivery's payload through
+// its webhook, asynchronously. Returns an error only if the delivery or its
+// webhook row cannot be found; the actual re-delivery outcome is recorded in
+// webhook_deliveries by the background goroutine.
+func (d *DB) ReplayDelivery(deliveryID int64) error {
+	var webhookID int64
+	var event, payload string
+	err := d.db.QueryRow(
+		`SELECT webhook_id, event, payload FROM webhook_deliveries WHERE id=?`,
+		deliveryID,
+	).Scan(&webhookID, &event, &payload)
+	if err != nil {
+		return fmt.Errorf("delivery not found: %w", err)
+	}
+	hook, err := d.GetWebhook(webhookID)
+	if err != nil {
+		return fmt.Errorf("webhook not found: %w", err)
+	}
+	go d.deliverWebhook(hook.ID, hook.URL, hook.Secret, event, []byte(payload))
+	return nil
+}

internal/archesrv/webhooks_test.go [A]
--- /dev/null
+++ b/internal/archesrv/webhooks_test.go
@@ -1,0 +1,270 @@
+package archesrv
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+)
+
+// TestForgeServer_Webhook_FireAndRecord registers a webhook via the HTTP
+// settings endpoint, fires a push, and verifies the signed delivery arrives
+// and that a delivery row is eventually recorded (polled, since delivery is
+// asynchronous).
+func TestForgeServer_Webhook_FireAndRecord(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	rec := setupRepoWithDisk(t, s, "myrepo", "private")
+
+	type delivery struct {
+		body []byte
+		sig  string
+	}
+	ch := make(chan delivery, 1)
+	hookSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		body, _ := io.ReadAll(r.Body)
+		ch <- delivery{body, r.Header.Get("X-Arche-Signature")}
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer hookSrv.Close()
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/settings/webhooks", map[string][]string{
+		"url":    {hookSrv.URL},
+		"secret": {"mysecret"},
+		"events": {"push"},
+	})
+	if err != nil {
+		t.Fatalf("POST webhook: %v", err)
+	}
+	resp.Body.Close()
+
+	s.db.FirePushWebhooks("myrepo", "admin", "main", "000", "aaa", nil)
+
+	var got delivery
+	select {
+	case got = <-ch:
+	case <-time.After(3 * time.Second):
+		t.Fatal("webhook not delivered within 3s")
+	}
+
+	if !strings.HasPrefix(got.sig, "sha256=") {
+		t.Errorf("signature header malformed: %q", got.sig)
+	}
+
+	// Recompute the HMAC over the bytes actually received to verify signing.
+	expectedSig := "sha256=" + computeHMAC("mysecret", got.body)
+	if got.sig != expectedSig {
+		t.Errorf("HMAC mismatch:\nwant %q\ngot  %q", expectedSig, got.sig)
+	}
+
+	hooks, _ := s.db.ListWebhooks(rec.ID)
+	if len(hooks) == 0 {
+		t.Fatal("no webhooks found")
+	}
+	// The delivery row is written after the HTTP round trip; poll briefly.
+	var ds []WebhookDelivery
+	for deadline := time.Now().Add(3 * time.Second); time.Now().Before(deadline); time.Sleep(30 * time.Millisecond) {
+		ds, _ = s.db.ListDeliveries(hooks[0].ID)
+		if len(ds) > 0 {
+			break
+		}
+	}
+	if len(ds) == 0 {
+		t.Error("delivery should be recorded")
+	}
+}
+
+// TestForgeServer_Webhook_PushPayloadShape asserts the JSON push payload
+// carries the repo, pusher, bookmark and old-commit fields verbatim.
+func TestForgeServer_Webhook_PushPayloadShape(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	ch := make(chan []byte, 1)
+	hookSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		body, _ := io.ReadAll(r.Body)
+		ch <- body
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer hookSrv.Close()
+
+	resp, _ := client.PostForm(ts.URL+"/myrepo/settings/webhooks", map[string][]string{
+		"url": {hookSrv.URL}, "events": {"push"},
+	})
+	resp.Body.Close()
+
+	s.db.FirePushWebhooks("myrepo", "alice", "main", "aabbcc", "ddeeff", nil)
+
+	var payloadBody []byte
+	select {
+	case payloadBody = <-ch:
+	case <-time.After(3 * time.Second):
+		t.Fatal("no payload delivered within 3s")
+	}
+
+	var p PushPayload
+	if err := json.Unmarshal(payloadBody, &p); err != nil {
+		t.Fatalf("unmarshal payload: %v", err)
+	}
+	if p.Repo != "myrepo" {
+		t.Errorf("Repo: want myrepo, got %q", p.Repo)
+	}
+	if p.Pusher != "alice" {
+		t.Errorf("Pusher: want alice, got %q", p.Pusher)
+	}
+	if p.Bookmark != "main" {
+		t.Errorf("Bookmark: want main, got %q", p.Bookmark)
+	}
+	if p.OldCommit != "aabbcc" {
+		t.Errorf("OldCommit: want aabbcc, got %q", p.OldCommit)
+	}
+}
+
+// TestForgeServer_Webhook_NoSecretStillDelivers verifies that a webhook
+// registered with an empty secret is still fired (signing is optional).
+func TestForgeServer_Webhook_NoSecretStillDelivers(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	ch := make(chan []byte, 1)
+	hookSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		body, _ := io.ReadAll(r.Body)
+		ch <- body
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer hookSrv.Close()
+
+	resp, _ := client.PostForm(ts.URL+"/myrepo/settings/webhooks", map[string][]string{
+		"url":    {hookSrv.URL},
+		"secret": {""},
+		"events": {"push"},
+	})
+	resp.Body.Close()
+
+	s.db.FirePushWebhooks("myrepo", "admin", "main", "0", "1", nil)
+
+	select {
+	case <-ch:
+	case <-time.After(3 * time.Second):
+		t.Error("webhook with empty secret should still be delivered")
+	}
+}
+
+// TestForgeServer_Webhook_DeleteWebhook creates a webhook, deletes it over
+// HTTP (expecting 204), and confirms it is gone from the DB.
+func TestForgeServer_Webhook_DeleteWebhook(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	rec := setupRepoWithDisk(t, s, "myrepo", "private")
+
+	hookSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer hookSrv.Close()
+
+	resp, _ := client.PostForm(ts.URL+"/myrepo/settings/webhooks", map[string][]string{
+		"url": {hookSrv.URL},
+	})
+	resp.Body.Close()
+
+	hooks, _ := s.db.ListWebhooks(rec.ID)
+	if len(hooks) == 0 {
+		t.Fatal("webhook not created")
+	}
+	hookID := hooks[0].ID
+
+	req, _ := http.NewRequest(http.MethodDelete,
+		fmt.Sprintf("%s/myrepo/settings/webhooks/%d", ts.URL, hookID), nil)
+	resp2, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("DELETE webhook: %v", err)
+	}
+	resp2.Body.Close()
+	if resp2.StatusCode != http.StatusNoContent {
+		t.Errorf("DELETE webhook: want 204, got %d", resp2.StatusCode)
+	}
+
+	hooks2, _ := s.db.ListWebhooks(rec.ID)
+	if len(hooks2) != 0 {
+		t.Error("webhook should not exist after deletion")
+	}
+}
+
+// TestForgeServer_Webhook_ListPage checks the webhook settings page renders
+// (200) and mentions the registered webhook's URL.
+func TestForgeServer_Webhook_ListPage(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	hookSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer hookSrv.Close()
+
+	client.PostForm(ts.URL+"/myrepo/settings/webhooks", map[string][]string{"url": {hookSrv.URL}}) //nolint:errcheck
+
+	resp, err := client.Get(ts.URL + "/myrepo/settings/webhooks")
+	if err != nil {
+		t.Fatalf("GET: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("webhook list: want 200, got %d", resp.StatusCode)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	if !strings.Contains(string(body), hookSrv.URL) {
+		t.Error("webhook list page should mention the webhook URL")
+	}
+}
+
+// TestForgeServer_Webhook_DeliveriesPage fires a push, waits for delivery,
+// then checks the deliveries page for that webhook renders with 200.
+func TestForgeServer_Webhook_DeliveriesPage(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	rec := setupRepoWithDisk(t, s, "myrepo", "private")
+
+	ch := make(chan struct{}, 1)
+	hookSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		io.ReadAll(r.Body) //nolint:errcheck
+		ch <- struct{}{}
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer hookSrv.Close()
+
+	client.PostForm(ts.URL+"/myrepo/settings/webhooks", map[string][]string{"url": {hookSrv.URL}}) //nolint:errcheck
+	s.db.FirePushWebhooks("myrepo", "admin", "main", "0", "1", nil)
+
+	select {
+	case <-ch:
+	case <-time.After(3 * time.Second):
+		t.Fatal("webhook not delivered within 3s")
+	}
+
+	hooks, _ := s.db.ListWebhooks(rec.ID)
+	if len(hooks) == 0 {
+		t.Fatal("no webhook found")
+	}
+
+	resp, err := client.Get(fmt.Sprintf("%s/myrepo/settings/webhooks/%d/deliveries", ts.URL, hooks[0].ID))
+	if err != nil {
+		t.Fatalf("GET deliveries: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("deliveries page: want 200, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Webhook_NonAdminCannotCreate verifies that a user with only
+// repo write permission (not admin) gets a 4xx when creating a webhook.
+func TestForgeServer_Webhook_NonAdminCannotCreate(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "adminpass", true) //nolint:errcheck
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	alice, _ := s.db.CreateUser("alice", "pass", false)
+	rec, _ := s.db.GetRepo("myrepo")
+	s.db.SetPermission(rec.ID, alice.ID, "write") //nolint:errcheck
+	aliceClient := loginAs(t, ts, "alice", "pass")
+
+	resp, err := aliceClient.PostForm(ts.URL+"/myrepo/settings/webhooks", map[string][]string{
+		"url": {"http://example.com/hook"},
+	})
+	if err != nil {
+		t.Fatalf("POST: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode < 400 {
+		t.Errorf("non-admin should not create webhooks, got %d", resp.StatusCode)
+	}
+}

internal/archesrv/wiki_test.go [A]
--- /dev/null
+++ b/internal/archesrv/wiki_test.go
@@ -1,0 +1,148 @@
+package archesrv
+
+import (
+	"io"
+	"net/http"
+	"strings"
+	"testing"
+)
+
+// TestForgeServer_Wiki_SaveAndFetch saves a wiki page via POST and verifies a
+// subsequent GET returns 200 with the saved content.
+func TestForgeServer_Wiki_SaveAndFetch(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.PostForm(ts.URL+"/myrepo/wiki/Home", map[string][]string{
+		"content": {"# Hello\n\nThis is the home page."},
+	})
+	if err != nil {
+		t.Fatalf("POST wiki: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		t.Errorf("wiki save: got %d", resp.StatusCode)
+	}
+
+	resp2, err := client.Get(ts.URL + "/myrepo/wiki/Home")
+	if err != nil {
+		t.Fatalf("GET wiki: %v", err)
+	}
+	defer resp2.Body.Close()
+	if resp2.StatusCode != http.StatusOK {
+		t.Errorf("wiki get: want 200, got %d", resp2.StatusCode)
+	}
+	body, _ := io.ReadAll(resp2.Body)
+	if !strings.Contains(string(body), "Hello") {
+		t.Error("wiki page should contain saved content")
+	}
+}
+
+// TestForgeServer_Wiki_ListShowsPage verifies the wiki index lists a saved
+// page by name.
+func TestForgeServer_Wiki_ListShowsPage(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	client.PostForm(ts.URL+"/myrepo/wiki/MyPage", map[string][]string{"content": {"hello"}}) //nolint:errcheck
+
+	resp, err := client.Get(ts.URL + "/myrepo/wiki")
+	if err != nil {
+		t.Fatalf("GET wiki list: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("wiki list: want 200, got %d", resp.StatusCode)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	if !strings.Contains(string(body), "MyPage") {
+		t.Error("wiki list should contain the page name")
+	}
+}
+
+// TestForgeServer_Wiki_UnauthenticatedCannotSave verifies an anonymous POST
+// to a wiki page is rejected with 401 even on a public repo.
+func TestForgeServer_Wiki_UnauthenticatedCannotSave(t *testing.T) {
+	s, ts := newTestServer(t)
+	setupRepoWithDisk(t, s, "myrepo", "public")
+
+	resp, err := http.PostForm(ts.URL+"/myrepo/wiki/Home", map[string][]string{
+		"content": {"malicious content"},
+	})
+	if err != nil {
+		t.Fatalf("POST: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusUnauthorized {
+		t.Errorf("expected 401, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Wiki_PageNotFoundShows200 verifies an unknown page renders
+// a create-page form with 200 rather than a 404.
+func TestForgeServer_Wiki_PageNotFoundShows200(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	resp, err := client.Get(ts.URL + "/myrepo/wiki/DoesNotExist")
+	if err != nil {
+		t.Fatalf("GET: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("nonexistent wiki page: want 200 (new-page form), got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Wiki_OverwritePage saves a page twice and verifies the
+// second save's content wins.
+func TestForgeServer_Wiki_OverwritePage(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "private")
+
+	r1, _ := client.PostForm(ts.URL+"/myrepo/wiki/Notes", map[string][]string{"content": {"v1 content"}})
+	r1.Body.Close()
+
+	r2, _ := client.PostForm(ts.URL+"/myrepo/wiki/Notes", map[string][]string{"content": {"v2 content"}})
+	r2.Body.Close()
+
+	resp, err := client.Get(ts.URL + "/myrepo/wiki/Notes")
+	if err != nil {
+		t.Fatalf("GET: %v", err)
+	}
+	defer resp.Body.Close()
+	body, _ := io.ReadAll(resp.Body)
+	if !strings.Contains(string(body), "v2 content") {
+		t.Error("wiki page should show updated content")
+	}
+}
+
+// TestForgeServer_Wiki_MissingRepo404 verifies the wiki route 404s for a repo
+// that does not exist, even for an authenticated admin.
+func TestForgeServer_Wiki_MissingRepo404(t *testing.T) {
+	s, ts := newTestServer(t)
+	s.db.CreateUser("admin", "adminpass", true) //nolint:errcheck
+	client := loginAs(t, ts, "admin", "adminpass")
+
+	resp, err := client.Get(ts.URL + "/ghost/wiki")
+	if err != nil {
+		t.Fatalf("GET: %v", err)
+	}
+	resp.Body.Close()
+	if resp.StatusCode != http.StatusNotFound {
+		t.Errorf("missing repo: want 404, got %d", resp.StatusCode)
+	}
+}
+
+// TestForgeServer_Wiki_PublicRepoAnonCanReadWiki verifies an unauthenticated
+// reader can view a wiki page of a public repo.
+func TestForgeServer_Wiki_PublicRepoAnonCanReadWiki(t *testing.T) {
+	s, ts := newTestServer(t)
+	_, client := loginAsAdmin(t, s, ts)
+	setupRepoWithDisk(t, s, "myrepo", "public")
+
+	client.PostForm(ts.URL+"/myrepo/wiki/Public", map[string][]string{"content": {"public content"}}) //nolint:errcheck
+
+	// Plain http.Get — no session cookie, simulating an anonymous visitor.
+	resp, err := http.Get(ts.URL + "/myrepo/wiki/Public")
+	if err != nil {
+		t.Fatalf("GET: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("public wiki: anon should get 200, got %d", resp.StatusCode)
+	}
+	body, _ := io.ReadAll(resp.Body)
+	if !strings.Contains(string(body), "public content") {
+		t.Error("public wiki page should show content for anon")
+	}
+}

internal/cli/cmd_bisect.go [A]
--- /dev/null
+++ b/internal/cli/cmd_bisect.go
@@ -1,0 +1,408 @@
+package cli
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// bisectState is the on-disk state of an in-progress bisect session,
+// persisted as JSON under the repo's .arche dir (see bisectStatePath).
+type bisectState struct {
+	OriginalChangeID string     `json:"original_change_id"` // where to restore the working copy on reset/finish
+	GoodCommits      [][32]byte `json:"good_commits"`       // commits marked known-good
+	BadCommit        [32]byte   `json:"bad_commit"`
+	BadCommitSet     bool       `json:"bad_commit_set"` // distinguishes "unset" from a zero BadCommit
+}
+
+// bisectStatePath and bisectLogPath locate the session state file and the
+// human-readable mark log inside the repo's .arche directory.
+func bisectStatePath(archeDir string) string { return filepath.Join(archeDir, "bisect.json") }
+func bisectLogPath(archeDir string) string   { return filepath.Join(archeDir, "bisect.log") }
+
+// loadBisectState reads the persisted session. Returns (nil, nil) when no
+// bisect is in progress (state file absent), so callers can treat nil state
+// as "not started".
+func loadBisectState(archeDir string) (*bisectState, error) {
+	data, err := os.ReadFile(bisectStatePath(archeDir))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	var s bisectState
+	if err := json.Unmarshal(data, &s); err != nil {
+		return nil, err
+	}
+	return &s, nil
+}
+
+// saveBisectState persists the session as pretty-printed JSON (0644).
+func saveBisectState(archeDir string, s *bisectState) error {
+	data, err := json.MarshalIndent(s, "", "  ")
+	if err != nil {
+		return err
+	}
+	return os.WriteFile(bisectStatePath(archeDir), data, 0o644)
+}
+
+// clearBisectState ends the session by removing the state file; the log file
+// is intentionally kept as a record of the search.
+func clearBisectState(archeDir string) { _ = os.Remove(bisectStatePath(archeDir)) }
+
+// appendBisectLog best-effort appends one line to bisect.log; logging must
+// never abort a bisect step, so open/write errors are swallowed.
+func appendBisectLog(archeDir, line string) {
+	f, err := os.OpenFile(bisectLogPath(archeDir), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
+	if err != nil {
+		return
+	}
+	defer f.Close()
+	fmt.Fprintln(f, line)
+}
+
+// bisectAncestors returns startID plus all its reachable ancestors via a BFS
+// over commit parents, then reverses the visit order so the result runs
+// roughly oldest-first (the order bisectCandidates relies on for midpoint
+// selection). An unreadable commit truncates the walk silently.
+func bisectAncestors(r *repo.Repo, startID [32]byte) [][32]byte {
+	seen := make(map[[32]byte]bool)
+	var order [][32]byte
+	queue := [][32]byte{startID}
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		if seen[id] {
+			continue
+		}
+		seen[id] = true
+		order = append(order, id)
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			break
+		}
+		for _, p := range c.Parents {
+			if !seen[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+	// Reverse in place: BFS emits newest-first, callers want oldest-first.
+	for i, j := 0, len(order)-1; i < j; i, j = i+1, j-1 {
+		order[i], order[j] = order[j], order[i]
+	}
+	return order
+}
+
+// bisectCandidates computes the search range: ancestors of badCommit
+// (inclusive) minus every commit reachable from any good commit. The result
+// preserves bisectAncestors' oldest-first ordering; the last entry is the bad
+// commit itself.
+func bisectCandidates(r *repo.Repo, goodCommits [][32]byte, badCommit [32]byte) [][32]byte {
+	allFromBad := bisectAncestors(r, badCommit)
+
+	goodSet := make(map[[32]byte]bool)
+	for _, g := range goodCommits {
+		for _, a := range bisectAncestors(r, g) {
+			goodSet[a] = true
+		}
+	}
+
+	var out [][32]byte
+	for _, id := range allFromBad {
+		if !goodSet[id] {
+			out = append(out, id)
+		}
+	}
+	return out
+}
+
+// bisectDoStep advances the interactive bisect after a good/bad mark. It is a
+// no-op until both a bad commit and at least one good commit are set. With one
+// candidate left it declares the culprit, logs it, and clears the session
+// (returning the culprit's id); otherwise it checks out the midpoint candidate
+// and returns object.ZeroID. Note: on success the working copy is left at the
+// last checked-out commit, not restored.
+func bisectDoStep(r *repo.Repo, s *bisectState) ([32]byte, error) {
+	if !s.BadCommitSet || len(s.GoodCommits) == 0 {
+		return object.ZeroID, nil
+	}
+
+	candidates := bisectCandidates(r, s.GoodCommits, s.BadCommit)
+	if len(candidates) == 0 {
+		return object.ZeroID, fmt.Errorf("bisect range is empty — good and bad commits may not share an ancestry path")
+	}
+	if len(candidates) == 1 {
+		c, err := r.ReadCommit(candidates[0])
+		if err != nil {
+			return object.ZeroID, err
+		}
+		appendBisectLog(r.ArcheDir(), fmt.Sprintf("found ch:%s", c.ChangeID))
+		fmt.Printf("\nFound: ch:%s — %q\n", c.ChangeID, c.Message)
+		fmt.Printf("Bisect log written to %s\n", bisectLogPath(r.ArcheDir()))
+		clearBisectState(r.ArcheDir())
+		return candidates[0], nil
+	}
+
+	// Check out the midpoint of the remaining range.
+	mid := candidates[len(candidates)/2]
+	midC, err := r.ReadCommit(mid)
+	if err != nil {
+		return object.ZeroID, err
+	}
+
+	w := wc.New(r)
+	if err := w.Materialize(midC.TreeID, object.FormatChangeID(midC.ChangeID)); err != nil {
+		return object.ZeroID, fmt.Errorf("materialize: %w", err)
+	}
+	if err := r.WriteHead(object.FormatChangeID(midC.ChangeID)); err != nil {
+		return object.ZeroID, err
+	}
+
+	fmt.Printf("Bisecting: %d revisions remain. Checking out ch:%s — %s\n",
+		len(candidates), midC.ChangeID, bisectFirstLine(midC.Message))
+	return object.ZeroID, nil
+}
+
+// bisectCmd is the parent "arche bisect" command; the subcommands attached in
+// init() implement the actual workflow.
+var bisectCmd = &cobra.Command{
+	Use:   "bisect",
+	Short: "Binary search commit history to find a regression",
+	Long: `arche bisect performs a binary search through commit history to find the
+first change that introduced a regression. Operates on stable change IDs
+(survive rebase and amend) rather than content hashes.
+
+Subcommands: start, good, bad, reset, run`,
+}
+
+// bisectStartCmd begins a session: records the current head's change ID (for
+// later restore), refuses to start over an existing session, and truncates
+// any previous bisect log.
+var bisectStartCmd = &cobra.Command{
+	Use:   "start",
+	Short: "Begin a bisect session",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		if s, _ := loadBisectState(r.ArcheDir()); s != nil {
+			return fmt.Errorf("bisect already in progress — run 'arche bisect reset' first")
+		}
+
+		_, headID, err := r.HeadCommit()
+		if err != nil {
+			return err
+		}
+		headC, err := r.ReadCommit(headID)
+		if err != nil {
+			return err
+		}
+
+		s := &bisectState{OriginalChangeID: headC.ChangeID}
+		if err := saveBisectState(r.ArcheDir(), s); err != nil {
+			return err
+		}
+		_ = os.Remove(bisectLogPath(r.ArcheDir()))
+		fmt.Printf("Bisect started at ch:%s\n", headC.ChangeID)
+		fmt.Println("Mark the bad commit with 'arche bisect bad' and a known-good commit with 'arche bisect good <change-id>'.")
+		return nil
+	},
+}
+
+// bisectBadCmd marks a commit (the argument, or head when omitted) as bad,
+// persists it, logs the mark, and advances the bisect one step.
+var bisectBadCmd = &cobra.Command{
+	Use:   "bad [change-id]",
+	Short: "Mark a commit as bad (broken)",
+	Args:  cobra.MaximumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		s, err := loadBisectState(r.ArcheDir())
+		if err != nil {
+			return err
+		}
+		if s == nil {
+			return fmt.Errorf("no bisect in progress — run 'arche bisect start' first")
+		}
+
+		// Explicit argument wins; otherwise mark the current head.
+		var targetID [32]byte
+		if len(args) > 0 {
+			targetID, err = resolveRef(r, args[0])
+		} else {
+			_, targetID, err = r.HeadCommit()
+		}
+		if err != nil {
+			return err
+		}
+
+		c, err := r.ReadCommit(targetID)
+		if err != nil {
+			return err
+		}
+
+		s.BadCommit = targetID
+		s.BadCommitSet = true
+		if err := saveBisectState(r.ArcheDir(), s); err != nil {
+			return err
+		}
+		appendBisectLog(r.ArcheDir(), fmt.Sprintf("bad  ch:%s", c.ChangeID))
+		fmt.Printf("Marked bad: ch:%s — %s\n", c.ChangeID, bisectFirstLine(c.Message))
+
+		_, err = bisectDoStep(r, s)
+		return err
+	},
+}
+
+// bisectGoodCmd appends a commit (argument, or head) to the known-good set,
+// persists it, logs the mark, and advances the bisect one step.
+var bisectGoodCmd = &cobra.Command{
+	Use:   "good [change-id]",
+	Short: "Mark a commit as good (working)",
+	Args:  cobra.MaximumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		s, err := loadBisectState(r.ArcheDir())
+		if err != nil {
+			return err
+		}
+		if s == nil {
+			return fmt.Errorf("no bisect in progress — run 'arche bisect start' first")
+		}
+
+		var targetID [32]byte
+		if len(args) > 0 {
+			targetID, err = resolveRef(r, args[0])
+		} else {
+			_, targetID, err = r.HeadCommit()
+		}
+		if err != nil {
+			return err
+		}
+
+		c, err := r.ReadCommit(targetID)
+		if err != nil {
+			return err
+		}
+
+		s.GoodCommits = append(s.GoodCommits, targetID)
+		if err := saveBisectState(r.ArcheDir(), s); err != nil {
+			return err
+		}
+		appendBisectLog(r.ArcheDir(), fmt.Sprintf("good ch:%s", c.ChangeID))
+		fmt.Printf("Marked good: ch:%s — %s\n", c.ChangeID, bisectFirstLine(c.Message))
+
+		_, err = bisectDoStep(r, s)
+		return err
+	},
+}
+
+// bisectResetCmd ends the session and restores the working copy to the
+// change recorded at start. Restore failures are deliberately ignored so the
+// session state is always cleared.
+var bisectResetCmd = &cobra.Command{
+	Use:   "reset",
+	Short: "End bisect and restore the original working copy",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		s, err := loadBisectState(r.ArcheDir())
+		if err != nil {
+			return err
+		}
+		if s == nil {
+			fmt.Println("No bisect in progress.")
+			return nil
+		}
+
+		if s.OriginalChangeID != "" {
+			origID, resErr := resolveRef(r, "ch:"+s.OriginalChangeID)
+			if resErr == nil {
+				origC, rdErr := r.ReadCommit(origID)
+				if rdErr == nil {
+					w := wc.New(r)
+					_ = w.Materialize(origC.TreeID, object.FormatChangeID(origC.ChangeID))
+					_ = r.WriteHead(object.FormatChangeID(origC.ChangeID))
+				}
+			}
+		}
+
+		clearBisectState(r.ArcheDir())
+		fmt.Printf("Bisect reset. Restored to ch:%s\n", s.OriginalChangeID)
+		return nil
+	},
+}
+
+// bisectRunCmd automates the search: it binary-searches the candidate list,
+// checking out each midpoint and running the user command there (exit 0 =
+// good). DisableFlagParsing lets the command's own flags pass through intact.
+// When the search converges it restores the original working copy, clears the
+// session, and reports the first bad change.
+var bisectRunCmd = &cobra.Command{
+	Use:                "run <command> [args...]",
+	Short:              "Automate bisect: run command at each candidate commit",
+	Long:               "Exit code 0 = good, non-zero = bad. Arche restores the original working copy when done.",
+	Args:               cobra.MinimumNArgs(1),
+	DisableFlagParsing: true,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		s, err := loadBisectState(r.ArcheDir())
+		if err != nil {
+			return err
+		}
+		if s == nil {
+			return fmt.Errorf("no bisect in progress — run 'arche bisect start' first")
+		}
+		if !s.BadCommitSet || len(s.GoodCommits) == 0 {
+			return fmt.Errorf("set good and bad commits before running automated bisect")
+		}
+
+		candidates := bisectCandidates(r, s.GoodCommits, s.BadCommit)
+		if len(candidates) == 0 {
+			return fmt.Errorf("bisect range is empty")
+		}
+
+		w := wc.New(r)
+		var found [32]byte
+		// Standard first-bad binary search over the oldest-first candidate
+		// list: good moves lo past mid, bad records mid and shrinks hi.
+		lo, hi := 0, len(candidates)
+		for lo < hi {
+			mid := (lo + hi) / 2
+			midID := candidates[mid]
+			midC, cErr := r.ReadCommit(midID)
+			if cErr != nil {
+				return cErr
+			}
+			if err := w.Materialize(midC.TreeID, object.FormatChangeID(midC.ChangeID)); err != nil {
+				return err
+			}
+			if err := r.WriteHead(object.FormatChangeID(midC.ChangeID)); err != nil {
+				return err
+			}
+			fmt.Printf("Bisecting: testing ch:%s — %s\n", midC.ChangeID, bisectFirstLine(midC.Message))
+
+			testCmd := exec.Command(args[0], args[1:]...) //nolint:gosec
+			testCmd.Stdout = os.Stdout
+			testCmd.Stderr = os.Stderr
+			testCmd.Dir = r.Root
+			runErr := testCmd.Run()
+			isGood := runErr == nil
+			mark := "bad"
+			if isGood {
+				mark = "good"
+			}
+			appendBisectLog(r.ArcheDir(), fmt.Sprintf("%s ch:%s", mark, midC.ChangeID))
+			fmt.Printf("  → %s\n", mark)
+
+			if isGood {
+				lo = mid + 1
+			} else {
+				found = midID
+				hi = mid
+			}
+		}
+
+		// Best-effort restore of the original working copy before reporting.
+		origID, resErr := resolveRef(r, "ch:"+s.OriginalChangeID)
+		if resErr == nil {
+			origC, _ := r.ReadCommit(origID)
+			if origC != nil {
+				_ = w.Materialize(origC.TreeID, object.FormatChangeID(origC.ChangeID))
+				_ = r.WriteHead(object.FormatChangeID(origC.ChangeID))
+			}
+		}
+		clearBisectState(r.ArcheDir())
+
+		if found != (object.ZeroID) {
+			foundC, _ := r.ReadCommit(found)
+			if foundC != nil {
+				fmt.Printf("\nFound: ch:%s — %q\n", foundC.ChangeID, foundC.Message)
+				fmt.Printf("Bisect log written to %s\n", bisectLogPath(r.ArcheDir()))
+				return nil
+			}
+		}
+		fmt.Println("Bisect complete — no bad commit found in range.")
+		return nil
+	},
+}
+
+// bisectFirstLine returns s up to (not including) the first newline — the
+// commit message subject for one-line display.
+func bisectFirstLine(s string) string {
+	if i := strings.IndexByte(s, '\n'); i >= 0 {
+		return s[:i]
+	}
+	return s
+}
+
+// init wires the bisect subcommands onto the parent bisect command.
+func init() {
+	bisectCmd.AddCommand(bisectStartCmd, bisectBadCmd, bisectGoodCmd, bisectResetCmd, bisectRunCmd)
+}

internal/cli/cmd_bookmark.go [A]
--- /dev/null
+++ b/internal/cli/cmd_bookmark.go
@@ -1,0 +1,147 @@
+package cli
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"arche/internal/repo"
+	"arche/internal/store"
+
+	"github.com/spf13/cobra"
+)
+
+// bookmarkCmd is the parent "arche bookmark" command. It carries no behavior
+// of its own; the set/delete/list subcommands are attached in init below.
+var bookmarkCmd = &cobra.Command{
+	Use:   "bookmark",
+	Short: "Bookmark subcommands",
+}
+
+// bookmarkSetCmd points a new or existing bookmark at the current HEAD
+// commit. The bookmark write and an operation-log entry (with before/after
+// ref-state snapshots, enabling undo) are committed in a single store
+// transaction.
+var bookmarkSetCmd = &cobra.Command{
+	Use:   "set <name>",
+	Short: "Create or update a bookmark pointing to the current commit",
+	Args:  cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		_, commitID, err := r.HeadCommit()
+		if err != nil {
+			return err
+		}
+
+		// Best-effort snapshot of the ref state before the change; an error
+		// leaves "before" empty but does not abort the command.
+		before, _ := r.CaptureRefState()
+
+		// Build the "after" snapshot by applying this update to a decoded
+		// copy of "before". NOTE(review): afterState shares the Bookmarks map
+		// with beforeState when non-nil, but beforeState is not used again,
+		// so the aliasing is harmless.
+		var beforeState repo.RefState
+		_ = json.Unmarshal([]byte(before), &beforeState)
+		afterState := beforeState
+		if afterState.Bookmarks == nil {
+			afterState.Bookmarks = make(map[string]string)
+		}
+		afterState.Bookmarks[args[0]] = fmt.Sprintf("%x", commitID)
+		afterBytes, _ := json.Marshal(afterState)
+		after := string(afterBytes)
+
+		// Bookmark row and operation-log entry commit atomically.
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+		if err := r.Store.SetBookmark(tx, store.Bookmark{Name: args[0], CommitID: commitID}); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		op := store.Operation{
+			Kind: "bookmark-set", Timestamp: time.Now().Unix(), Before: before, After: after,
+			Metadata: fmt.Sprintf("bookmark %q → %x", args[0], commitID[:6]),
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		fmt.Printf("Bookmark '%s' set to %x\n", args[0], commitID[:8])
+		return nil
+	},
+}
+
+// bookmarkDeleteCmd removes a bookmark by name. Like bookmark set, the
+// deletion and its operation-log entry (with before/after ref-state
+// snapshots for undo) are committed in one store transaction.
+var bookmarkDeleteCmd = &cobra.Command{
+	Use:     "delete <name>",
+	Aliases: []string{"rm", "remove"},
+	Short:   "Delete a bookmark",
+	Args:    cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// Best-effort pre-change snapshot; errors leave "before" empty.
+		before, _ := r.CaptureRefState()
+
+		// Derive the "after" snapshot by deleting the entry from a decoded
+		// copy of "before" (delete on a nil map would be fine too; the
+		// make() keeps the JSON shape consistent).
+		var beforeState repo.RefState
+		_ = json.Unmarshal([]byte(before), &beforeState)
+		afterState := beforeState
+		if afterState.Bookmarks == nil {
+			afterState.Bookmarks = make(map[string]string)
+		}
+		delete(afterState.Bookmarks, args[0])
+		afterBytes, _ := json.Marshal(afterState)
+		after := string(afterBytes)
+
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+		if err := r.Store.DeleteBookmark(tx, args[0]); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		op := store.Operation{
+			Kind: "bookmark-delete", Timestamp: time.Now().Unix(), Before: before, After: after,
+			Metadata: fmt.Sprintf("deleted bookmark %q", args[0]),
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+		fmt.Printf("Bookmark '%s' deleted.\n", args[0])
+		return nil
+	},
+}
+
+// bookmarkListCmd prints every bookmark with its short commit hash. The
+// bookmark pointing at the current HEAD commit is marked with "> ", and a
+// remote-tracking bookmark additionally shows its remote name.
+var bookmarkListCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all bookmarks",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		bookmarks, err := r.Store.ListBookmarks()
+		if err != nil {
+			return err
+		}
+		if len(bookmarks) == 0 {
+			fmt.Println("No bookmarks.")
+			return nil
+		}
+
+		// Head lookup is best-effort: on error headID stays zero and no
+		// bookmark gets the "> " marker.
+		_, headID, _ := r.HeadCommit()
+
+		for _, b := range bookmarks {
+			prefix := "  "
+			if b.CommitID == headID {
+				prefix = "> "
+			}
+			var suffix string
+			if b.Remote != "" {
+				suffix = " (remote: " + b.Remote + ")"
+			}
+			fmt.Printf("%s%-24s  %x%s\n", prefix, b.Name, b.CommitID[:8], suffix)
+		}
+		return nil
+	},
+}
+
+// init attaches the bookmark subcommands to the parent bookmark command.
+func init() {
+	bookmarkCmd.AddCommand(bookmarkSetCmd, bookmarkDeleteCmd, bookmarkListCmd)
+}

internal/cli/cmd_bundle.go [A]
--- /dev/null
+++ b/internal/cli/cmd_bundle.go
@@ -1,0 +1,152 @@
+package cli
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+// bundleCmd packs the repository's databases, config and pack files into a
+// single tar+gzip archive. The archive is written with O_EXCL (never
+// overwrites), removed on any mid-stream failure, and reported with its
+// final on-disk size.
+var bundleCmd = &cobra.Command{
+	Use:   "bundle [output]",
+	Short: "Pack the repository into a single portable archive",
+	Long: `Create a .arche-bundle tar+gzip archive containing every database and pack
+file needed to fully restore or clone the repository.  The archive includes:
+
+  store.db    - the main object and metadata database
+  issues.db   - the issue-tracker database (if present)
+  config.toml - repository configuration
+  packs/      - all pack files
+
+The output file defaults to <repo-name>-<date>.arche-bundle when not given.`,
+	Args: cobra.MaximumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		archeDir := r.ArcheDir()
+
+		// Output name defaults to <repo-name>-<YYYYMMDD>.arche-bundle.
+		var out string
+		if len(args) > 0 {
+			out = args[0]
+		} else {
+			name := filepath.Base(r.Root)
+			dateStr := time.Now().Format("20060102")
+			out = fmt.Sprintf("%s-%s.arche-bundle", name, dateStr)
+		}
+
+		// O_EXCL: refuse to clobber an existing bundle file.
+		f, err := os.OpenFile(out, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o644)
+		if err != nil {
+			return fmt.Errorf("create bundle file: %w", err)
+		}
+		// Safety net for the error returns below; the success path closes f
+		// explicitly (a second Close on *os.File just returns an error,
+		// which the defer discards).
+		defer f.Close()
+
+		gz := gzip.NewWriter(f)
+		tw := tar.NewWriter(gz)
+
+		// Single-file entries. Each is optional: a missing file is skipped,
+		// but any other I/O error aborts and removes the partial bundle.
+		type entry struct{ name, path string }
+		candidates := []entry{
+			{"store.db", filepath.Join(archeDir, "store.db")},
+			{"issues.db", filepath.Join(archeDir, "issues.db")},
+			{"config.toml", filepath.Join(archeDir, "config.toml")},
+		}
+
+		for _, e := range candidates {
+			if err := addFileToTar(tw, e.name, e.path); err != nil {
+				if !os.IsNotExist(err) {
+					tw.Close()
+					gz.Close()
+					os.Remove(out)
+					return fmt.Errorf("bundle %s: %w", e.name, err)
+				}
+			}
+		}
+
+		// Pack directory is optional as well.
+		packsPath := filepath.Join(archeDir, "packs")
+		if info, err := os.Stat(packsPath); err == nil && info.IsDir() {
+			if err := addDirToTar(tw, "packs", packsPath); err != nil {
+				tw.Close()
+				gz.Close()
+				os.Remove(out)
+				return fmt.Errorf("bundle packs: %w", err)
+			}
+		}
+
+		// Close innermost-out; each Close flushes buffered data, so a
+		// failure means the archive is incomplete and must be removed.
+		if err := tw.Close(); err != nil {
+			gz.Close()
+			os.Remove(out)
+			return err
+		}
+		if err := gz.Close(); err != nil {
+			os.Remove(out)
+			return err
+		}
+		// Close the file explicitly so a failed final flush is reported
+		// instead of being swallowed by the deferred Close.
+		if err := f.Close(); err != nil {
+			os.Remove(out)
+			return fmt.Errorf("close bundle file: %w", err)
+		}
+
+		// Report the finished file's size from the filesystem.
+		size := int64(0)
+		if fi, err := os.Stat(out); err == nil {
+			size = fi.Size()
+		}
+		fmt.Printf("Created %s (%s)\n", out, humanBytes(size))
+		return nil
+	},
+}
+
+// addFileToTar writes the file at path into tw under the archive name
+// "name". Open/Stat errors are returned unwrapped so callers can detect a
+// missing file with os.IsNotExist.
+func addFileToTar(tw *tar.Writer, name, path string) error {
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	fi, err := f.Stat()
+	if err != nil {
+		return err
+	}
+
+	// FileInfoHeader fills in size, permission bits and mod time correctly.
+	// The previous hand-built header stored raw fi.Mode(), which can carry
+	// non-permission file-type bits into the tar mode field.
+	hdr, err := tar.FileInfoHeader(fi, "")
+	if err != nil {
+		return err
+	}
+	hdr.Name = name
+	if err := tw.WriteHeader(hdr); err != nil {
+		return err
+	}
+	_, err = io.Copy(tw, f)
+	return err
+}
+
+// addDirToTar recursively adds every non-directory entry under dir to tw,
+// with archive names rooted at prefix (prefix/relative/path).
+func addDirToTar(tw *tar.Writer, prefix, dir string) error {
+	// WalkDir avoids the extra per-entry os.Lstat that filepath.Walk does;
+	// addFileToTar stats each file itself anyway.
+	return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if d.IsDir() {
+			return nil
+		}
+		rel, err := filepath.Rel(dir, path)
+		if err != nil {
+			return err
+		}
+		return addFileToTar(tw, filepath.Join(prefix, rel), path)
+	})
+}
+
+// humanBytes renders n as an IEC size string, e.g. "1.5 KiB" or "3.0 MiB".
+// Values below 1024 are printed as plain bytes.
+func humanBytes(n int64) string {
+	const unit = 1024
+	if n < unit {
+		return fmt.Sprintf("%d B", n)
+	}
+	// Repeatedly divide by 1024 (a power of two, so float division stays
+	// exact) until the value drops below one unit.
+	value := float64(n)
+	steps := 0
+	for value >= unit {
+		value /= unit
+		steps++
+	}
+	return fmt.Sprintf("%.1f %ciB", value, "KMGTPE"[steps-1])
+}

internal/cli/cmd_clone.go [A]
--- /dev/null
+++ b/internal/cli/cmd_clone.go
@@ -1,0 +1,96 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"arche/internal/repo"
+	"arche/internal/syncpkg"
+
+	"github.com/spf13/cobra"
+)
+
+// cloneCmd initializes a fresh local repository, pulls everything from the
+// remote, and records the remote as "origin" in the repo config. On pull
+// failure the partially-created directory is removed.
+var cloneCmd = &cobra.Command{
+	Use:   "clone <url> [directory]",
+	Short: "Clone a remote Arche repository",
+	Long: `Clone a remote Arche repository into a new directory.
+
+The URL may use any scheme supported by arche sync:
+  arche clone http://host:8765         # bearer token over HTTP
+  arche clone arche+ssh://host/repo    # SSH with key auth
+
+The directory defaults to the last path component of the URL.
+
+Examples:
+  arche clone http://forge.example.com/myrepo
+  arche clone arche+ssh://forge.example.com/simao/project local-dir
+  arche clone http://forge.example.com/myrepo --token mysecret`,
+	Args: cobra.RangeArgs(1, 2),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		remoteURL := args[0]
+		token, _ := cmd.Flags().GetString("token")
+
+		// Destination directory: explicit second argument, otherwise derived
+		// from the URL's last path component.
+		dir := ""
+		if len(args) == 2 {
+			dir = args[1]
+		} else {
+			dir = guessLocalDir(remoteURL)
+		}
+
+		// Refuse to clone over anything that already exists.
+		if _, err := os.Stat(dir); err == nil {
+			return fmt.Errorf("destination %q already exists", dir)
+		}
+
+		fmt.Printf("arche clone: cloning %s into %s …\n", remoteURL, dir)
+
+		r, err := repo.Init(dir)
+		if err != nil {
+			return fmt.Errorf("init: %w", err)
+		}
+		defer r.Close()
+
+		client := syncpkg.NewClient(r, remoteURL, token)
+		if err := client.Pull(); err != nil {
+			// Clean up the half-initialized clone so a retry starts fresh.
+			os.RemoveAll(dir) //nolint:errcheck
+			return fmt.Errorf("pull: %w", err)
+		}
+
+		// Record the remote as "origin"; failure to persist the config is a
+		// warning only — the clone itself already succeeded.
+		r.Cfg.Remotes = append(r.Cfg.Remotes, repo.RemoteConfig{
+			Name:  "origin",
+			URL:   remoteURL,
+			Token: token,
+		})
+		if err := r.SaveConfig(); err != nil {
+			fmt.Fprintf(os.Stderr, "arche clone: warning: could not save remote config: %v\n", err)
+		}
+
+		// Head lookup is best-effort; an error prints an empty ID.
+		headCID, _ := r.Head()
+		fmt.Printf("arche clone: done. Working copy at %s\n", headCID)
+		fmt.Printf("cd %s && arche log\n", dir)
+		return nil
+	},
+}
+
+// guessLocalDir derives a local directory name from a remote URL: it strips
+// any known scheme prefix, takes the last path component, drops a trailing
+// ".git" suffix, and falls back to "repo" when nothing remains.
+func guessLocalDir(rawURL string) string {
+	trimmed := rawURL
+	for _, scheme := range []string{"arche+ssh://", "arche://", "https://", "http://"} {
+		trimmed = strings.TrimPrefix(trimmed, scheme)
+	}
+
+	trimmed = strings.TrimRight(trimmed, "/")
+	if slash := strings.LastIndex(trimmed, "/"); slash != -1 {
+		trimmed = trimmed[slash+1:]
+	}
+
+	trimmed = strings.TrimSuffix(trimmed, ".git")
+	if trimmed == "" {
+		trimmed = "repo"
+	}
+	return filepath.Clean(trimmed)
+}
+
+// init registers the --token flag used for HTTP bearer-token auth on clone.
+func init() {
+	cloneCmd.Flags().String("token", "", "bearer token for HTTP auth")
+}

internal/cli/cmd_co.go [A]
--- /dev/null
+++ b/internal/cli/cmd_co.go
@@ -1,0 +1,67 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+
+	"arche/internal/gitcompat"
+	"arche/internal/object"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// coCmd checks out a commit: it materializes the commit's tree into the
+// working directory, then updates HEAD to the commit's change ID, then
+// (when git compat is on) mirrors the checkout into git. The materialize →
+// WriteHead order matters: HEAD only moves once the tree is on disk.
+var coCmd = &cobra.Command{
+	Use:     "co <change-id|hash|bookmark>",
+	Aliases: []string{"checkout", "switch"},
+	Short:   "Check out a commit by change ID, hash prefix, or bookmark",
+	Long: `Update the working directory to reflect the named commit. HEAD is updated to
+that commit's change ID. Since HEAD always contains a change ID (never a
+bookmark name), there is no "detached HEAD" state - every checkout is
+structurally identical.`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		commitID, err := resolveRef(r, args[0])
+		if err != nil {
+			return err
+		}
+
+		c, err := r.ReadCommit(commitID)
+		if err != nil {
+			return err
+		}
+
+		w := wc.New(r)
+		newChangeID := object.FormatChangeID(c.ChangeID)
+		if err := w.Materialize(c.TreeID, newChangeID); err != nil {
+			return fmt.Errorf("materialize: %w", err)
+		}
+
+		if err := r.WriteHead(newChangeID); err != nil {
+			return err
+		}
+
+		// Git mirroring is best-effort: a failure is reported but does not
+		// undo the arche checkout that already succeeded.
+		if r.Cfg.Git.Enabled {
+			if err := gitcompat.CheckoutCommit(r.Root, r.ArcheDir(), commitID); err != nil {
+				fmt.Fprintf(os.Stderr, "arche: git checkout failed: %v\n", err)
+			}
+		}
+
+		fmt.Printf("Checked out ch:%s -- %s\n", c.ChangeID, c.Message)
+		// List every bookmark pointing at this commit; suggest creating one
+		// if none does. Bookmark listing errors are ignored (cosmetic only).
+		bms, _ := r.Store.ListBookmarks()
+		var hasBM bool
+		for _, bm := range bms {
+			if bm.CommitID == commitID {
+				hasBM = true
+				fmt.Printf("  (bookmark: %s)\n", bm.Name)
+			}
+		}
+		if !hasBM {
+			fmt.Println("  No bookmark points here. Create one with: arche bookmark set <name>")
+		}
+		return nil
+	},
+}

internal/cli/cmd_diff.go [A]
--- /dev/null
+++ b/internal/cli/cmd_diff.go
@@ -1,0 +1,91 @@
+package cli
+
+import (
+	"fmt"
+
+	"arche/internal/diff"
+
+	"github.com/spf13/cobra"
+)
+
+// diffCmd shows tree-level diffs. With no args it diffs HEAD against its
+// first parent; with one arg, that commit against its first parent; with
+// two args, from the first commit to the second.
+var diffCmd = &cobra.Command{
+	// Usage now reflects that up to two commits are accepted (Args below is
+	// MaximumNArgs(2); the old usage string advertised only one).
+	Use:   "diff [commit-a] [commit-b]",
+	Short: "Diff working copy against HEAD, or two commits",
+	Long: `With no arguments, diffs the current working copy against its parent commit.
+With one argument, diffs that commit against its first parent.
+With two arguments, diffs from the first commit to the second.`,
+	Args: cobra.MaximumNArgs(2),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// [32]byte matches the commit/tree ID type used throughout the
+		// package (the previous [0x20]byte was the same type, just spelled
+		// inconsistently). A zero treeA means "no parent" — e.g. diffing a
+		// root commit. NOTE(review): assumes diff.TreeDiff treats the zero
+		// ID as an empty tree — confirm in internal/diff.
+		var treeA, treeB [32]byte
+
+		switch len(args) {
+		case 0:
+			head, _, err := r.HeadCommit()
+			if err != nil {
+				return err
+			}
+			treeB = head.TreeID
+			if len(head.Parents) > 0 {
+				parent, err := r.ReadCommit(head.Parents[0])
+				if err != nil {
+					return err
+				}
+				treeA = parent.TreeID
+			}
+
+		case 1:
+			id, err := resolveRef(r, args[0])
+			if err != nil {
+				return err
+			}
+			c, err := r.ReadCommit(id)
+			if err != nil {
+				return err
+			}
+			treeB = c.TreeID
+			if len(c.Parents) > 0 {
+				parent, err := r.ReadCommit(c.Parents[0])
+				if err != nil {
+					return err
+				}
+				treeA = parent.TreeID
+			}
+
+		case 2:
+			idA, err := resolveRef(r, args[0])
+			if err != nil {
+				return err
+			}
+			idB, err := resolveRef(r, args[1])
+			if err != nil {
+				return err
+			}
+			cA, err := r.ReadCommit(idA)
+			if err != nil {
+				return err
+			}
+			cB, err := r.ReadCommit(idB)
+			if err != nil {
+				return err
+			}
+			treeA = cA.TreeID
+			treeB = cB.TreeID
+		}
+
+		diffs, err := diff.TreeDiff(r, treeA, treeB)
+		if err != nil {
+			return err
+		}
+		if len(diffs) == 0 {
+			fmt.Println("(no changes)")
+			return nil
+		}
+		for _, d := range diffs {
+			fmt.Print(d.Patch)
+		}
+		return nil
+	},
+}

internal/cli/cmd_explain.go [A]
--- /dev/null
+++ b/internal/cli/cmd_explain.go
@@ -1,0 +1,202 @@
+package cli
+
+import (
+	"encoding/hex"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	"arche/internal/diff"
+	"arche/internal/object"
+
+	"github.com/spf13/cobra"
+)
+
+// explainCmd aggregates everything known about one change: header fields,
+// bookmarks, obsolescence chain, linked issues, a per-file diffstat, and
+// related operation-log entries. Lookups after the commit read are
+// best-effort — a failing section is simply omitted.
+var explainCmd = &cobra.Command{
+	Use:   "explain <change-id>",
+	Short: "Show the full story of a change: diff, history, bookmarks, issues",
+	Long: `arche explain <change-id> shows everything Arche knows about a unit of work
+in a single command — the VCS equivalent of "show me all the context for this
+change." It gathers information from the object store, obsolescence markers,
+bookmark table, issue tracker, and operation log.
+
+Accepts change IDs (ch:xxxx), hash prefixes, bookmarks, and relative
+addresses (@ and @-N).`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		commitID, err := resolveRef(r, args[0])
+		if err != nil {
+			return err
+		}
+
+		c, err := r.ReadCommit(commitID)
+		if err != nil {
+			return fmt.Errorf("read commit: %w", err)
+		}
+
+		// Best-effort; on error the zero phase value is printed.
+		phase, _ := r.Store.GetPhase(commitID)
+
+		fmt.Printf("Change:  ch:%s\n", c.ChangeID)
+		fmt.Printf("Commit:  %s\n", hex.EncodeToString(commitID[:]))
+		fmt.Printf("Author:  %s <%s>\n", c.Author.Name, c.Author.Email)
+		fmt.Printf("Date:    %s\n", c.Author.Timestamp.Format(time.RFC3339))
+		fmt.Printf("Phase:   %s\n", phase)
+		fmt.Printf("Message: %s\n", c.Message)
+
+		// Bookmarks pointing at exactly this commit, sorted for stable output.
+		bms, _ := r.Store.ListBookmarks()
+		var bmNames []string
+
+		for _, bm := range bms {
+			if bm.CommitID == commitID {
+				bmNames = append(bmNames, bm.Name)
+			}
+		}
+		if len(bmNames) > 0 {
+			sort.Strings(bmNames)
+			fmt.Printf("Bookmarks: %s\n", strings.Join(bmNames, ", "))
+		}
+
+		// Obsolescence markers in which this commit appears, either as the
+		// rewritten predecessor or as one of the successors. Unreadable or
+		// undecodable markers are skipped.
+		obsoleteIDs, _ := r.Store.ListObjectsByKind(string(object.KindObsolete))
+		type obsEntry struct {
+			pred   [32]byte
+			succs  [][32]byte
+			reason string
+			ts     int64
+		}
+		var chain []obsEntry
+		for _, oid := range obsoleteIDs {
+			_, raw, err := r.Store.ReadObject(oid)
+			if err != nil {
+				continue
+			}
+			om, err := object.DecodeObsolete(raw)
+			if err != nil {
+				continue
+			}
+			involved := om.Predecessor == commitID
+			for _, s := range om.Successors {
+				if s == commitID {
+					involved = true
+				}
+			}
+			if involved {
+				chain = append(chain, obsEntry{
+					pred:   om.Predecessor,
+					succs:  om.Successors,
+					reason: om.Reason,
+					ts:     om.Timestamp,
+				})
+			}
+		}
+
+		if len(chain) > 0 {
+			fmt.Println("\nObsolescence chain:")
+			for _, e := range chain {
+				ts := time.Unix(e.ts, 0).Format("2006-01-02 15:04:05")
+				predC, _ := r.ReadCommit(e.pred)
+				predMsg := ""
+				if predC != nil {
+					predMsg = " — " + bisectFirstLine(predC.Message)
+				}
+				fmt.Printf("  %s [%s]%s\n", object.Short(e.pred), ts, predMsg)
+				for _, s := range e.succs {
+					succC, _ := r.ReadCommit(s)
+					succMsg := ""
+					if succC != nil {
+						succMsg = " — " + bisectFirstLine(succC.Message)
+					}
+					reason := e.reason
+					if reason == "" {
+						reason = "rewrite"
+					}
+					fmt.Printf("    → %s (%s)%s\n", object.Short(s), reason, succMsg)
+				}
+			}
+		}
+
+		// Issues whose event payloads mention this change, matched textually
+		// by change-ID string or 12-hex-char commit prefix. Map value is the
+		// linking event kind; once recorded as "ref" it is kept, otherwise
+		// later events overwrite it. (The previously declared issueRef
+		// struct was never used and has been removed.)
+		issueEventIDs, _ := r.Store.ListObjectsByKind(string(object.KindIssueEvent))
+		issueRefSet := make(map[string]string)
+		changeIDStr := "ch:" + c.ChangeID
+		commitHex := hex.EncodeToString(commitID[:])
+
+		for _, eid := range issueEventIDs {
+			_, raw, err := r.Store.ReadObject(eid)
+			if err != nil {
+				continue
+			}
+			ev, err := object.DecodeIssueEvent(raw)
+			if err != nil {
+				continue
+			}
+			payload := string(ev.Payload)
+			if strings.Contains(payload, changeIDStr) ||
+				strings.Contains(payload, commitHex[:12]) {
+				existing, ok := issueRefSet[ev.IssueID]
+				if !ok || existing != "ref" {
+					issueRefSet[ev.IssueID] = ev.Kind
+				}
+			}
+		}
+
+		if len(issueRefSet) > 0 {
+			fmt.Println("\nLinked issues:")
+			issueIDs := make([]string, 0, len(issueRefSet))
+			for id := range issueRefSet {
+				issueIDs = append(issueIDs, id)
+			}
+			sort.Strings(issueIDs)
+			for _, id := range issueIDs {
+				fmt.Printf("  #%s (via %s)\n", id, issueRefSet[id])
+			}
+		}
+
+		// Per-file diffstat: count +/- lines of each patch, excluding the
+		// "+++"/"---" file headers. (A dead strings.Count into a discarded
+		// variable was removed here.)
+		diffs, err := diff.CommitDiff(r, commitID)
+		if err == nil && len(diffs) > 0 {
+			fmt.Println("\nDiff:")
+			for _, d := range diffs {
+				added, removed := 0, 0
+				for _, line := range strings.Split(d.Patch, "\n") {
+					if strings.HasPrefix(line, "+") && !strings.HasPrefix(line, "+++") {
+						added++
+					} else if strings.HasPrefix(line, "-") && !strings.HasPrefix(line, "---") {
+						removed++
+					}
+				}
+				fmt.Printf("  %c %s (+%d -%d)\n", d.Status, d.Path, added, removed)
+			}
+		}
+
+		// Operation-log entries that mention this commit (via snapshot text
+		// or metadata), scanning the 200 most recent operations.
+		ops, _ := r.Store.ListOperations(200)
+		var relOps []string
+		for _, op := range ops {
+			if strings.Contains(op.Before, commitHex[:12]) ||
+				strings.Contains(op.After, commitHex[:12]) ||
+				strings.Contains(op.Metadata, c.ChangeID[:8]) {
+				ts := time.Unix(op.Timestamp, 0).Format("2006-01-02 15:04:05")
+				if op.Metadata != "" {
+					relOps = append(relOps, fmt.Sprintf("  #%-4d %-12s %s  %s", op.Seq, op.Kind, ts, op.Metadata))
+				} else {
+					relOps = append(relOps, fmt.Sprintf("  #%-4d %-12s %s", op.Seq, op.Kind, ts))
+				}
+			}
+		}
+		if len(relOps) > 0 {
+			fmt.Println("\nOperation log:")
+			for _, l := range relOps {
+				fmt.Println(l)
+			}
+		}
+
+		return nil
+	},
+}
internal/cli/cmd_fold.go [A]
--- /dev/null
+++ b/internal/cli/cmd_fold.go
@@ -1,0 +1,128 @@
+package cli
+
+import (
+	"fmt"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// foldCmd squashes a commit into its first parent: a new commit is written
+// carrying the target's tree, the parent's parents/author/phase, and the two
+// messages concatenated. Both originals are marked obsolete, all in one
+// store transaction. If HEAD was on the target, the working copy and HEAD
+// move to the new combined commit.
+var foldCmd = &cobra.Command{
+	Use:   "fold <commit>",
+	Short: "Fold (squash) a commit into its parent",
+	Long: `Merge <commit>'s changes into its parent, combining them into a single commit.
+The folded commit and its original are both marked obsolete.
+HEAD is updated to the new combined commit.`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		targetID, err := resolveRef(r, args[0])
+		if err != nil {
+			return err
+		}
+		target, err := r.ReadCommit(targetID)
+		if err != nil {
+			return err
+		}
+		if len(target.Parents) == 0 {
+			return fmt.Errorf("cannot fold root commit (no parent)")
+		}
+		parentID := target.Parents[0]
+		parent, err := r.ReadCommit(parentID)
+		if err != nil {
+			return err
+		}
+
+		// Rewriting published history requires an explicit opt-in.
+		if !foldForceRewrite {
+			for _, c := range []*object.Commit{target, parent} {
+				if c.Phase == object.PhasePublic {
+					return fmt.Errorf("commit ch:%s is public; use --force-rewrite to rewrite history", c.ChangeID)
+				}
+			}
+		}
+
+		// Snapshot for the operation log (best-effort).
+		before, _ := r.CaptureRefState()
+		now := time.Now()
+		sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
+
+		msg := parent.Message + "\n\n" + target.Message
+
+		// Everything below — new commit, change-ID mapping, obsolescence
+		// markers, operation-log entry — commits atomically.
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+
+		newCID, err := r.Store.AllocChangeID(tx)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		// The combined commit takes the target's tree (the end state) but
+		// the parent's parents, author and phase.
+		combined := &object.Commit{
+			TreeID:    target.TreeID,
+			Parents:   parent.Parents,
+			ChangeID:  newCID,
+			Author:    parent.Author,
+			Committer: sig,
+			Message:   msg,
+			Phase:     parent.Phase,
+		}
+		newID, err := repo.WriteCommitTx(r.Store, tx, combined)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.SetChangeCommit(tx, newCID, newID); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		// Both originals are superseded by the new commit.
+		for _, oldID := range [][32]byte{parentID, targetID} {
+			obs := &object.ObsoleteMarker{Predecessor: oldID, Successors: [][32]byte{newID}, Reason: "fold"}
+			if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
+				r.Store.Rollback(tx)
+				return err
+			}
+		}
+
+		after := fmt.Sprintf(`{"head":%q,"tip":%q}`, object.FormatChangeID(newCID), fmt.Sprintf("%x", newID))
+		op := store.Operation{
+			Kind: "fold", Timestamp: now.Unix(), Before: before, After: after,
+			Metadata: "folded ch:" + target.ChangeID + " into parent",
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		// If HEAD was sitting on the folded commit, move the working copy
+		// and HEAD onto the replacement (materialize first, then HEAD).
+		_, headID, err := r.HeadCommit()
+		if err == nil && headID == targetID {
+			w := wc.New(r)
+			if err := w.Materialize(combined.TreeID, object.FormatChangeID(newCID)); err != nil {
+				return err
+			}
+			if err := r.WriteHead(object.FormatChangeID(newCID)); err != nil {
+				return err
+			}
+		}
+
+		fmt.Printf("Folded ch:%s into ch:%s → ch:%s\n", target.ChangeID, parent.ChangeID, newCID)
+		return nil
+	},
+}
+
+// foldForceRewrite permits folding public-phase commits (history rewrite).
+var foldForceRewrite bool
+
+// init registers the --force-rewrite flag on the fold command.
+func init() {
+	foldCmd.Flags().BoolVar(&foldForceRewrite, "force-rewrite", false, "allow rewriting public commits")
+}

internal/cli/cmd_gc.go [A]
--- /dev/null
+++ b/internal/cli/cmd_gc.go
@@ -1,0 +1,177 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"arche/internal/store"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	// gcRetentionDays: ObsoleteMarkers newer than this many days survive GC
+	// even when their predecessor commit is unreachable.
+	gcRetentionDays int
+	// gcTrainDict: when true, train a zstd dictionary from sampled blobs
+	// after the GC pass completes.
+	gcTrainDict bool
+)
+
+// gcCmd runs mark-and-sweep GC via the store's optional GCer interface,
+// rendering progress two ways: a single rewritten line on a TTY, or one
+// plain line per phase otherwise. Optionally trains a zstd dictionary after.
+var gcCmd = &cobra.Command{
+	Use:   "gc",
+	Short: "Run garbage collection on the repository",
+	Long: `Mark-and-sweep garbage collection that removes objects no longer reachable
+from any bookmark, HEAD, or operation log entry.
+
+GC proceeds in four steps:
+  1. Collect roots: all bookmark commits, current HEAD commit, and every commit
+     hash ever recorded in the operation log (to preserve full undo history).
+  2. Mark: traverse the commit/tree/blob DAG from every root and mark all
+     reachable objects as live.
+  3. Sweep: delete any SQLite-stored object not in the live set.
+  4. Repack: copy only live pack-file blobs into new pack files, update
+     pack_index atomically, and delete the old pack files.
+
+ObsoleteMarkers are retained if their predecessor commit is still live, or if
+they were created within --retention days (default 90). Older markers whose
+predecessor has been swept are also removed.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// GC is optional store capability; bail out if unsupported.
+		gcer, ok := r.Store.(store.GCer)
+		if !ok {
+			return fmt.Errorf("store does not support GC")
+		}
+
+		isatty := isTerminal(os.Stdout)
+		var lastPhase string
+
+		// progress is invoked by the GC implementation. Non-TTY: print one
+		// line per phase transition (lastPhase de-duplicates). TTY: redraw a
+		// single status line ("\r\033[K" = return to column 0, clear line).
+		progress := func(phase string, done, total int) {
+			if !isatty {
+				if phase != lastPhase {
+					lastPhase = phase
+					switch phase {
+					case "roots":
+						fmt.Println("Collecting roots...")
+					case "mark":
+						fmt.Println("Marking reachable objects...")
+					case "sweep":
+						fmt.Println("Sweeping unreachable objects...")
+					case "repack":
+						fmt.Printf("Repacking: %d/%d blobs\n", done, total)
+					}
+				}
+				return
+			}
+
+			var line string
+			switch phase {
+			case "roots":
+				line = "  Collecting roots …"
+			case "mark":
+				if total > 0 {
+					line = fmt.Sprintf("  Marking objects   %s  %d/%d roots",
+						progressBar(done, total, 24), done, total)
+				} else {
+					line = "  Marking objects …"
+				}
+			case "sweep":
+				line = "  Sweeping objects …"
+			case "repack":
+				if total > 0 {
+					line = fmt.Sprintf("  Repacking blobs   %s  %d/%d",
+						progressBar(done, total, 24), done, total)
+				} else {
+					line = "  Repacking blobs …"
+				}
+			}
+			fmt.Printf("\r\033[K%s", line)
+		}
+
+		if isatty {
+			fmt.Printf("GC > retention %d days\n", gcRetentionDays)
+		} else {
+			fmt.Printf("Running garbage collection (retention: %d days)...\n", gcRetentionDays)
+		}
+
+		stats, err := gcer.GC(gcRetentionDays, progress)
+		if err != nil {
+			// Terminate the in-place progress line before returning.
+			if isatty {
+				fmt.Println()
+			}
+			return err
+		}
+
+		if isatty {
+			fmt.Println()
+		}
+
+		if stats.ObjectsDeleted == 0 && stats.PackEntriesDeleted == 0 {
+			fmt.Println("Nothing to collect — repository is already clean.")
+			return nil
+		}
+
+		fmt.Printf("Objects removed:       %d\n", stats.ObjectsDeleted)
+		fmt.Printf("Pack entries removed:  %d\n", stats.PackEntriesDeleted)
+		if stats.PackFilesRebuilt > 0 {
+			fmt.Printf("Pack files rebuilt:    %d\n", stats.PackFilesRebuilt)
+		}
+		if stats.BytesFreed > 0 {
+			fmt.Printf("Disk space freed:      %s\n", formatBytes(stats.BytesFreed))
+		}
+
+		// Optional post-GC dictionary training (also a capability check).
+		if gcTrainDict {
+			dt, ok := r.Store.(store.DictTrainer)
+			if !ok {
+				return fmt.Errorf("store does not support dictionary training")
+			}
+			if isatty {
+				fmt.Print("Training zstd dictionary from sampled blobs…")
+			} else {
+				fmt.Println("Training zstd dictionary...")
+			}
+			if err := dt.TrainAndSaveDict(); err != nil {
+				if isatty {
+					fmt.Println()
+				}
+				return fmt.Errorf("dict training: %w", err)
+			}
+			if isatty {
+				fmt.Println(" done")
+			} else {
+				fmt.Println("Dictionary saved. Future compressions will use it.")
+			}
+		}
+		return nil
+	},
+}
+
+// init registers the gc command's tuning flags.
+func init() {
+	gcCmd.Flags().IntVar(&gcRetentionDays, "retention", 90,
+		"Keep ObsoleteMarkers created within this many days even if predecessor is unreachable")
+	gcCmd.Flags().BoolVar(&gcTrainDict, "train-dict", false,
+		"Train a zstd compression dictionary from sampled blobs after GC (zstd only)")
+}
+
+// progressBar renders a fixed-width "[███░░░]" bar for done/total. A
+// non-positive total yields an all-empty bar; the fill fraction is clamped
+// to [0, width] so out-of-range inputs can never panic strings.Repeat.
+func progressBar(done, total, width int) string {
+	if total <= 0 {
+		return "[" + strings.Repeat("░", width) + "]"
+	}
+	filled := done * width / total
+	// Clamp both ends: the original only capped the top, so a negative done
+	// (or overflow in done*width) produced a negative repeat count and a
+	// panic in strings.Repeat.
+	if filled < 0 {
+		filled = 0
+	}
+	if filled > width {
+		filled = width
+	}
+	return "[" + strings.Repeat("█", filled) + strings.Repeat("░", width-filled) + "]"
+}
+
+func formatBytes(n int64) string {
+	switch {
+	case n >= 1<<30:
+		return fmt.Sprintf("%.1f GiB", float64(n)/(1<<30))
+	case n >= 1<<20:
+		return fmt.Sprintf("%.1f MiB", float64(n)/(1<<20))
+	case n >= 1<<10:
+		return fmt.Sprintf("%.1f KiB", float64(n)/(1<<10))
+	default:
+		return fmt.Sprintf("%d B", n)
+	}
+}

internal/cli/cmd_git_import.go [A]
--- /dev/null
+++ b/internal/cli/cmd_git_import.go
@@ -1,0 +1,56 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"arche/internal/gitcompat"
+
+	"github.com/spf13/cobra"
+)
+
+// gitImportCmd performs a one-shot migration of a git repository's history
+// into the current (already-initialized) arche repository. It does not turn
+// on ongoing git compatibility — that is "arche init --git".
+var gitImportCmd = &cobra.Command{
+	Use:   "git-import [git-repo-path]",
+	Short: "Import git history into the current arche repository (one-shot, no compat)",
+	Long: `Import the full commit history of a git repository into the current arche
+repository as public-phase commits.
+
+  arche git-import           - import from the git repo in the current directory
+  arche git-import /path/to  - import from the git repo at the given path
+
+This is a one-shot migration. Git compatibility is NOT enabled: arche will not
+mirror future snaps back to git and arche sync will not drive git push/pull.
+To keep git as a parallel history mirror, use "arche init --git" instead.
+
+The arche repository must already exist (run "arche init" first if needed).
+Bookmarks and HEAD are updated to match the imported tip.`,
+	Args: cobra.MaximumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		// Source git repo defaults to the current directory.
+		gitPath := "."
+		if len(args) == 1 {
+			gitPath = args[0]
+		}
+
+		absGitPath, err := filepath.Abs(gitPath)
+		if err != nil {
+			return fmt.Errorf("resolve path: %w", err)
+		}
+
+		// Validate before opening the arche repo, for a clearer error.
+		if !gitcompat.IsGitRepo(absGitPath) {
+			return fmt.Errorf("%s is not a git repository (no .git/ found)", absGitPath)
+		}
+
+		r := openRepo()
+		defer r.Close()
+
+		fmt.Fprintf(os.Stdout, "Importing git history from %s ...\n", absGitPath)
+		if err := gitcompat.ImportFromGitOnce(absGitPath, r); err != nil {
+			return fmt.Errorf("git-import: %w", err)
+		}
+		fmt.Println("Import complete.")
+		// Best-effort head display; an error prints an empty ID.
+		head, _ := r.Head()
+		fmt.Printf("HEAD is now at %s\n", head)
+		return nil
+	},
+}

internal/cli/cmd_grep.go [A]
--- /dev/null
+++ b/internal/cli/cmd_grep.go
@@ -1,0 +1,283 @@
+package cli
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"arche/internal/diff"
+	"arche/internal/object"
+	"arche/internal/repo"
+
+	"github.com/spf13/cobra"
+)
+
+// Flags for "arche grep".
+var (
+	grepAt      string // --at: commit/change to search (default: HEAD)
+	grepHistory bool   // --history: search all commits reachable from HEAD
+	grepPickaxe bool   // --pickaxe: commits that added/removed a matching line
+	grepFixed   bool   // -F: treat the pattern as a literal string
+)
+
+// grepCmd dispatches to one of three search modes: a single commit's tree
+// (default / --at), full history (--history), or pickaxe (--pickaxe).
+var grepCmd = &cobra.Command{
+	Use:   "grep <pattern>",
+	Short: "Search file content at a commit or across history",
+	Args:  cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		pat := args[0]
+		if grepFixed {
+			// -F: escape regexp metacharacters so the pattern matches literally.
+			pat = regexp.QuoteMeta(pat)
+		}
+		re, err := regexp.Compile(pat)
+		if err != nil {
+			return fmt.Errorf("invalid pattern: %w", err)
+		}
+
+		switch {
+		case grepHistory:
+			return grepHistory_(r, re)
+		case grepPickaxe:
+			return grepPickaxe_(r, re)
+		default:
+			// "@" denotes the current HEAD revision.
+			ref := grepAt
+			if ref == "" {
+				ref = "@"
+			}
+			commitID, err := resolveRef(r, ref)
+			if err != nil {
+				return err
+			}
+			c, err := r.ReadCommit(commitID)
+			if err != nil {
+				return err
+			}
+			return grepTree(r, c.TreeID, re)
+		}
+	},
+}
+
+// grepTree searches every text blob reachable from treeID and prints
+// "path:line:text" for each regexp match, in sorted path order.
+// Returns an error when nothing matches.
+func grepTree(r *repo.Repo, treeID [32]byte, re *regexp.Regexp) error {
+	blobs := make(map[string][32]byte)
+	if err := grepFlattenTree(r, treeID, "", blobs); err != nil {
+		return err
+	}
+
+	// Sort paths for deterministic output.
+	paths := make([]string, 0, len(blobs))
+	for p := range blobs {
+		paths = append(paths, p)
+	}
+	sort.Strings(paths)
+
+	found := false
+	for _, path := range paths {
+		// Use r.ReadBlob for consistency with grepHistory_ and hooksDiffCmd
+		// instead of the lower-level Store.ReadObject + DecodeBlob pair.
+		data, err := r.ReadBlob(blobs[path])
+		if err != nil {
+			continue // unreadable blob: skip rather than abort the search
+		}
+		content := string(data)
+		if strings.ContainsRune(content, 0) {
+			continue // NUL byte => treat as binary, do not grep
+		}
+		for i, line := range strings.Split(content, "\n") {
+			if re.MatchString(line) {
+				fmt.Printf("%s:%d:%s\n", path, i+1, line)
+				found = true
+			}
+		}
+	}
+	if !found {
+		return fmt.Errorf("no matches found")
+	}
+	return nil
+}
+
+// grepFlattenTree recursively flattens the tree rooted at treeID into out,
+// mapping slash-separated relative paths to blob IDs. A zero treeID is
+// treated as an empty tree.
+func grepFlattenTree(r *repo.Repo, treeID [32]byte, prefix string, out map[string][32]byte) error {
+	if treeID == object.ZeroID {
+		return nil
+	}
+	t, err := r.ReadTree(treeID)
+	if err != nil {
+		return err
+	}
+	for _, e := range t.Entries {
+		rel := e.Name
+		if prefix != "" {
+			rel = prefix + "/" + e.Name
+		}
+		switch e.Mode {
+		case object.ModeDir:
+			if err := grepFlattenTree(r, e.ObjectID, rel, out); err != nil {
+				return err
+			}
+		default:
+			// Every non-directory entry is recorded as a blob.
+			out[rel] = e.ObjectID
+		}
+	}
+	return nil
+}
+
+// grepHistory_ searches the content of every distinct blob reachable from
+// HEAD's ancestry. Each blob is searched once and attributed to the first
+// commit (in BFS order from HEAD) in which it was seen.
+// NOTE(review): the trailing underscore avoids a clash with the grepHistory
+// flag variable; a more idiomatic name would need a coordinated rename with
+// grepCmd.
+func grepHistory_(r *repo.Repo, re *regexp.Regexp) error {
+	_, headID, err := r.HeadCommit()
+	if err != nil {
+		return err
+	}
+
+	// Where each blob was first seen.
+	// NOTE(review): msg is populated below but never printed.
+	type blobSeen struct {
+		path     string
+		changeID string
+		msg      string
+	}
+
+	seen := make(map[[32]byte]bool)
+	blobIndex := make(map[[32]byte]blobSeen)
+
+	queue := [][32]byte{headID}
+	seenCommit := make(map[[32]byte]bool)
+
+	// BFS over the commit DAG, collecting every distinct blob exactly once.
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		if seenCommit[id] {
+			continue
+		}
+		seenCommit[id] = true
+
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			continue
+		}
+
+		blobs := make(map[string][32]byte)
+		if err := grepFlattenTree(r, c.TreeID, "", blobs); err != nil {
+			continue
+		}
+		for path, blobID := range blobs {
+			if !seen[blobID] {
+				seen[blobID] = true
+				blobIndex[blobID] = blobSeen{path: path, changeID: c.ChangeID, msg: bisectFirstLine(c.Message)}
+			}
+		}
+
+		for _, p := range c.Parents {
+			if !seenCommit[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+
+	// NOTE(review): ranging over a map means output ordering is
+	// nondeterministic across runs.
+	found := false
+	for blobID, info := range blobIndex {
+		content, err := r.ReadBlob(blobID)
+		if err != nil {
+			continue
+		}
+		if strings.ContainsRune(string(content), 0) {
+			continue // NUL byte => binary, skip
+		}
+		for i, line := range strings.Split(string(content), "\n") {
+			if re.MatchString(line) {
+				fmt.Printf("ch:%-8s  %s:%d:%s\n", info.changeID[:8], info.path, i+1, line)
+				found = true
+			}
+		}
+	}
+	if !found {
+		return fmt.Errorf("no matches found in history")
+	}
+	return nil
+}
+
+// grepPickaxe_ finds commits whose diff against their first parent added or
+// removed a line matching re (similar in spirit to git's pickaxe). Root
+// commits are skipped; merge commits are diffed against the first parent only.
+// NOTE(review): the trailing underscore avoids a clash with the grepPickaxe
+// flag variable; renaming would also touch grepCmd.
+func grepPickaxe_(r *repo.Repo, re *regexp.Regexp) error {
+	_, headID, err := r.HeadCommit()
+	if err != nil {
+		return err
+	}
+
+	// Collect all reachable commits breadth-first, keeping the decoded
+	// commit so each one is read from the store exactly once (the previous
+	// version re-read every commit in the scan phase below).
+	var commits []*object.Commit
+	seenCommit := make(map[[32]byte]bool)
+	queue := [][32]byte{headID}
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		if seenCommit[id] {
+			continue
+		}
+		seenCommit[id] = true
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			continue
+		}
+		commits = append(commits, c)
+		for _, p := range c.Parents {
+			if !seenCommit[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+
+	found := false
+	for _, c := range commits {
+		if len(c.Parents) == 0 {
+			continue // root commit: nothing to diff against
+		}
+
+		parentC, err := r.ReadCommit(c.Parents[0])
+		if err != nil {
+			continue
+		}
+
+		fileDiffs, err := diff.TreeDiff(r, parentC.TreeID, c.TreeID)
+		if err != nil {
+			continue
+		}
+
+		// A file matches when any added/removed patch line (excluding the
+		// "---"/"+++" file header lines) matches the pattern.
+		var matchPaths []string
+		for _, fd := range fileDiffs {
+			for _, line := range strings.Split(fd.Patch, "\n") {
+				if len(line) < 1 {
+					continue
+				}
+				ch := line[0]
+				if (ch == '+' || ch == '-') && !strings.HasPrefix(line, "---") && !strings.HasPrefix(line, "+++") {
+					if re.MatchString(line[1:]) {
+						matchPaths = append(matchPaths, fd.Path)
+						break
+					}
+				}
+			}
+		}
+		sort.Strings(matchPaths)
+
+		if len(matchPaths) > 0 {
+			fmt.Printf("ch:%-8s  %s\n", c.ChangeID[:8], bisectFirstLine(c.Message))
+			for _, p := range matchPaths {
+				fmt.Printf("    %s\n", p)
+			}
+			found = true
+		}
+	}
+	if !found {
+		return fmt.Errorf("no commits added or removed a matching line")
+	}
+	return nil
+}
+
+// init registers the grep command's flags.
+func init() {
+	grepCmd.Flags().StringVar(&grepAt, "at", "", "search at a specific commit/change ID (default: current HEAD)")
+	grepCmd.Flags().BoolVar(&grepHistory, "history", false, "search across all commits reachable from HEAD")
+	grepCmd.Flags().BoolVar(&grepPickaxe, "pickaxe", false, "find commits that added or removed a matching line")
+	grepCmd.Flags().BoolVarP(&grepFixed, "fixed", "F", false, "treat pattern as a literal fixed string (not regex)")
+}

internal/cli/cmd_hooks.go [A]
--- /dev/null
+++ b/internal/cli/cmd_hooks.go
@@ -1,0 +1,308 @@
+package cli
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"arche/internal/diff"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// archehooksDir is the repo-relative directory holding versioned hook scripts.
+const archehooksDir = ".archehooks"
+
+// hookEntry describes one script found under .archehooks/<phase>/.
+type hookEntry struct {
+	name    string // file name of the script
+	relPath string // slash-separated path relative to the repo root
+	phase   string // "pre-snap" or "post-snap"
+}
+
+// listArcheHooks scans .archehooks/pre-snap and .archehooks/post-snap under
+// repoRoot and returns one entry per regular file found. A missing phase
+// directory is skipped silently; any other read error aborts.
+func listArcheHooks(repoRoot string) ([]hookEntry, error) {
+	var entries []hookEntry
+	for _, phase := range []string{"pre-snap", "post-snap"} {
+		dir := filepath.Join(repoRoot, archehooksDir, phase)
+		infos, err := os.ReadDir(dir)
+		if os.IsNotExist(err) {
+			continue
+		}
+		if err != nil {
+			return nil, fmt.Errorf("read %s: %w", dir, err)
+		}
+		for _, info := range infos {
+			if info.IsDir() {
+				continue
+			}
+			// relPath stays slash-separated: it is matched against config
+			// entries and committed tree paths, not used directly as an OS path.
+			rel := archehooksDir + "/" + phase + "/" + info.Name()
+			entries = append(entries, hookEntry{
+				name:    info.Name(),
+				relPath: rel,
+				phase:   phase,
+			})
+		}
+	}
+	return entries, nil
+}
+
+// isInstalled reports whether hookRelPath appears in the configured command
+// list (exact string match).
+func isInstalled(commands []string, hookRelPath string) bool {
+	for _, cmd := range commands {
+		if cmd == hookRelPath {
+			return true
+		}
+	}
+	return false
+}
+
+// hooksCmd is the parent "arche hooks" command; all behaviour lives in its
+// subcommands (list / install / diff / run).
+var hooksCmd = &cobra.Command{
+	Use:   "hooks",
+	Short: "Manage versioned snap hooks from .archehooks/",
+	Long: `Manage client-side snap hooks.
+
+Hooks in .archehooks/pre-snap/ and .archehooks/post-snap/ are versioned with
+the repository but are NEVER executed automatically. Installation is always
+an explicit opt-in via 'arche hooks install'.
+
+Installed hooks are stored in .arche/config.toml (local, never synced).`,
+}
+
+// hooksListCmd prints every script under .archehooks/ together with whether
+// it is installed in the local config.
+var hooksListCmd = &cobra.Command{
+	Use:   "list",
+	Short: "Show hooks defined in .archehooks/ and their installation status",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		entries, err := listArcheHooks(r.Root)
+		if err != nil {
+			return err
+		}
+		if len(entries) == 0 {
+			fmt.Println("No hooks found in .archehooks/")
+			return nil
+		}
+
+		cfg := r.Cfg
+		for _, e := range entries {
+			// Installation status is tracked per phase list in the config.
+			var installed string
+			switch e.phase {
+			case "pre-snap":
+				if isInstalled(cfg.Hooks.PreSnap, e.relPath) {
+					installed = "installed"
+				} else {
+					installed = "not installed"
+				}
+			case "post-snap":
+				if isInstalled(cfg.Hooks.PostSnap, e.relPath) {
+					installed = "installed"
+				} else {
+					installed = "not installed"
+				}
+			}
+			fmt.Printf("[%s] %-30s  %s\n", e.phase, e.name, installed)
+		}
+		return nil
+	},
+}
+
+// hooksInstallCmd shows a hook script's full content and, on explicit
+// confirmation, records it in .arche/config.toml. Installation is idempotent:
+// an already-installed hook is never appended a second time.
+var hooksInstallCmd = &cobra.Command{
+	Use:   "install [name]",
+	Short: "Opt-in install of a repo hook after reviewing its content",
+	Long: `Show the full content of the hook script and prompt for confirmation
+before adding it to .arche/config.toml.
+
+If no name is given, prompts for each not-yet-installed hook in .archehooks/.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		all, err := listArcheHooks(r.Root)
+		if err != nil {
+			return err
+		}
+
+		// Select candidates: hooks matching the given name (with or without
+		// extension), or every not-yet-installed hook when no name is given.
+		var candidates []hookEntry
+		if len(args) > 0 {
+			name := args[0]
+			for _, e := range all {
+				if e.name == name || strings.TrimSuffix(e.name, filepath.Ext(e.name)) == name {
+					candidates = append(candidates, e)
+				}
+			}
+			if len(candidates) == 0 {
+				return fmt.Errorf("hook %q not found in .archehooks/", name)
+			}
+		} else {
+			cfg := r.Cfg
+			for _, e := range all {
+				var installed bool
+				switch e.phase {
+				case "pre-snap":
+					installed = isInstalled(cfg.Hooks.PreSnap, e.relPath)
+				case "post-snap":
+					installed = isInstalled(cfg.Hooks.PostSnap, e.relPath)
+				}
+				if !installed {
+					candidates = append(candidates, e)
+				}
+			}
+			if len(candidates) == 0 {
+				fmt.Println("All available hooks are already installed.")
+				return nil
+			}
+		}
+
+		sc := bufio.NewScanner(os.Stdin)
+		changed := false
+
+		for _, e := range candidates {
+			// Skip hooks that are already installed. Previously a hook named
+			// explicitly on the command line could be appended to the config
+			// a second time, producing duplicate entries.
+			var already bool
+			switch e.phase {
+			case "pre-snap":
+				already = isInstalled(r.Cfg.Hooks.PreSnap, e.relPath)
+			case "post-snap":
+				already = isInstalled(r.Cfg.Hooks.PostSnap, e.relPath)
+			}
+			if already {
+				fmt.Printf("%s is already installed in %s\n", e.name, e.phase)
+				continue
+			}
+
+			absPath := filepath.Join(r.Root, filepath.FromSlash(e.relPath))
+			content, err := os.ReadFile(absPath)
+			if err != nil {
+				return fmt.Errorf("read hook %s: %w", e.relPath, err)
+			}
+
+			// Always show the full script before asking: installation is an
+			// explicit, reviewed opt-in.
+			fmt.Printf("\n--- Hook: %s [%s] ---\n", e.name, e.phase)
+			fmt.Println(string(content))
+			fmt.Printf("--- End of hook ---\n")
+			fmt.Printf("Install this hook in %s? [y/N] ", e.phase)
+
+			if !sc.Scan() {
+				break // EOF on stdin: stop prompting
+			}
+			answer := strings.TrimSpace(strings.ToLower(sc.Text()))
+			if answer != "y" && answer != "yes" {
+				fmt.Printf("Skipped %s\n", e.name)
+				continue
+			}
+
+			switch e.phase {
+			case "pre-snap":
+				r.Cfg.Hooks.PreSnap = append(r.Cfg.Hooks.PreSnap, e.relPath)
+			case "post-snap":
+				r.Cfg.Hooks.PostSnap = append(r.Cfg.Hooks.PostSnap, e.relPath)
+			}
+			fmt.Printf("Installed %s in %s\n", e.name, e.phase)
+			changed = true
+		}
+
+		// Persist only when at least one hook was actually installed.
+		if changed {
+			if err := r.SaveConfig(); err != nil {
+				return fmt.Errorf("save config: %w", err)
+			}
+		}
+		return nil
+	},
+}
+
+// hooksDiffCmd compares the HEAD-committed version of each installed
+// .archehooks/ script against the file currently on disk.
+var hooksDiffCmd = &cobra.Command{
+	Use:   "diff",
+	Short: "Diff committed .archehooks/ scripts against working-copy versions",
+	Long: `For each installed command in .arche/config.toml that references an
+.archehooks/ script, show a unified diff between the version committed in HEAD
+and the file currently on disk.
+
+Use this after pulling changes from collaborators to review exactly what changed
+in hook scripts before running them.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		head, _, err := r.HeadCommit()
+		if err != nil {
+			return fmt.Errorf("head commit: %w", err)
+		}
+
+		// Flatten HEAD's tree into path -> blob ID for lookup below.
+		committedFiles := make(map[string][32]byte)
+		if err := diff.FlattenTree(r, head.TreeID, committedFiles); err != nil {
+			return fmt.Errorf("flatten head tree: %w", err)
+		}
+
+		cfg := r.Cfg
+		type phaseCmd struct {
+			phase string
+			cmd   string
+		}
+		var allCmds []phaseCmd
+		for _, c := range cfg.Hooks.PreSnap {
+			allCmds = append(allCmds, phaseCmd{"pre-snap", c})
+		}
+		for _, c := range cfg.Hooks.PostSnap {
+			allCmds = append(allCmds, phaseCmd{"post-snap", c})
+		}
+
+		hasArcheHooks := false
+		hasDiff := false
+		for _, entry := range allCmds {
+			// Only config entries that reference .archehooks/ scripts are
+			// diffable; arbitrary installed commands are ignored.
+			if !strings.HasPrefix(entry.cmd, archehooksDir+"/") {
+				continue
+			}
+			hasArcheHooks = true
+
+			// A missing committed version (or unreadable blob) diffs as empty,
+			// so a brand-new script shows as fully added.
+			var committedContent string
+			if blobID, ok := committedFiles[entry.cmd]; ok {
+				if data, readErr := r.ReadBlob(blobID); readErr == nil {
+					committedContent = string(data)
+				}
+			}
+
+			// Likewise a missing on-disk file diffs as empty (deleted).
+			absPath := filepath.Join(r.Root, filepath.FromSlash(entry.cmd))
+			diskData, diskErr := os.ReadFile(absPath)
+			var diskContent string
+			if diskErr == nil {
+				diskContent = string(diskData)
+			} else if !os.IsNotExist(diskErr) {
+				fmt.Fprintf(os.Stderr, "warning: read %s: %v\n", entry.cmd, diskErr)
+			}
+
+			patch := diff.UnifiedDiff(entry.cmd, committedContent, diskContent)
+			if patch == "" {
+				continue
+			}
+			fmt.Printf("=== [%s] %s ===\n", entry.phase, entry.cmd)
+			fmt.Print(patch)
+			hasDiff = true
+		}
+
+		if !hasArcheHooks {
+			fmt.Println("No installed .archehooks/ scripts found in config.")
+			fmt.Println("Run 'arche hooks list' to see available hooks.")
+		} else if !hasDiff {
+			fmt.Println("No changes to installed .archehooks/ scripts (all match committed version).")
+		}
+		return nil
+	},
+}
+
+// hooksRunCmd executes the configured hooks of one phase on demand; hooks are
+// never run automatically (see hooksCmd).
+var hooksRunCmd = &cobra.Command{
+	Use:   "run <pre-snap|post-snap>",
+	Short: "Manually run pre-snap or post-snap hooks",
+	Args:  cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		var hooks []string
+		switch args[0] {
+		case "pre-snap":
+			hooks = r.Cfg.Hooks.PreSnap
+		case "post-snap":
+			hooks = r.Cfg.Hooks.PostSnap
+		default:
+			return fmt.Errorf("unknown hook phase %q (use pre-snap or post-snap)", args[0])
+		}
+
+		if len(hooks) == 0 {
+			fmt.Printf("No %s hooks configured.\n", args[0])
+			return nil
+		}
+
+		return wc.RunHooksSequential(r.Root, args[0], hooks)
+	},
+}
+
+// init wires the hook subcommands onto the parent "hooks" command.
+func init() {
+	hooksCmd.AddCommand(hooksListCmd, hooksInstallCmd, hooksDiffCmd, hooksRunCmd)
+}

internal/cli/cmd_init.go [A]
--- /dev/null
+++ b/internal/cli/cmd_init.go
@@ -1,0 +1,88 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"arche/internal/gitcompat"
+	"arche/internal/repo"
+
+	"github.com/spf13/cobra"
+)
+
+var initGit bool
+
+var initCmd = &cobra.Command{
+	Use:   "init [directory]",
+	Short: "Initialise a new Arche repository",
+	Long: `Initialise a new Arche repository in the given directory (default: current
+directory). Creates a .arche/ directory with an SQLite store and an initial
+empty draft commit. HEAD is set to the change ID of that draft.
+
+With --git: also initialise (or reuse) a .git/ repository so the directory
+stays fully compatible with standard git tooling. Existing git history is
+imported as public-phase arche commits. Every subsequent "arche snap" will
+also create a git commit; "arche sync" will also run git push/pull.`,
+	Args: cobra.MaximumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		dir := "."
+		if len(args) == 1 {
+			dir = args[0]
+		}
+
+		hasGitHistory := initGit && gitcompat.IsGitRepo(dir)
+
+		if initGit && !hasGitHistory {
+			if err := gitcompat.GitInit(dir); err != nil {
+				return fmt.Errorf("git init: %w", err)
+			}
+		}
+
+		r, err := repo.Init(dir)
+		if err != nil {
+			return err
+		}
+		defer r.Close()
+
+		if initGit {
+			if err := gitcompat.EnsureGitIgnore(r.Root); err != nil {
+				return fmt.Errorf("update .gitignore: %w", err)
+			}
+
+			if hasGitHistory {
+				fmt.Println("Importing git history into arche store…")
+				if err := gitcompat.ImportFromGit(r.Root, r); err != nil {
+					return fmt.Errorf("import git history: %w", err)
+				}
+				fmt.Println("Import complete.")
+			}
+
+			r.Cfg.Git.Enabled = true
+			if r.Cfg.Git.Remote == "" {
+				r.Cfg.Git.Remote = "origin"
+			}
+			if err := r.SaveConfig(); err != nil {
+				return err
+			}
+		}
+
+		absDir, _ := os.Getwd()
+		if dir != "." {
+			absDir = dir
+		}
+		head, _ := r.Head()
+		fmt.Printf("Initialised empty Arche repository in %s/.arche/\n", absDir)
+		if initGit {
+			fmt.Printf("Git compatibility enabled — .arche/ added to .gitignore\n")
+		}
+		fmt.Printf("Working copy: %s (draft, empty)\n", head)
+		if !hasGitHistory {
+			fmt.Printf("No bookmark. Run 'arche snap' to record your first commit.\n")
+		}
+		return nil
+	},
+}
+
+// init registers the --git flag.
+func init() {
+	initCmd.Flags().BoolVar(&initGit, "git", false, "also initialise git compatibility (import existing history if present)")
+}

internal/cli/cmd_lock.go [A]
--- /dev/null
+++ b/internal/cli/cmd_lock.go
@@ -1,0 +1,182 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"arche/internal/store"
+
+	"github.com/spf13/cobra"
+)
+
+// lockCmd is the parent "arche lock" command for advisory exclusive locks.
+var lockCmd = &cobra.Command{
+	Use:   "lock",
+	Short: "Manage exclusive file locks",
+	Long: `Advisory exclusive locks for large binary files (Perforce-style).
+
+Locks are stored in store.db and transmitted during arche sync so that
+all clones see who has a file checked out for editing.
+
+  arche lock add <paths...>     - acquire an exclusive lock
+  arche lock remove <paths...>  - release a lock you hold
+  arche lock list               - show all current locks`,
+}
+
+// lockComment holds the optional -c/--comment flag value for "lock add".
+var lockComment string
+
+// lockAddCmd acquires advisory exclusive locks on the given paths. All locks
+// are taken in a single transaction: any failure rolls back the whole batch.
+var lockAddCmd = &cobra.Command{
+	Use:     "add <path> [paths...]",
+	Aliases: []string{"acquire"},
+	Short:   "Acquire an exclusive lock on one or more files",
+	Args:    cobra.MinimumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// Locks are an optional store capability.
+		ls, ok := r.Store.(store.LockStore)
+		if !ok {
+			return fmt.Errorf("store does not support file locks")
+		}
+
+		owner := lockOwner(r.Cfg.User.Name)
+
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+		for _, path := range args {
+			if err := ls.AcquireLock(tx, path, owner, lockComment); err != nil {
+				r.Store.Rollback(tx) //nolint:errcheck
+				return err
+			}
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+		// Report success only after the transaction is committed.
+		for _, path := range args {
+			fmt.Printf("locked  %s\n", path)
+		}
+		return nil
+	},
+}
+
+// lockRemoveCmd releases locks held by the current owner. Like "lock add",
+// the batch is transactional: any failure rolls back every release.
+var lockRemoveCmd = &cobra.Command{
+	Use:     "remove <path> [paths...]",
+	Aliases: []string{"release", "unlock"},
+	Short:   "Release a lock you hold",
+	Args:    cobra.MinimumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		ls, ok := r.Store.(store.LockStore)
+		if !ok {
+			return fmt.Errorf("store does not support file locks")
+		}
+
+		owner := lockOwner(r.Cfg.User.Name)
+
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+		for _, path := range args {
+			if err := ls.ReleaseLock(tx, path, owner); err != nil {
+				r.Store.Rollback(tx) //nolint:errcheck
+				return err
+			}
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+		// Report success only after the transaction is committed.
+		for _, path := range args {
+			fmt.Printf("unlocked  %s\n", path)
+		}
+		return nil
+	},
+}
+
+// lockSteelCmd force-releases locks regardless of owner (admin action).
+// NOTE(review): "Steel" is a typo for "Steal"; renaming would also touch the
+// init() registration below, so it is only flagged here. The previously
+// accompanying lockStealFlag variable was never used anywhere in this file
+// (the "unused" linter enabled in .golangci.yml rejects it) and was removed.
+var lockSteelCmd = &cobra.Command{
+	Use:   "steal <path> [paths...]",
+	Short: "Force-release a lock held by another user (admin action)",
+	Args:  cobra.MinimumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		ls, ok := r.Store.(store.LockStore)
+		if !ok {
+			return fmt.Errorf("store does not support file locks")
+		}
+
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+		for _, path := range args {
+			// Read the current owner (best effort, outside the transaction)
+			// purely for the report below.
+			lock, _ := ls.GetLock(path)
+			if err := ls.ReleaseLockAdmin(tx, path); err != nil {
+				r.Store.Rollback(tx) //nolint:errcheck
+				return err
+			}
+			if lock != nil {
+				fmt.Printf("stole lock on %s (was held by %s)\n", path, lock.Owner)
+			} else {
+				fmt.Printf("no lock on %s\n", path)
+			}
+		}
+		return r.Store.Commit(tx)
+	},
+}
+
+// lockListCmd prints every current lock with owner and acquisition time.
+var lockListCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all current file locks",
+	Args:  cobra.NoArgs,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		ls, ok := r.Store.(store.LockStore)
+		if !ok {
+			return fmt.Errorf("store does not support file locks")
+		}
+
+		locks, err := ls.ListLocks()
+		if err != nil {
+			return err
+		}
+		if len(locks) == 0 {
+			fmt.Println("No file locks.")
+			return nil
+		}
+		for _, l := range locks {
+			// AcquiredAt is a Unix timestamp (seconds).
+			ts := time.Unix(l.AcquiredAt, 0).Format("2006-01-02 15:04")
+			if l.Comment != "" {
+				fmt.Printf("%-50s  %-30s  %s  %s\n", l.Path, l.Owner, ts, l.Comment)
+			} else {
+				fmt.Printf("%-50s  %-30s  %s\n", l.Path, l.Owner, ts)
+			}
+		}
+		return nil
+	},
+}
+
+// init registers the lock flag and subcommands.
+func init() {
+	lockAddCmd.Flags().StringVarP(&lockComment, "comment", "c", "", "Optional comment for the lock")
+	lockCmd.AddCommand(lockAddCmd, lockRemoveCmd, lockSteelCmd, lockListCmd)
+}
+
+// lockOwner builds the "user@host" identity recorded as a lock's owner,
+// falling back to "unknown" / "localhost" when the config name or hostname
+// is unavailable.
+func lockOwner(userName string) string {
+	if userName == "" {
+		userName = "unknown"
+	}
+	host, err := os.Hostname()
+	if err != nil {
+		host = "localhost"
+	}
+	return userName + "@" + host
+}

internal/cli/cmd_log.go [A]
--- /dev/null
+++ b/internal/cli/cmd_log.go
@@ -1,0 +1,161 @@
+package cli
+
+import (
+	"fmt"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/revset"
+
+	"github.com/spf13/cobra"
+)
+
+// Flags for "arche log".
+var (
+	logLimit      int    // -n: max commits to print (0 = unlimited)
+	logOps        bool   // --ops: print the operation log instead
+	logShowSecret bool   // -s: include secret-phase commits
+	logWhere      string // --where: revset filter expression
+)
+
+// logCmd walks the commit DAG backwards from HEAD and all bookmark tips and
+// prints each reachable commit, honouring --limit, --secret and --where.
+// With --ops it prints the operation log instead.
+var logCmd = &cobra.Command{
+	Use:   "log",
+	Short: "Show the commit DAG",
+	Long: `Walk the commit graph backwards from HEAD (and all bookmarks) and display
+each commit in reverse chronological order.
+
+With --ops, show the operation log instead (equivalent to 'arche op log').`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// --ops: print the operation log and exit.
+		if logOps {
+			ops, err := r.Store.ListOperations(logLimit)
+			if err != nil {
+				return err
+			}
+			if len(ops) == 0 {
+				fmt.Println("No operations recorded.")
+				return nil
+			}
+			for _, op := range ops {
+				ts := time.Unix(op.Timestamp, 0).Format("2006-01-02 15:04:05")
+				if op.Metadata != "" {
+					fmt.Printf("#%-4d  %-14s  %s  %s\n", op.Seq, op.Kind, ts, op.Metadata)
+				} else {
+					fmt.Printf("#%-4d  %-14s  %s\n", op.Seq, op.Kind, ts)
+				}
+			}
+			return nil
+		}
+
+		// Optional revset filter (--where).
+		var whereFilter revset.Func
+		if logWhere != "" {
+			var err error
+			whereFilter, err = revset.Parse(logWhere)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Seed the walk with HEAD and every bookmark tip. HEAD is resolved
+		// exactly once (the previous code called HeadCommit twice).
+		tips := make(map[[32]byte]bool)
+		var curHeadID [32]byte
+		if _, headID, err := r.HeadCommit(); err == nil {
+			tips[headID] = true
+			curHeadID = headID
+		}
+
+		bms, _ := r.Store.ListBookmarks()
+		for _, bm := range bms {
+			tips[bm.CommitID] = true
+		}
+
+		seen := make(map[[32]byte]bool)
+		queue := make([][32]byte, 0, len(tips))
+		for id := range tips {
+			queue = append(queue, id)
+		}
+
+		// Map commit -> bookmark names for display.
+		bmIndex := make(map[[32]byte][]string)
+		for _, bm := range bms {
+			bmIndex[bm.CommitID] = append(bmIndex[bm.CommitID], bm.Name)
+		}
+
+		count := 0
+		for len(queue) > 0 && (logLimit <= 0 || count < logLimit) {
+			id := queue[0]
+			queue = queue[1:]
+			if seen[id] {
+				continue
+			}
+			seen[id] = true
+
+			c, err := r.ReadCommit(id)
+			if err != nil {
+				continue
+			}
+
+			// Fetch the phase once; it may be needed by both the secret
+			// filter and the revset filter (previously fetched twice).
+			var phase object.Phase
+			if !logShowSecret || whereFilter != nil {
+				phase, _ = r.Store.GetPhase(id)
+			}
+
+			// Secret commits are hidden unless --secret, but their ancestry
+			// is still walked.
+			if !logShowSecret && phase == object.PhaseSecret {
+				for _, p := range c.Parents {
+					if !seen[p] {
+						queue = append(queue, p)
+					}
+				}
+				continue
+			}
+
+			// Filtered-out commits are likewise skipped but still walked.
+			if whereFilter != nil && !whereFilter(id, c, phase) {
+				for _, p := range c.Parents {
+					if !seen[p] {
+						queue = append(queue, p)
+					}
+				}
+				continue
+			}
+
+			printCommit(id, c, bmIndex[id], curHeadID == id)
+			count++
+
+			for _, p := range c.Parents {
+				if !seen[p] {
+					queue = append(queue, p)
+				}
+			}
+		}
+		return nil
+	},
+}
+
+// init registers the log command's flags.
+func init() {
+	logCmd.Flags().IntVarP(&logLimit, "limit", "n", 0, "maximum number of commits to show (0 = all)")
+	logCmd.Flags().BoolVar(&logOps, "ops", false, "show the operation log instead of the commit graph")
+	logCmd.Flags().BoolVarP(&logShowSecret, "secret", "s", false, "include secret commits in output")
+	logCmd.Flags().StringVar(&logWhere, "where", "", `filter commits with a revset expression, e.g. --where 'author(alice) and not public()'`)
+}
+
+// printCommit renders one commit entry for "arche log". isHead selects the
+// "@" gutter marker; bookmarks are listed when present.
+func printCommit(id [32]byte, c *object.Commit, bookmarks []string, isHead bool) {
+	prefix := "  "
+	if isHead {
+		prefix = "@ "
+	}
+	fmt.Printf("%scommit  %x\n", prefix, id)
+	fmt.Printf("  change  ch:%s\n", c.ChangeID)
+	if len(bookmarks) > 0 {
+		fmt.Printf("  marks   %v\n", bookmarks)
+	}
+	fmt.Printf("  author  %s <%s>\n", c.Author.Name, c.Author.Email)
+	fmt.Printf("  date    %s\n", c.Author.Timestamp.Format(time.RFC1123))
+	fmt.Printf("  phase   %s\n", c.Phase)
+	if c.Message != "" {
+		fmt.Printf("\n    %s\n", c.Message)
+	}
+	fmt.Println()
+}

internal/cli/cmd_merge.go [A]
--- /dev/null
+++ b/internal/cli/cmd_merge.go
@@ -1,0 +1,174 @@
+package cli
+
+import (
+	"fmt"
+	"time"
+
+	"arche/internal/merge"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// mergeCmd implements "arche merge": a three-way merge of the given commit
+// into the working copy. Conflicts never abort the merge; they are recorded
+// in the store and written to disk with conflict markers.
+var mergeCmd = &cobra.Command{
+	Use:   "merge <commit>",
+	Short: "Merge a commit into the working copy",
+	Long: `Perform a three-way merge of the given commit into the current working copy.
+Conflicts are stored as first-class objects and written to the working
+directory with standard conflict markers - no operation is blocked.`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		theirsID, err := resolveRef(r, args[0])
+		if err != nil {
+			return err
+		}
+
+		theirs, err := r.ReadCommit(theirsID)
+		if err != nil {
+			return err
+		}
+
+		head, headID, err := r.HeadCommit()
+		if err != nil {
+			return err
+		}
+
+		// baseID may be ZeroID for unrelated histories; presumably
+		// merge.Trees treats that as an empty base — TODO confirm.
+		baseID := findMergeBase(r, headID, theirsID)
+
+		// Snapshot ref state for the operation log so the merge is undoable.
+		before, _ := r.CaptureRefState()
+		now := time.Now()
+
+		result, err := merge.Trees(r, baseID, head.TreeID, theirs.TreeID)
+		if err != nil {
+			return fmt.Errorf("merge failed: %w", err)
+		}
+
+		sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
+		mMsg := fmt.Sprintf("Merge ch:%s", theirs.ChangeID)
+		if len(result.Conflicts) > 0 {
+			mMsg += fmt.Sprintf(" (%d conflict(s))", len(result.Conflicts))
+		}
+
+		// One atomic transaction: merge commit, change mapping, conflict
+		// records and the op-log entry all land together or not at all.
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+
+		newChangeID, err := r.Store.AllocChangeID(tx)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		// The merge commit has two parents: current HEAD first, then theirs.
+		mc := &object.Commit{
+			TreeID:    result.TreeID,
+			Parents:   [][32]byte{headID, theirsID},
+			ChangeID:  newChangeID,
+			Author:    sig,
+			Committer: sig,
+			Message:   mMsg,
+			Phase:     object.PhaseDraft,
+		}
+		mcID, err := repo.WriteCommitTx(r.Store, tx, mc)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.SetChangeCommit(tx, newChangeID, mcID); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		// Replace any previous conflict records with this merge's set.
+		if err := r.Store.ClearAllConflicts(tx); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		for _, cp := range result.Conflicts {
+			if err := r.Store.AddConflict(tx, cp); err != nil {
+				r.Store.Rollback(tx)
+				return err
+			}
+		}
+		after := buildMergeRefState(mcID, object.FormatChangeID(newChangeID))
+		op := store.Operation{
+			Kind: "merge", Timestamp: now.Unix(), Before: before, After: after,
+			Metadata: "merged ch:" + theirs.ChangeID,
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		// NOTE(review): a failure from here on leaves the merge committed in
+		// the store while the working copy / HEAD are not fully updated;
+		// recovery would go through the op log.
+		w := wc.New(r)
+		newCID := object.FormatChangeID(newChangeID)
+		if err := w.Materialize(result.TreeID, newCID); err != nil {
+			return fmt.Errorf("materialize merge: %w", err)
+		}
+		if err := r.WriteHead(newCID); err != nil {
+			return err
+		}
+
+		if len(result.Conflicts) > 0 {
+			fmt.Printf("Merged ch:%s into %s with %d conflict(s):\n",
+				theirs.ChangeID, newCID, len(result.Conflicts))
+			for _, p := range result.Conflicts {
+				fmt.Printf("  conflict: %s\n", p)
+			}
+			fmt.Println("Resolve conflicts then run 'arche resolve <path>'.")
+		} else {
+			fmt.Printf("Merged ch:%s into %s (clean)\n", theirs.ChangeID, newCID)
+		}
+		return nil
+	},
+}
+
+// findMergeBase returns a common ancestor of a and b, or object.ZeroID when
+// the histories are unrelated. It collects all ancestors of a, then walks b's
+// ancestry breadth-first and returns the first commit also reachable from a.
+// NOTE(review): BFS order approximates, but does not guarantee, a nearest
+// common ancestor in the sense of git merge-base.
+func findMergeBase(r *repo.Repo, a, b [32]byte) [32]byte {
+	ancestorsA := make(map[[32]byte]bool)
+	queueA := [][32]byte{a}
+	for len(queueA) > 0 {
+		id := queueA[0]
+		queueA = queueA[1:]
+		if ancestorsA[id] {
+			continue
+		}
+		ancestorsA[id] = true
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			break // unreadable commit: stop collecting (best effort)
+		}
+		queueA = append(queueA, c.Parents...)
+	}
+
+	queueB := [][32]byte{b}
+	seenB := make(map[[32]byte]bool)
+	for len(queueB) > 0 {
+		id := queueB[0]
+		queueB = queueB[1:]
+		if seenB[id] {
+			continue
+		}
+		seenB[id] = true
+		if ancestorsA[id] {
+			return id
+		}
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			break
+		}
+		queueB = append(queueB, c.Parents...)
+	}
+	return object.ZeroID
+}
+
+// buildMergeRefState encodes the post-merge ref state as the small JSON
+// document stored in the operation log ("head" = change ID, "tip" = hex
+// commit ID).
+func buildMergeRefState(commitID [32]byte, changeID string) string {
+	return fmt.Sprintf(`{"head":%q,"tip":%q}`, changeID, fmt.Sprintf("%x", commitID))
+}

internal/cli/cmd_oplog.go [A]
--- /dev/null
+++ b/internal/cli/cmd_oplog.go
@@ -1,0 +1,48 @@
+package cli
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+// opLogCmd is the parent "arche op" command.
+var opLogCmd = &cobra.Command{
+	Use:   "op",
+	Short: "Operation log subcommands",
+	Long:  "Subcommands for the operation log.",
+}
+
+// opLogListCmd prints the full operation history.
+// NOTE(review): this duplicates the --ops branch of logCmd in cmd_log.go;
+// a shared helper would prevent the two formats drifting apart.
+var opLogListCmd = &cobra.Command{
+	Use:   "log",
+	Short: "Show the operation history",
+	Long: `Display the operation log in reverse chronological order. Each entry shows
+the sequence number, operation kind, timestamp, and a summary of what changed.
+Use 'arche undo --step N' to revert to any point in this log.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// 0 = no limit.
+		ops, err := r.Store.ListOperations(0)
+		if err != nil {
+			return err
+		}
+		if len(ops) == 0 {
+			fmt.Println("No operations recorded.")
+			return nil
+		}
+		for _, op := range ops {
+			ts := time.Unix(op.Timestamp, 0).Format("2006-01-02 15:04:05")
+			if op.Metadata != "" {
+				fmt.Printf("#%-4d  %-14s  %s  %s\n", op.Seq, op.Kind, ts, op.Metadata)
+			} else {
+				fmt.Printf("#%-4d  %-14s  %s\n", op.Seq, op.Kind, ts)
+			}
+		}
+		return nil
+	},
+}
+
+// init wires the op-log subcommand onto the parent "op" command.
+func init() {
+	opLogCmd.AddCommand(opLogListCmd)
+}

internal/cli/cmd_phase.go [A]
--- /dev/null
+++ b/internal/cli/cmd_phase.go
@@ -1,0 +1,116 @@
+package cli
+
+import (
+	"fmt"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/store"
+
+	"github.com/spf13/cobra"
+)
+
+// phaseCmd is the parent "phase" command; subcommands are attached in init.
+var phaseCmd = &cobra.Command{
+	Use:   "phase",
+	Short: "Manage commit phases (draft/public/secret)",
+}
+
+// phaseSetCmd implements "phase set": stores a new lifecycle phase for the
+// resolved commit and records the change in the operation log.
+var phaseSetCmd = &cobra.Command{
+	Use:   "set <commit> <phase>",
+	Short: "Set the phase of a commit",
+	Long: `Manually set the lifecycle phase of a commit.
+
+Valid phases: draft, public, secret
+
+  draft   - work in progress; can be rewritten freely
+  public  - pushed / shared; rewriting is discouraged
+  secret  - hidden from default log output
+`,
+	Args: cobra.ExactArgs(2),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		commitID, err := resolveRef(r, args[0])
+		if err != nil {
+			return err
+		}
+
+		// Map the user-supplied phase name onto the object.Phase enum.
+		var phase object.Phase
+		switch args[1] {
+		case "draft":
+			phase = object.PhaseDraft
+		case "public":
+			phase = object.PhasePublic
+		case "secret":
+			phase = object.PhaseSecret
+		default:
+			return fmt.Errorf("unknown phase %q (want draft|public|secret)", args[1])
+		}
+
+		before, _ := r.CaptureRefState()
+		now := time.Now()
+
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+		if err := r.Store.SetPhase(tx, commitID, phase); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		// NOTE(review): After is recorded identical to Before — phase is
+		// apparently not part of the captured ref state, so undoing this
+		// operation would not restore the old phase; confirm intended.
+		after := before
+		op := store.Operation{
+			Kind: "phase-set", Timestamp: now.Unix(), Before: before, After: after,
+			Metadata: fmt.Sprintf("set %s phase to %s", args[0], args[1]),
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		// Prefer the human-friendly change ID when the commit is readable;
+		// fall back to a short hex prefix of the commit ID.
+		c, _ := r.ReadCommit(commitID)
+		displayID := fmt.Sprintf("%x", commitID[:6])
+		if c != nil {
+			displayID = "ch:" + c.ChangeID
+		}
+		fmt.Printf("Set phase of %s to %s\n", displayID, phase)
+		return nil
+	},
+}
+
+// phaseGetCmd implements "phase get": prints the stored phase of a commit,
+// falling back to the phase embedded in the commit object itself when the
+// store has no record for it.
+var phaseGetCmd = &cobra.Command{
+	Use:   "get <commit>",
+	Short: "Show the current phase of a commit",
+	Args:  cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		commitID, err := resolveRef(r, args[0])
+		if err != nil {
+			return err
+		}
+
+		phase, err := r.Store.GetPhase(commitID)
+		if err != nil {
+			// No stored phase: use the commit's own Phase field.
+			c, err2 := r.ReadCommit(commitID)
+			if err2 != nil {
+				return err2
+			}
+			phase = c.Phase
+		}
+
+		fmt.Println(phase.String())
+		return nil
+	},
+}
+
+func init() {
+	phaseCmd.AddCommand(phaseSetCmd)
+	phaseCmd.AddCommand(phaseGetCmd)
+}

internal/cli/cmd_rebase.go [A]
--- /dev/null
+++ b/internal/cli/cmd_rebase.go
@@ -1,0 +1,239 @@
+package cli
+
+import (
+	"fmt"
+	"time"
+
+	"arche/internal/merge"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+var rebaseForceRewrite bool
+
+// rebaseCmd replays the first-parent chain between <dest> and HEAD onto
+// <dest>, marking each original commit obsolete. A merge conflict pauses
+// the rebase with the conflicted tree materialized so the user can run
+// 'arche resolve'.
+var rebaseCmd = &cobra.Command{
+	Use:   "rebase <dest>",
+	Short: "Replay commits from the current change onto a new base",
+	Long: `Replay the working-copy lineage on top of <dest>.
+Each replayed commit is re-parented; the originals are marked obsolete.
+Conflicts stop the rebase and require 'arche resolve'.`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		destID, err := resolveRef(r, args[0])
+		if err != nil {
+			return err
+		}
+
+		_, headID, err := r.HeadCommit()
+		if err != nil {
+			return err
+		}
+
+		// Oldest-first chain of commits to replay.
+		chain, err := collectLinearChain(r, headID, destID)
+		if err != nil {
+			return err
+		}
+		if len(chain) == 0 {
+			fmt.Println("Nothing to rebase.")
+			return nil
+		}
+
+		// Refuse to rewrite published history unless explicitly forced.
+		if !rebaseForceRewrite {
+			for _, id := range chain {
+				c, err := r.ReadCommit(id)
+				if err != nil {
+					return fmt.Errorf("read commit %x: %w", id[:6], err)
+				}
+				if c.Phase == object.PhasePublic {
+					return fmt.Errorf("commit %x is public; use --force-rewrite to rewrite history", id[:8])
+				}
+			}
+		}
+
+		before, _ := r.CaptureRefState()
+		now := time.Now()
+		newParentID := destID
+		newTipChangeID := ""
+
+		for _, origID := range chain {
+			orig, err := r.ReadCommit(origID)
+			if err != nil {
+				return fmt.Errorf("read commit %x: %w", origID[:6], err)
+			}
+
+			newParentCommit, err := r.ReadCommit(newParentID)
+			if err != nil {
+				return err
+			}
+
+			// Three-way merge base: the tree of the original's first
+			// parent (zero tree for a root commit).
+			var baseTreeID [32]byte
+			if len(orig.Parents) > 0 {
+				p, err := r.ReadCommit(orig.Parents[0])
+				if err != nil {
+					return err
+				}
+				baseTreeID = p.TreeID
+			}
+
+			result, err := merge.Trees(r, baseTreeID, orig.TreeID, newParentCommit.TreeID)
+			if err != nil {
+				return fmt.Errorf("rebase merge step: %w", err)
+			}
+
+			// Preserve the original authorship; the committer below is the
+			// current user at rebase time.
+			sig := object.Signature{
+				Name:      orig.Author.Name,
+				Email:     orig.Author.Email,
+				Timestamp: orig.Author.Timestamp,
+			}
+
+			tx, err := r.Store.Begin()
+			if err != nil {
+				return err
+			}
+
+			newCommit := &object.Commit{
+				TreeID:    result.TreeID,
+				Parents:   [][32]byte{newParentID},
+				ChangeID:  orig.ChangeID,
+				Author:    sig,
+				Committer: object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now},
+				Message:   orig.Message,
+				Phase:     orig.Phase,
+			}
+			newCommitID, err := repo.WriteCommitTx(r.Store, tx, newCommit)
+			if err != nil {
+				r.Store.Rollback(tx)
+				return err
+			}
+			if err := r.Store.SetChangeCommit(tx, orig.ChangeID, newCommitID); err != nil {
+				r.Store.Rollback(tx)
+				return err
+			}
+
+			// Record that the original commit has been superseded.
+			obs := &object.ObsoleteMarker{
+				Predecessor: origID,
+				Successors:  [][32]byte{newCommitID},
+				Reason:      "rebase",
+			}
+			if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
+				r.Store.Rollback(tx)
+				return err
+			}
+
+			opAfter := buildMergeRefState(newCommitID, object.FormatChangeID(orig.ChangeID))
+			op := store.Operation{
+				Kind: "rebase-step", Timestamp: now.Unix(), Before: before, After: opAfter,
+				Metadata: fmt.Sprintf("rebased %s onto %x", object.FormatChangeID(orig.ChangeID), destID[:6]),
+			}
+			if _, err := r.Store.InsertOperation(tx, op); err != nil {
+				r.Store.Rollback(tx)
+				return err
+			}
+
+			// Replace any stale conflict records with this step's conflicts.
+			if err := r.Store.ClearAllConflicts(tx); err != nil {
+				r.Store.Rollback(tx)
+				return err
+			}
+			for _, cp := range result.Conflicts {
+				if err := r.Store.AddConflict(tx, cp); err != nil {
+					r.Store.Rollback(tx)
+					return err
+				}
+			}
+
+			if err := r.Store.Commit(tx); err != nil {
+				return err
+			}
+
+			newParentID = newCommitID
+			newTipChangeID = object.FormatChangeID(orig.ChangeID)
+
+			// Conflicts pause the rebase: materialize the conflicted tree,
+			// move HEAD to the partially-rebased change, and let the user
+			// resolve before continuing.
+			if len(result.Conflicts) > 0 {
+				w := wc.New(r)
+				if err := w.Materialize(result.TreeID, newTipChangeID); err != nil {
+					return err
+				}
+				if err := r.WriteHead(newTipChangeID); err != nil {
+					return err
+				}
+				fmt.Printf("Rebase paused at %s (%d conflict(s)):\n", newTipChangeID, len(result.Conflicts))
+				for _, p := range result.Conflicts {
+					fmt.Printf("  conflict: %s\n", p)
+				}
+				fmt.Println("Resolve conflicts, then run 'arche resolve <path>' and 'arche snap'.")
+				return nil
+			}
+		}
+
+		// All steps replayed cleanly: materialize the final tree and move
+		// HEAD to the last rebased change.
+		finalCommit, err := r.ReadCommit(newParentID)
+		if err != nil {
+			return err
+		}
+		w := wc.New(r)
+		if err := w.Materialize(finalCommit.TreeID, newTipChangeID); err != nil {
+			return err
+		}
+		if err := r.WriteHead(newTipChangeID); err != nil {
+			return err
+		}
+		fmt.Printf("Rebase complete: now at %s\n", newTipChangeID)
+		return nil
+	},
+}
+
+func init() {
+	rebaseCmd.Flags().BoolVar(&rebaseForceRewrite, "force-rewrite", false, "allow rewriting public commits")
+}
+
+// collectLinearChain returns, oldest first, the first-parent chain of
+// commits from headID down to (but excluding) the first ancestor of destID.
+// NOTE(review): read errors during the ancestor walk are swallowed via
+// break, and if destID is unrelated to headID the chain extends all the
+// way to the root — confirm both behaviours are intended.
+func collectLinearChain(r *repo.Repo, headID, destID [32]byte) ([][32]byte, error) {
+	// Mark every ancestor of destID (BFS over all parents).
+	destAncestors := make(map[[32]byte]bool)
+	q := [][32]byte{destID}
+	for len(q) > 0 {
+		id := q[0]
+		q = q[1:]
+		if destAncestors[id] {
+			continue
+		}
+		destAncestors[id] = true
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			break
+		}
+		q = append(q, c.Parents...)
+	}
+
+	// Walk first parents from HEAD until we hit dest's ancestry.
+	var chain [][32]byte
+	current := headID
+	for !destAncestors[current] {
+
+		c, err := r.ReadCommit(current)
+		if err != nil {
+			return nil, err
+		}
+		chain = append(chain, current)
+		if len(c.Parents) == 0 {
+			break
+		}
+		current = c.Parents[0]
+	}
+
+	// Reverse into oldest-first order for replay.
+	for i, j := 0, len(chain)-1; i < j; i, j = i+1, j-1 {
+		chain[i], chain[j] = chain[j], chain[i]
+	}
+
+	return chain, nil
+}

internal/cli/cmd_resolve.go [A]
--- /dev/null
+++ b/internal/cli/cmd_resolve.go
@@ -1,0 +1,137 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// resolveCmd replaces the conflicted file at <path> in the draft commit
+// with the file's current on-disk contents and clears its conflict record.
+var resolveCmd = &cobra.Command{
+	Use:   "resolve <path>",
+	Short: "Mark a conflict at <path> as resolved",
+	Long: `After editing a conflicted file to a satisfactory state, run this command
+to replace the conflict object in the working-copy draft with the current
+file contents and mark the path as resolved.`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		// Normalise to forward slashes: tree paths are slash-separated.
+		relPath := filepath.ToSlash(args[0])
+		r := openRepo()
+		defer r.Close()
+
+		head, headID, err := r.HeadCommit()
+		if err != nil {
+			return err
+		}
+
+		absPath := filepath.Join(r.Root, relPath)
+		data, err := os.ReadFile(absPath)
+		if err != nil {
+			return fmt.Errorf("read %s: %w", relPath, err)
+		}
+
+		blob := &object.Blob{Content: data}
+
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+
+		blobID, err := repo.WriteBlobTx(r.Store, tx, blob)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		// Rewrite the draft's tree with the resolved blob in place.
+		newTreeID, err := replaceEntryInTree(r, tx, head.TreeID, relPath, blobID, object.ModeFile)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		// Amend the draft in place: same change ID and parents, new tree.
+		now := time.Now()
+		sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
+		newCommit := &object.Commit{
+			TreeID:    newTreeID,
+			Parents:   head.Parents,
+			ChangeID:  head.ChangeID,
+			Author:    sig,
+			Committer: sig,
+			Message:   head.Message,
+			Phase:     head.Phase,
+		}
+		newCommitID, err := repo.WriteCommitTx(r.Store, tx, newCommit)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.SetChangeCommit(tx, head.ChangeID, newCommitID); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		if err := r.Store.ClearConflict(tx, relPath); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		// NOTE(review): Before/After here are short commit-hash prefixes,
+		// unlike the JSON ref state other commands record — confirm that
+		// undo understands both encodings.
+		before := fmt.Sprintf("%x", headID[:6])
+		op := store.Operation{
+			Kind: "resolve", Timestamp: now.Unix(), Before: before,
+			After: fmt.Sprintf("%x", newCommitID[:6]), Metadata: "resolved " + relPath,
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		// Re-materialize so the working copy matches the amended draft.
+		w := wc.New(r)
+		if err := w.Materialize(newTreeID, object.FormatChangeID(head.ChangeID)); err != nil {
+			return err
+		}
+		fmt.Printf("Resolved: %s\n", relPath)
+		return nil
+	},
+}
+
+// replaceEntryInTree returns the ID of a new tree identical to treeID
+// except that the entry at slash-separated path is replaced by blobID with
+// mode m. Intermediate trees along the path are rewritten recursively; all
+// sibling entries are carried over unchanged. Errors if a path component
+// is missing. NOTE(review): when descending, the matched entry is assumed
+// to be a subtree — a file-vs-directory mismatch would surface only as a
+// ReadTree failure; confirm that is acceptable.
+func replaceEntryInTree(r *repo.Repo, tx *store.Tx, treeID [32]byte, path string, blobID [32]byte, m object.EntryMode) ([32]byte, error) {
+	// parts[0] is the entry to match at this level; parts[1] (if present)
+	// is the remaining path to recurse into.
+	parts := strings.SplitN(path, "/", 2)
+
+	tree, err := r.ReadTree(treeID)
+	if err != nil {
+		return object.ZeroID, err
+	}
+
+	newEntries := make([]object.TreeEntry, 0, len(tree.Entries))
+	found := false
+	for _, e := range tree.Entries {
+		if e.Name == parts[0] {
+			found = true
+			if len(parts) == 1 {
+				// Leaf: substitute the resolved blob here.
+				newEntries = append(newEntries, object.TreeEntry{Mode: m, Name: parts[0], ObjectID: blobID})
+			} else {
+				// Interior: rewrite the subtree and reference its new ID.
+				subID, err := replaceEntryInTree(r, tx, e.ObjectID, parts[1], blobID, m)
+				if err != nil {
+					return object.ZeroID, err
+				}
+				newEntries = append(newEntries, object.TreeEntry{Mode: object.ModeDir, Name: parts[0], ObjectID: subID})
+			}
+		} else {
+			newEntries = append(newEntries, e)
+		}
+	}
+	if !found {
+		return object.ZeroID, fmt.Errorf("path %q not found in tree", path)
+	}
+	return repo.WriteTreeTx(r.Store, tx, &object.Tree{Entries: newEntries})
+}

internal/cli/cmd_serve.go [A]
--- /dev/null
+++ b/internal/cli/cmd_serve.go
@@ -1,0 +1,59 @@
+package cli
+
+import (
+	"fmt"
+	"net/http"
+
+	"arche/internal/syncpkg"
+
+	"github.com/spf13/cobra"
+)
+
+// serveCmd starts an HTTP sync server for this repository. Flag values
+// fall back to the [serve] section of .arche/config.toml when not given
+// on the command line.
+var serveCmd = &cobra.Command{
+	Use:   "serve",
+	Short: "Start a sync server for this repository",
+	Long: `Start an HTTP sync server allowing remote clients to push and pull objects,
+bookmarks, and issue events.
+
+Authentication is via bearer token. Set --token or configure it in
+.arche/config.toml under [serve]:
+
+  [serve]
+  token = "secret"
+
+.arche/ is local metadata — like .git/ — and is never committed to the
+object store, never synced, and never visible to remote peers. The token
+stays on this machine only.
+
+The server listens on localhost only by default. Use --bind 0.0.0.0 to
+accept connections from other hosts.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		port, _ := cmd.Flags().GetInt("port")
+		bindAddr, _ := cmd.Flags().GetString("bind")
+		token, _ := cmd.Flags().GetString("token")
+
+		// Fall back to the configured port only when --port was not set
+		// explicitly. (Comparing the value against the default 8765 would
+		// also silently override an explicit "--port 8765".)
+		if !cmd.Flags().Changed("port") && r.Cfg.Serve.Port != 0 {
+			port = r.Cfg.Serve.Port
+		}
+		// Flag token wins; otherwise use the configured token.
+		if token == "" {
+			token = r.Cfg.Serve.Token
+		}
+		if token == "" {
+			fmt.Println("warning: no token configured — server accepts unauthenticated requests")
+		}
+
+		srv := syncpkg.NewServer(r, token)
+		addr := fmt.Sprintf("%s:%d", bindAddr, port)
+		fmt.Printf("arche serve: listening on http://%s\n", addr)
+		// Blocks until the server stops; the returned error propagates to
+		// the CLI.
+		return http.ListenAndServe(addr, srv.Handler())
+	},
+}
+
+func init() {
+	serveCmd.Flags().Int("port", 8765, "port to listen on")
+	serveCmd.Flags().String("bind", "localhost", "address to bind")
+	serveCmd.Flags().String("token", "", "bearer token (overrides config)")
+}

internal/cli/cmd_snap.go [A]
--- /dev/null
+++ b/internal/cli/cmd_snap.go
@@ -1,0 +1,174 @@
+package cli
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strings"
+
+	"arche/internal/gitcompat"
+	"arche/internal/tui"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// Flags for 'arche snap'.
+var (
+	snapInteractive bool
+	snapSign        bool
+	snapSignKey     string
+	snapAmend       bool
+)
+
+var snapCmd = &cobra.Command{
+	Use:     "snap [message]",
+	Aliases: []string{"commit"},
+	Short:   "Finalise the working copy draft into a named commit",
+	Long: `Snapshot the current working directory into the draft commit (optionally
+setting a message), finalise it, and create a new empty draft as the next
+HEAD. The snapped commit keeps its change ID; only the content hash changes
+if files were modified since the last snap.
+
+With --interactive (-i) you are shown each hunk and asked whether to include
+it in this snap.  Unselected hunks remain as working-copy changes in the new
+draft.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// Message comes from args; fall back to an interactive prompt.
+		msg := strings.Join(args, " ")
+		if msg == "" {
+			msg = promptMessage()
+		}
+		if msg == "" {
+			return fmt.Errorf("aborting snap: empty commit message")
+		}
+
+		w := wc.New(r)
+
+		// Enable signing when asked via flag or configured to auto-sign;
+		// an explicit --key overrides the configured key file.
+		if snapSign || r.Cfg.Sign.Auto {
+			w.SignKey = r.Cfg.Sign.KeyFile
+			if snapSignKey != "" {
+				w.SignKey = snapSignKey
+			}
+		}
+
+		// --amend: rewrite the current commit in place and return early.
+		if snapAmend {
+			amended, amendedID, err := w.Amend(msg)
+			if err != nil {
+				return err
+			}
+			signedLabel := ""
+			if amended.CommitSig != nil {
+				signedLabel = " [signed]"
+			}
+			fmt.Printf("Amended %s - %s%s\n", "ch:"+amended.ChangeID, amended.Message, signedLabel)
+			fmt.Printf("  %x\n", amendedID[:8])
+			return nil
+		}
+
+		// --interactive: let the user pick hunks; unselected hunks stay as
+		// working-copy changes in the next draft.
+		if snapInteractive {
+			diffs, err := w.ComputeWorkingDiffs()
+			if err != nil {
+				return err
+			}
+			if len(diffs) == 0 {
+				return fmt.Errorf("nothing to snap: working copy is clean")
+			}
+
+			// Flatten per-file hunks into one selector list.
+			var items []tui.HunkItem
+			for _, fhd := range diffs {
+				for hi, h := range fhd.Hunks {
+					items = append(items, tui.HunkItem{
+						FilePath:         fhd.Path,
+						HunkIdx:          hi,
+						TotalHunksInFile: len(fhd.Hunks),
+						Hunk:             h,
+					})
+				}
+			}
+
+			sel, err := tui.RunHunkSelector(items, "include in snap")
+			if err != nil {
+				return err
+			}
+			if sel.Cancelled {
+				fmt.Fprintln(os.Stderr, "Interactive snap cancelled.")
+				return nil
+			}
+
+			// Re-group the flat selection back into per-file bool slices.
+			// NOTE(review): assumes sel.Selected has one entry per item in
+			// input order — confirm against RunHunkSelector's contract.
+			perFile := make(map[string][]bool)
+			idx := 0
+			for _, fhd := range diffs {
+				n := len(fhd.Hunks)
+				perFile[fhd.Path] = sel.Selected[idx : idx+n]
+				idx += n
+			}
+
+			snapped, snappedID, err := w.SnapSelectedHunks(msg, diffs, perFile)
+			if err != nil {
+				return err
+			}
+
+			signedLabelI := ""
+			if snapped.CommitSig != nil {
+				signedLabelI = " [signed]"
+			}
+			fmt.Printf("Snapped %s - %s%s\n", "ch:"+snapped.ChangeID, snapped.Message, signedLabelI)
+			fmt.Printf("  %x\n", snappedID[:8])
+
+			// Best-effort git mirror: failures are reported, not fatal.
+			if r.Cfg.Git.Enabled {
+				gitHash, err := gitcompat.MirrorCommit(r.Root, r, snappedID)
+				if err != nil {
+					fmt.Fprintf(os.Stderr, "arche: git mirror failed: %v\n", err)
+				} else if gitHash != "" {
+					fmt.Printf("  git: %s\n", gitHash[:8])
+				}
+			}
+
+			head, _ := r.Head()
+			fmt.Printf("Working copy now at %s (draft)\n", head)
+			return nil
+		}
+
+		// Default path: snap the entire working copy.
+		snapped, snappedID, err := w.Snap(msg)
+		if err != nil {
+			return err
+		}
+
+		signedLabel := ""
+		if snapped.CommitSig != nil {
+			signedLabel = " [signed]"
+		}
+
+		fmt.Printf("Snapped %s - %s%s\n", "ch:"+snapped.ChangeID, snapped.Message, signedLabel)
+		fmt.Printf("  %x\n", snappedID[:8])
+
+		// Best-effort git mirror: failures are reported, not fatal.
+		if r.Cfg.Git.Enabled {
+			gitHash, err := gitcompat.MirrorCommit(r.Root, r, snappedID)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "arche: git mirror failed: %v\n", err)
+			} else if gitHash != "" {
+				fmt.Printf("  git: %s\n", gitHash[:8])
+			}
+		}
+
+		head, _ := r.Head()
+		fmt.Printf("Working copy now at %s (draft, empty)\n", head)
+		return nil
+	},
+}
+
+// promptMessage asks for a commit message on stdin and returns it with
+// surrounding whitespace trimmed; returns "" on EOF or read failure.
+func promptMessage() string {
+	fmt.Print("Commit message: ")
+	scanner := bufio.NewScanner(os.Stdin)
+	if !scanner.Scan() {
+		return ""
+	}
+	return strings.TrimSpace(scanner.Text())
+}
+
+// Flag registration for 'arche snap'.
+func init() {
+	snapCmd.Flags().BoolVarP(&snapInteractive, "interactive", "i", false, "interactively select hunks to include in this snap")
+	snapCmd.Flags().BoolVar(&snapSign, "sign", false, "sign the commit with your SSH key")
+	snapCmd.Flags().StringVar(&snapSignKey, "key", "", "path to SSH private key to use for signing (default: auto-detect)")
+	snapCmd.Flags().BoolVar(&snapAmend, "amend", false, "amend the current commit in-place and auto-rebase downstream draft dependents")
+}

internal/cli/cmd_split.go [A]
--- /dev/null
+++ b/internal/cli/cmd_split.go
@@ -1,0 +1,161 @@
+package cli
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/tui"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// splitCmd divides the current working-copy changes into two commits via
+// interactive hunk selection, obsoleting the original draft.
+// NOTE(review): unlike rebase/squash there is no public-phase guard here —
+// confirm splitting is only ever applied to the draft HEAD.
+var splitCmd = &cobra.Command{
+	Use:   "split",
+	Short: "Split the current working-copy changes into two commits using interactive hunk selection",
+	Long: `Interactively select which hunks belong to the first commit; the remaining
+changes become a second draft commit chained on top.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		w := wc.New(r)
+
+		head, headID, err := r.HeadCommit()
+		if err != nil {
+			return err
+		}
+
+		before, _ := r.CaptureRefState()
+
+		diffs, err := w.ComputeWorkingDiffs()
+		if err != nil {
+			return err
+		}
+		if len(diffs) == 0 {
+			fmt.Println("Nothing to split: working copy has no changes.")
+			return nil
+		}
+
+		// Flatten per-file hunks into one selector list.
+		var items []tui.HunkItem
+		for _, fhd := range diffs {
+			for hi, h := range fhd.Hunks {
+				items = append(items, tui.HunkItem{
+					FilePath:         fhd.Path,
+					HunkIdx:          hi,
+					TotalHunksInFile: len(fhd.Hunks),
+					Hunk:             h,
+				})
+			}
+		}
+
+		sel, err := tui.RunHunkSelector(items, "include in first commit")
+		if err != nil {
+			return err
+		}
+		if sel.Cancelled {
+			fmt.Fprintln(os.Stderr, "Split cancelled.")
+			return nil
+		}
+
+		// Re-group the flat selection into per-file bool slices.
+		// NOTE(review): assumes sel.Selected has one entry per item in
+		// input order — confirm against RunHunkSelector's contract.
+		perFile := make(map[string][]bool)
+		idx := 0
+		for _, fhd := range diffs {
+			n := len(fhd.Hunks)
+			perFile[fhd.Path] = sel.Selected[idx : idx+n]
+			idx += n
+		}
+
+		// An empty first commit would make the split meaningless.
+		anyFirst := false
+		for _, bs := range perFile {
+			for _, b := range bs {
+				if b {
+					anyFirst = true
+					break
+				}
+			}
+		}
+		if !anyFirst {
+			return fmt.Errorf("no hunks selected for first commit – split aborted")
+		}
+
+		sc := bufio.NewScanner(os.Stdin)
+
+		// Default messages derive from the original message with a (n/2)
+		// suffix; the user may override either at the prompt.
+		msg1 := strings.TrimSpace(head.Message)
+		if msg1 == "" {
+			msg1 = "change (1/2)"
+		} else {
+			msg1 += " (1/2)"
+		}
+		fmt.Printf("Message for first commit [%s]: ", msg1)
+		sc.Scan()
+		if t := strings.TrimSpace(sc.Text()); t != "" {
+			msg1 = t
+		}
+
+		msg2 := strings.TrimSpace(head.Message)
+		if msg2 == "" {
+			msg2 = "change (2/2)"
+		} else {
+			msg2 += " (2/2)"
+		}
+		fmt.Printf("Message for second commit [%s]: ", msg2)
+		sc.Scan()
+		if t := strings.TrimSpace(sc.Text()); t != "" {
+			msg2 = t
+		}
+
+		// First commit takes the selected hunks; the second chains on top
+		// of it with everything that remains.
+		c1, c1ID, err := w.SnapFirstOfSplit(msg1, diffs, perFile)
+		if err != nil {
+			return fmt.Errorf("snap first commit: %w", err)
+		}
+
+		c2, c2ID, err := w.SnapRemaining(msg2, c1ID)
+		if err != nil {
+			return fmt.Errorf("snap second commit: %w", err)
+		}
+
+		// Record the original as obsoleted by both successors, plus an
+		// operation-log entry for undo.
+		now := time.Now()
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+		obs := &object.ObsoleteMarker{
+			Predecessor: headID,
+			Successors:  [][32]byte{c1ID, c2ID},
+			Reason:      "split",
+		}
+		if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		after := fmt.Sprintf(`{"head":%q,"tip1":%q,"tip2":%q}`,
+			object.FormatChangeID(c2.ChangeID),
+			fmt.Sprintf("%x", c1ID),
+			fmt.Sprintf("%x", c2ID),
+		)
+		op := store.Operation{
+			Kind: "split", Timestamp: now.Unix(), Before: before, After: after,
+			Metadata: "split into 2 commits",
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		fmt.Printf("Split into:\n  ch:%s  %s\n  ch:%s  %s\n",
+			c1.ChangeID, c1.Message,
+			c2.ChangeID, c2.Message,
+		)
+		return nil
+	},
+}

internal/cli/cmd_squash.go [A]
--- /dev/null
+++ b/internal/cli/cmd_squash.go
@@ -1,0 +1,230 @@
+package cli
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// squashCmd collapses a linear range of commits into a single new commit;
+// the actual work is in squashRunE.
+var squashCmd = &cobra.Command{
+	Use:   "squash <id1>..<id2>",
+	Short: "Collapse a range of commits into one",
+	Long: `Collapse all commits from <id1> to <id2> (inclusive, linear range) into a
+single new commit.  The new commit:
+
+  - Has the tree of <id2> (the final state).
+  - Has the parents of <id1> (the commit before the range starts).
+  - Gets a new change ID.
+  - Combines the messages of all squashed commits.
+
+The squashed commits are each marked obsolete.  If HEAD is inside the
+squashed range, HEAD is moved to the new commit and the working copy is
+materialized from <id2>'s tree.`,
+	Args: cobra.ExactArgs(1),
+	RunE: squashRunE,
+}
+
+// squashRunE implements 'arche squash': it validates the range, writes the
+// combined commit and obsolete markers in one transaction, then relocates
+// HEAD if it was inside the squashed range.
+func squashRunE(cmd *cobra.Command, args []string) error {
+	r := openRepo()
+	defer r.Close()
+
+	id1, id2, err := parseDotDotRange(r, args[0])
+	if err != nil {
+		return err
+	}
+
+	// Oldest-first linear chain from id1 to id2 inclusive.
+	chain, err := collectRangeInclusive(r, id1, id2)
+	if err != nil {
+		return err
+	}
+	if len(chain) == 1 {
+		fmt.Println("Range contains only one commit — nothing to squash.")
+		return nil
+	}
+
+	// Refuse to rewrite published history unless explicitly forced.
+	if !squashForceRewrite {
+		for _, cid := range chain {
+			c, err := r.ReadCommit(cid)
+			if err != nil {
+				return fmt.Errorf("read commit %x: %w", cid[:6], err)
+			}
+			if c.Phase == object.PhasePublic {
+				return fmt.Errorf("commit ch:%s is public; use --force-rewrite to rewrite history", c.ChangeID)
+			}
+		}
+	}
+
+	first, err := r.ReadCommit(chain[0])
+	if err != nil {
+		return err
+	}
+	last, err := r.ReadCommit(chain[len(chain)-1])
+	if err != nil {
+		return err
+	}
+
+	// Combine non-empty messages, oldest first.
+	// NOTE(review): each commit in the range is read again here (and once
+	// above for the phase check) — fine for small ranges, but could reuse
+	// a single pass.
+	var msgs []string
+	for _, cid := range chain {
+		c, err := r.ReadCommit(cid)
+		if err != nil {
+			return err
+		}
+		if m := strings.TrimSpace(c.Message); m != "" {
+			msgs = append(msgs, m)
+		}
+	}
+	combined := strings.Join(msgs, "\n\n")
+	if combined == "" {
+		combined = "squash"
+	}
+
+	before, _ := r.CaptureRefState()
+	now := time.Now()
+	sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
+
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return err
+	}
+
+	// The squashed commit gets a brand-new change ID.
+	newCID, err := r.Store.AllocChangeID(tx)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+
+	// Final tree from the end of the range; parents from before its start.
+	squashed := &object.Commit{
+		TreeID:    last.TreeID,
+		Parents:   first.Parents,
+		ChangeID:  newCID,
+		Author:    first.Author,
+		Committer: sig,
+		Message:   combined,
+		Phase:     first.Phase,
+	}
+	// Fall back to the current user when the first commit has no usable
+	// author timestamp.
+	if first.Author.Timestamp.IsZero() {
+		squashed.Author = sig
+	}
+
+	newID, err := repo.WriteCommitTx(r.Store, tx, squashed)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+	if err := r.Store.SetChangeCommit(tx, newCID, newID); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+
+	// Every squashed commit is superseded by the single new commit.
+	for _, oldID := range chain {
+		obs := &object.ObsoleteMarker{
+			Predecessor: oldID,
+			Successors:  [][32]byte{newID},
+			Reason:      "squash",
+		}
+		if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+	}
+
+	after := fmt.Sprintf(`{"head":%q,"tip":%q}`,
+		object.FormatChangeID(newCID), fmt.Sprintf("%x", newID))
+	op := store.Operation{
+		Kind:      "squash",
+		Timestamp: now.Unix(),
+		Before:    before,
+		After:     after,
+		Metadata:  fmt.Sprintf("squashed %d commits", len(chain)),
+	}
+	if _, err := r.Store.InsertOperation(tx, op); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+	if err := r.Store.Commit(tx); err != nil {
+		return err
+	}
+
+	// If HEAD was inside the squashed range, move it to the new commit
+	// and refresh the working copy from the squashed tree.
+	_, headID, headErr := r.HeadCommit()
+	if headErr == nil {
+		for _, cid := range chain {
+			if cid == headID {
+				w := wc.New(r)
+				if err := w.Materialize(squashed.TreeID, object.FormatChangeID(newCID)); err != nil {
+					return err
+				}
+				if err := r.WriteHead(object.FormatChangeID(newCID)); err != nil {
+					return err
+				}
+				break
+			}
+		}
+	}
+
+	fmt.Printf("Squashed %d commits → ch:%s\n  %s\n",
+		len(chain), newCID, squashFirstLine(combined))
+	return nil
+}
+
+// parseDotDotRange resolves a "<id1>..<id2>" range argument to two commit
+// IDs. Both sides must be non-empty and must resolve as refs.
+func parseDotDotRange(r *repo.Repo, s string) ([32]byte, [32]byte, error) {
+	startRef, endRef, ok := strings.Cut(s, "..")
+	if !ok || startRef == "" || endRef == "" {
+		return object.ZeroID, object.ZeroID,
+			fmt.Errorf("squash range must be <id1>..<id2>, got %q", s)
+	}
+	id1, err := resolveRef(r, startRef)
+	if err != nil {
+		return object.ZeroID, object.ZeroID, fmt.Errorf("resolve %q: %w", startRef, err)
+	}
+	id2, err := resolveRef(r, endRef)
+	if err != nil {
+		return object.ZeroID, object.ZeroID, fmt.Errorf("resolve %q: %w", endRef, err)
+	}
+	return id1, id2, nil
+}
+
+// collectRangeInclusive walks first parents from id2 back to id1 and
+// returns the inclusive chain oldest first. It errors when id1 is not a
+// first-parent ancestor of id2 or when the walk crosses a merge commit.
+func collectRangeInclusive(r *repo.Repo, id1, id2 [32]byte) ([][32]byte, error) {
+	var chain [][32]byte
+	cur := id2
+	for {
+		chain = append(chain, cur)
+		if cur == id1 {
+			break
+		}
+		c, err := r.ReadCommit(cur)
+		if err != nil {
+			return nil, fmt.Errorf("read commit %x: %w", cur[:4], err)
+		}
+		if len(c.Parents) == 0 {
+			return nil, fmt.Errorf("reached root without finding start commit — id1 must be an ancestor of id2")
+		}
+		if len(c.Parents) > 1 {
+			return nil, fmt.Errorf("merge commit %x in range — squash only works on linear chains", cur[:4])
+		}
+		cur = c.Parents[0]
+	}
+	// Reverse into oldest-first order.
+	for i, j := 0, len(chain)-1; i < j; i, j = i+1, j-1 {
+		chain[i], chain[j] = chain[j], chain[i]
+	}
+	return chain, nil
+}
+
+// squashForceRewrite allows squashing public commits when set via flag.
+var squashForceRewrite bool
+
+func init() {
+	squashCmd.Flags().BoolVar(&squashForceRewrite, "force-rewrite", false, "allow rewriting public commits")
+}
+
+// squashFirstLine returns the text up to (not including) the first
+// newline, or all of s when it contains none.
+func squashFirstLine(s string) string {
+	line, _, _ := strings.Cut(s, "\n")
+	return line
+}

internal/cli/cmd_stack.go [A]
--- /dev/null
+++ b/internal/cli/cmd_stack.go
@@ -1,0 +1,132 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/syncpkg"
+
+	"github.com/spf13/cobra"
+)
+
+// stackCmd is the parent "stack" command; subcommands are attached in init.
+var stackCmd = &cobra.Command{
+	Use:   "stack",
+	Short: "Manage stacked changes",
+}
+
+// stackPushCmd implements "stack push": creates a stack/<prefix> bookmark
+// for each draft commit between HEAD and the first non-draft ancestor,
+// then pushes to the named remote (default "origin").
+var stackPushCmd = &cobra.Command{
+	Use:   "push [remote]",
+	Short: "Publish the draft stack as per-change bookmarks for review",
+	Long: `arche stack push publishes the chain of draft commits from the current HEAD
+back to the nearest public ancestor as individual bookmarks on the remote.
+
+Each draft change gets a bookmark named stack/<change-id-prefix>, grouped as a
+reviewable unit on the forge. When you amend a change and run stack push again
+all affected downstream bookmarks are updated in a single operation.
+
+Only draft commits are included. The first public (or secret) ancestor marks
+the base of the stack.`,
+	Args: cobra.MaximumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		remoteName := "origin"
+		if len(args) == 1 {
+			remoteName = args[0]
+		}
+
+		rc := findRemote(r.Cfg.Remotes, remoteName)
+		if rc == nil {
+			return fmt.Errorf("no remote named %q; add one to .arche/config.toml:\n\n  [[remote]]\n  name  = %q\n  url   = \"http://host:8765\"\n  token = \"secret\"", remoteName, remoteName)
+		}
+
+		// Oldest-first draft chain from the stack base up to HEAD.
+		chain, err := collectDraftChain(r)
+		if err != nil {
+			return err
+		}
+		if len(chain) == 0 {
+			return fmt.Errorf("no draft commits in stack — HEAD is already public")
+		}
+
+		// Set (or move) one local bookmark per draft change before pushing.
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+
+		var bmNames []string
+		for _, e := range chain {
+			// NOTE(review): ChangeID[:8] assumes change IDs are always at
+			// least 8 characters — confirm, or this would panic.
+			name := "stack/" + e.commit.ChangeID[:8]
+			bm := store.Bookmark{Name: name, CommitID: e.commitID}
+			if setErr := r.Store.SetBookmark(tx, bm); setErr != nil {
+				r.Store.Rollback(tx)
+				return fmt.Errorf("set bookmark %q: %w", name, setErr)
+			}
+			bmNames = append(bmNames, name)
+		}
+
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		fmt.Printf("Pushing stack of %d change(s) to %s (%s):\n\n", len(chain), remoteName, rc.URL)
+		for i, e := range chain {
+			name := bmNames[i]
+			fmt.Printf("  ch:%s → %s — %s\n", e.commit.ChangeID, name, bisectFirstLine(e.commit.Message))
+		}
+		fmt.Println()
+
+		client := syncpkg.NewClient(r, rc.URL, rc.Token)
+		pushOpts := syncpkg.PushOptions{}
+		if err := client.PushWith(pushOpts); err != nil {
+			fmt.Fprintf(os.Stderr, "arche stack push: push failed: %v\n", err)
+			return err
+		}
+
+		fmt.Printf("Stack pushed: %s\n", strings.Join(bmNames, ", "))
+		return nil
+	},
+}
+
+// draftEntry pairs a commit ID with its decoded commit object.
+type draftEntry struct {
+	commitID [32]byte
+	commit   *object.Commit
+}
+
+// collectDraftChain walks first parents from HEAD while the stored phase
+// is draft and returns the chain oldest first. The walk stops at the first
+// non-draft commit, at a root commit, or — NOTE(review) — silently on a
+// read error; confirm the silent stop is intended.
+func collectDraftChain(r *repo.Repo) ([]draftEntry, error) {
+	_, headID, err := r.HeadCommit()
+	if err != nil {
+		return nil, err
+	}
+
+	var chain []draftEntry
+	cur := headID
+	for {
+		c, err := r.ReadCommit(cur)
+		if err != nil {
+			break
+		}
+		// A missing phase record yields the zero Phase, which ends the
+		// chain unless the zero value happens to be PhaseDraft.
+		phase, _ := r.Store.GetPhase(cur)
+		if phase != object.PhaseDraft {
+			break
+		}
+		chain = append(chain, draftEntry{commitID: cur, commit: c})
+		if len(c.Parents) == 0 {
+			break
+		}
+		cur = c.Parents[0]
+	}
+
+	// Reverse into oldest-first order.
+	for i, j := 0, len(chain)-1; i < j; i, j = i+1, j-1 {
+		chain[i], chain[j] = chain[j], chain[i]
+	}
+	return chain, nil
+}
+
+func init() {
+	stackCmd.AddCommand(stackPushCmd)
+}

internal/cli/cmd_status.go [A]
--- /dev/null
+++ b/internal/cli/cmd_status.go
@@ -1,0 +1,131 @@
+package cli
+
+import (
+	"fmt"
+
+	"arche/internal/issuedb"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// statusCmd implements `arche status` (alias `st`): it prints the current
+// HEAD change with its commit and phase, bookmarks pointing at HEAD,
+// per-file working-copy changes, unresolved merge conflicts, and any
+// issue-body conflicts recorded in the issue database.
+var statusCmd = &cobra.Command{
+	Use:     "status",
+	Aliases: []string{"st"},
+	Short:   "Show working copy state, conflicts, and current change",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		head, err := r.Head()
+		if err != nil {
+			return err
+		}
+
+		headCommit, commitID, err := r.HeadCommit()
+		if err != nil {
+			return err
+		}
+
+		// Phase lookup is best-effort; a missing phase prints as its zero value.
+		phase, _ := r.Store.GetPhase(commitID)
+
+		w := wc.New(r)
+		changes, err := w.Status()
+		if err != nil {
+			return err
+		}
+		conflicts, _ := r.Store.ListConflicts()
+
+		empty := ""
+		if len(changes) == 0 && len(conflicts) == 0 {
+			empty = ", empty"
+		}
+		fmt.Printf("Working copy: %s (%s%s)\n", head, phase, empty)
+		fmt.Printf("Commit:       %x  %s\n", commitID[:8], headCommit.Message)
+
+		bms, _ := r.Store.ListBookmarks()
+		var bmNames []string
+		for _, bm := range bms {
+			if bm.CommitID == commitID {
+				bmNames = append(bmNames, bm.Name)
+			}
+		}
+		if len(bmNames) > 0 {
+			fmt.Printf("Bookmarks:    %v\n", bmNames)
+		} else if len(bms) == 0 {
+			fmt.Println("No bookmark. Run arche snap to record your first commit.")
+		} else {
+			fmt.Println("No bookmark points here. Create one with: arche bookmark set <name>")
+		}
+		fmt.Println()
+
+		// Index both lists by path once so each cross-check below is an O(1)
+		// lookup instead of a nested linear scan (the original re-scanned
+		// `changes` for every conflict path via an inline closure).
+		conflictSet := make(map[string]bool, len(conflicts))
+		for _, p := range conflicts {
+			conflictSet[p] = true
+		}
+		changedSet := make(map[string]bool, len(changes))
+		for _, f := range changes {
+			changedSet[f.Path] = true
+		}
+
+		if len(changes) == 0 && len(conflicts) == 0 {
+			fmt.Println("Nothing changed (working copy matches commit).")
+			return nil
+		}
+
+		for _, f := range changes {
+			var label string
+			if conflictSet[f.Path] {
+				label = "conflict"
+			} else {
+				switch f.Status {
+				case 'M':
+					label = "modified"
+				case 'A':
+					label = "added   "
+				case 'D':
+					label = "deleted "
+				default:
+					label = "unknown "
+				}
+			}
+			fmt.Printf("  %s  %s\n", label, f.Path)
+		}
+
+		// Conflicted paths with no working-copy change were not printed above.
+		for _, p := range conflicts {
+			if !changedSet[p] {
+				fmt.Printf("  conflict  %s\n", p)
+			}
+		}
+		if len(conflicts) > 0 {
+			fmt.Printf("\n%d unresolved conflict(s). Run 'arche resolve <path>' after editing.\n", len(conflicts))
+		}
+
+		// Issue-body conflicts live in the issue DB; failing to open it is
+		// non-fatal for status.
+		idb, err := issuedb.Open(r.ArcheDir())
+		if err == nil {
+			defer idb.Close()
+			stubs, err := idb.Issues.ListIssues()
+			if err == nil {
+				var bodyConflicts int
+				for _, st := range stubs {
+					issue, err := idb.Issues.GetIssue(st.ID)
+					if err == nil && issue.BodyConflict != nil {
+						if bodyConflicts == 0 {
+							fmt.Println()
+						}
+						fmt.Printf("  issue conflict  %s  %s\n", st.ID[:8], st.Title)
+						bodyConflicts++
+					}
+				}
+				if bodyConflicts > 0 {
+					fmt.Printf("%d issue body conflict(s). Resolve via 'arche ui'.\n", bodyConflicts)
+				}
+			}
+		}
+
+		return nil
+	},
+}

internal/cli/cmd_sync.go [A]
--- /dev/null
+++ b/internal/cli/cmd_sync.go
@@ -1,0 +1,115 @@
+package cli
+
+import (
+	"fmt"
+
+	"arche/internal/gitcompat"
+	"arche/internal/repo"
+	"arche/internal/syncpkg"
+
+	"github.com/spf13/cobra"
+)
+
+// syncCmd implements `arche sync [remote]`: by default a pull followed by a
+// push against the named remote (default "origin"), with flags to restrict
+// direction (--pull/--push), override URL/token from config, and force-push.
+var syncCmd = &cobra.Command{
+	Use:   "sync [remote]",
+	Short: "Synchronise with a remote repository",
+	Long: `Synchronise this repository with a named remote (default: "origin").
+By default, performs a pull followed by a push.
+
+  arche sync           - pull then push from/to "origin"
+  arche sync upstream  - pull then push from/to the remote named "upstream"
+  arche sync --pull    - pull only
+  arche sync --push    - push only
+
+Remote URLs and tokens are stored in the repository configuration under
+[[remote]] sections, or can be specified directly with --url and --token.`,
+	Args: cobra.MaximumNArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		remoteName := "origin"
+		if len(args) == 1 {
+			remoteName = args[0]
+		}
+
+		pullOnly, _ := cmd.Flags().GetBool("pull")
+		pushOnly, _ := cmd.Flags().GetBool("push")
+		urlOverride, _ := cmd.Flags().GetString("url")
+		tokenOverride, _ := cmd.Flags().GetString("token")
+		force, _ := cmd.Flags().GetBool("force")
+		forcePublic, _ := cmd.Flags().GetBool("force-public")
+
+		if pullOnly && pushOnly {
+			return fmt.Errorf("--pull and --push are mutually exclusive")
+		}
+
+		// Resolve URL/token: an explicit --url bypasses the config lookup
+		// entirely; --token, when set, overrides the configured token.
+		url := urlOverride
+		token := tokenOverride
+		if url == "" {
+			rc := findRemote(r.Cfg.Remotes, remoteName)
+			if rc == nil {
+				return fmt.Errorf("no remote named %q; add one to .arche/config.toml:\n\n  [[remote]]\n  name  = %q\n  url   = \"http://host:8765\"\n  token = \"secret\"", remoteName, remoteName)
+			}
+			url = rc.URL
+			if token == "" {
+				token = rc.Token
+			}
+		}
+
+		client := syncpkg.NewClient(r, url, token)
+
+		if !pushOnly {
+			fmt.Printf("arche sync: pulling from %s …\n", url)
+			if err := client.Pull(); err != nil {
+				return fmt.Errorf("pull: %w", err)
+			}
+			fmt.Printf("arche sync: pull complete\n")
+		}
+
+		if !pullOnly {
+			fmt.Printf("arche sync: pushing to %s …\n", url)
+			if err := client.PushWith(syncpkg.PushOptions{Force: force, ForcePublic: forcePublic}); err != nil {
+				return fmt.Errorf("push: %w", err)
+			}
+			fmt.Printf("arche sync: push complete\n")
+		}
+
+		// Mirror to git when git-compat is enabled; failures here are reported
+		// as warnings and do not fail the sync.
+		if r.Cfg.Git.Enabled {
+			gitRemote := r.Cfg.Git.Remote
+			if gitRemote == "" {
+				gitRemote = "origin"
+			}
+			if !pushOnly {
+				if err := gitcompat.SyncPull(r.Root, gitRemote); err != nil {
+					fmt.Printf("arche sync: git pull warning: %v\n", err)
+				}
+			}
+			if !pullOnly {
+				if err := gitcompat.SyncPush(r.Root, gitRemote); err != nil {
+					fmt.Printf("arche sync: git push warning: %v\n", err)
+				}
+			}
+		}
+
+		return nil
+	},
+}
+
+// findRemote returns a pointer to the remote configuration entry whose Name
+// matches name, or nil when no such remote is configured. The pointer aliases
+// the slice element, so callers see the live configuration values.
+func findRemote(remotes []repo.RemoteConfig, name string) *repo.RemoteConfig {
+	for idx, rc := range remotes {
+		if rc.Name == name {
+			return &remotes[idx]
+		}
+	}
+	return nil
+}
+
+// Register the sync command's flags. --pull/--push restrict direction,
+// --url/--token bypass the config, and the force flags relax push safety.
+func init() {
+	syncCmd.Flags().Bool("pull", false, "pull from remote only (no push)")
+	syncCmd.Flags().Bool("push", false, "push to remote only (no pull)")
+	syncCmd.Flags().String("url", "", "remote URL (overrides config)")
+	syncCmd.Flags().String("token", "", "bearer token (overrides config)")
+	syncCmd.Flags().Bool("force", false, "force-push draft bookmarks, skipping fast-forward check")
+	syncCmd.Flags().Bool("force-public", false, "force-push even public bookmarks (writes obsolescence marker on server)")
+}

internal/cli/cmd_ui.go [A]
--- /dev/null
+++ b/internal/cli/cmd_ui.go
@@ -1,0 +1,44 @@
+package cli
+
+import (
+	"fmt"
+
+	"arche/internal/ui"
+
+	"github.com/spf13/cobra"
+)
+
+// defaultUIPort is the fallback port when neither --port nor ui.port is set.
+const defaultUIPort = 7070
+
+// uiCmd implements `arche ui`: it starts the local web UI server, choosing
+// the port with precedence --port flag > ui.port config > defaultUIPort.
+var uiCmd = &cobra.Command{
+	Use:   "ui",
+	Short: "Launch the local web UI",
+	Long: `Start a local HTTP server serving the Arche web interface.
+
+The interface shows the commit log, file tree, working copy status,
+and bookmarks. The port defaults to 7070 and can be overridden via
+--port or the ui.port setting in .arche/config.toml.
+
+  arche ui               - start on default port (7070)
+  arche ui --port 8080   - start on port 8080`,
+	Args: cobra.NoArgs,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// A zero value means "not set" at each level of the fallback chain.
+		port, _ := cmd.Flags().GetInt("port")
+		if port == 0 {
+			port = r.Cfg.UI.Port
+		}
+		if port == 0 {
+			port = defaultUIPort
+		}
+
+		fmt.Printf("arche ui: open http://localhost:%d in your browser\n", port)
+		// Serve blocks until the server stops; its error is the command result.
+		return ui.Serve(r, port)
+	},
+}
+
+// Register the UI command's --port flag (0 = defer to config/default).
+func init() {
+	uiCmd.Flags().Int("port", 0, "port to listen on (default: ui.port from config, or 7070)")
+}

internal/cli/cmd_undo.go [A]
--- /dev/null
+++ b/internal/cli/cmd_undo.go
@@ -1,0 +1,134 @@
+package cli
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// undoStep is the --step flag value: how many operations to step back.
+var undoStep int
+
+// undoCmd implements `arche undo [--step N]`: it restores HEAD and the
+// bookmark set to the "before" snapshot of the N-th most recent non-undo
+// operation, records the undo itself in the operation log (so undo is itself
+// undoable), then re-materializes the working copy to match the restored HEAD.
+var undoCmd = &cobra.Command{
+	Use:   "undo [--step N]",
+	Short: "Undo the last N operations",
+	Long: `Revert the repository to the state it was in before the last N operations.
+The undo itself is recorded in the operation log, so it is always undoable.
+Use 'arche op log' to see what operations can be undone.`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		// Fetch one extra operation so an immediately preceding "undo" entry
+		// can be skipped while still reaching the requested step.
+		ops, err := r.Store.ListOperations(undoStep + 1)
+		if err != nil {
+			return err
+		}
+		if len(ops) == 0 {
+			fmt.Println("Nothing to undo.")
+			return nil
+		}
+
+		// Prefer the N-th most recent operation that is not itself an undo.
+		var target *store.Operation
+		nonUndoCount := 0
+		for i := range ops {
+			if ops[i].Kind == "undo" {
+				continue
+			}
+			nonUndoCount++
+			if nonUndoCount == undoStep {
+				target = &ops[i]
+				break
+			}
+		}
+		// Fallback: not enough non-undo entries — take the raw N-th entry.
+		// NOTE(review): this can select an "undo" operation, effectively
+		// acting as a redo; confirm that is intended.
+		if target == nil && len(ops) >= undoStep {
+			target = &ops[undoStep-1]
+		}
+		if target == nil {
+			fmt.Println("Not enough operations to undo.")
+			return nil
+		}
+
+		var beforeState repo.RefState
+		if err := json.Unmarshal([]byte(target.Before), &beforeState); err != nil {
+			return fmt.Errorf("parse before state: %w", err)
+		}
+
+		// Restore HEAD first. NOTE(review): this happens outside the bookmark
+		// transaction below, so a failure mid-way can leave HEAD restored with
+		// bookmarks unchanged.
+		if beforeState.Head != "" {
+			if err := r.WriteHead(beforeState.Head); err != nil {
+				return fmt.Errorf("restore HEAD: %w", err)
+			}
+		}
+
+		// Capture the post-restore state; it becomes the "before" of the undo
+		// operation being recorded (so the undo can itself be undone).
+		after, _ := r.CaptureRefState()
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+
+		// Delete bookmarks that did not exist in the snapshot being restored…
+		currentBMs, _ := r.Store.ListBookmarks()
+		for _, bm := range currentBMs {
+			if _, exists := beforeState.Bookmarks[bm.Name]; !exists {
+				if err := r.Store.DeleteBookmark(tx, bm.Name); err != nil {
+					r.Store.Rollback(tx)
+					return fmt.Errorf("delete bookmark %s: %w", bm.Name, err)
+				}
+			}
+		}
+		// …then (re)point every bookmark recorded in the snapshot.
+		for name, hexID := range beforeState.Bookmarks {
+			raw, err := hex.DecodeString(hexID)
+			if err != nil || len(raw) != 32 {
+				r.Store.Rollback(tx)
+				return fmt.Errorf("invalid bookmark ID for %s: %s", name, hexID)
+			}
+			var id [32]byte
+			copy(id[:], raw)
+			if err := r.Store.SetBookmark(tx, store.Bookmark{Name: name, CommitID: id}); err != nil {
+				r.Store.Rollback(tx)
+				return fmt.Errorf("restore bookmark %s: %w", name, err)
+			}
+		}
+
+		// Record the undo in the op log, noting which operation it undid.
+		op := store.Operation{
+			Kind:      "undo",
+			Timestamp: time.Now().Unix(),
+			Before:    after,
+			After:     target.Before,
+			Metadata:  fmt.Sprintf(`{"undid":%d}`, target.Seq),
+		}
+		if _, err := r.Store.InsertOperation(tx, op); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		// Materialize the restored HEAD's tree into the working copy.
+		// Best-effort: refs are already restored, so failures only warn.
+		if beforeState.Head != "" {
+			bare := object.StripChangeIDPrefix(beforeState.Head)
+			if commitID, gcErr := r.Store.GetChangeCommit(bare); gcErr == nil {
+				if c, rcErr := r.ReadCommit(commitID); rcErr == nil {
+					if matErr := wc.New(r).MaterializeQuiet(c.TreeID); matErr != nil {
+						fmt.Fprintf(os.Stderr, "warning: could not materialize working copy: %v\n", matErr)
+					}
+				}
+			}
+		}
+
+		fmt.Printf("Undid operation #%d (%s  %s)\n",
+			target.Seq, target.Kind,
+			time.Unix(target.Timestamp, 0).Format("2006-01-02 15:04:05"))
+		return nil
+	},
+}
+
+// Register the undo command's --step flag (default: undo one operation).
+func init() {
+	undoCmd.Flags().IntVar(&undoStep, "step", 1, "number of operations to undo")
+}

internal/cli/cmd_watch.go [A]
--- /dev/null
+++ b/internal/cli/cmd_watch.go
@@ -1,0 +1,77 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"arche/internal/repo"
+	"arche/internal/watcher"
+	"arche/internal/wc"
+
+	"github.com/spf13/cobra"
+)
+
+// watchCmd declares `arche watch`; the actual work is in runWatch.
+var watchCmd = &cobra.Command{
+	Use:   "watch",
+	Short: "Watch working tree for changes to accelerate snap and status",
+	Long: `arche watch starts a filesystem event watcher that tracks which files
+have changed since the last snapshot. When the watcher is active, arche snap
+and arche status skip re-statting and re-hashing unmodified files, measurably
+improving performance on large working trees.
+
+The watcher seeds the dirty set with any files already modified before it
+starts, so the first arche snap after launch is still fully correct.
+
+Only one watcher per repository. Stop the watcher with Ctrl-C or SIGTERM.`,
+	RunE: runWatch,
+}
+
+// runWatch implements `arche watch`. It locates the enclosing repository,
+// refuses to start if another watcher is already active, seeds the dirty-file
+// cache with the files already modified at startup, and then blocks in the
+// filesystem event loop until SIGINT/SIGTERM.
+func runWatch(cmd *cobra.Command, args []string) error {
+	wd, err := os.Getwd()
+	if err != nil {
+		return err
+	}
+	root, err := repo.FindRoot(wd)
+	if err != nil || root == "" {
+		return fmt.Errorf("not inside an arche repository")
+	}
+	r, err := repo.Open(root)
+	if err != nil {
+		return err
+	}
+	// Close exactly once on every exit path. The original repeated a manual
+	// r.Close() before each early return, which is easy to forget when a new
+	// return is added and then leaks the store handle.
+	defer r.Close()
+
+	arch := r.ArcheDir()
+	workRoot := r.Root
+
+	if watcher.IsActive(arch) {
+		return fmt.Errorf("watcher is already running for this repository")
+	}
+
+	// Seed the dirty set with files modified before the watcher starts so the
+	// first snap/status after launch is still fully correct.
+	w := wc.New(r)
+	statuses, err := w.Status()
+	if err != nil {
+		return fmt.Errorf("status: %w", err)
+	}
+	for _, s := range statuses {
+		if markErr := r.Store.MarkWCacheDirty(s.Path); markErr != nil {
+			fmt.Fprintf(os.Stderr, "arche watch: seed dirty %q: %v\n", s.Path, markErr)
+		}
+	}
+
+	fmt.Fprintf(os.Stderr, "arche watch: watching %s (%d modified files at startup)\n", workRoot, len(statuses))
+	fmt.Fprintln(os.Stderr, "arche watch: tracking file events — stop with Ctrl-C")
+
+	// Cancel the watch context on Ctrl-C / SIGTERM for a clean shutdown.
+	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+	defer stop()
+
+	if err := watcher.Run(ctx, workRoot, arch, r.Store); err != nil {
+		return err
+	}
+	fmt.Fprintln(os.Stderr, "arche watch: stopped")
+	return nil
+}

internal/cli/cmd_wiki.go [A]
--- /dev/null
+++ b/internal/cli/cmd_wiki.go
@@ -1,0 +1,137 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"time"
+
+	"arche/internal/issuedb"
+
+	"github.com/spf13/cobra"
+)
+
+// wikiCmd is the parent command for the embedded wiki subcommands
+// (list/get/set), which are attached in this file's init.
+var wikiCmd = &cobra.Command{
+	Use:   "wiki",
+	Short: "Manage the embedded wiki",
+	Long: `Read and write pages in the repository's embedded wiki.
+
+  arche wiki list                 - list all wiki pages
+  arche wiki get <title>          - print a page's Markdown content
+  arche wiki set <title>          - create or update a page (reads from stdin)
+  arche wiki set <title> -f file  - create or update a page from a file`,
+}
+
+// wikiListCmd implements `arche wiki list`: one line per page with title,
+// last-updated date, and author.
+var wikiListCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all wiki pages",
+	Args:  cobra.NoArgs,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		idb, err := issuedb.Open(r.ArcheDir())
+		if err != nil {
+			return fmt.Errorf("open issuedb: %w", err)
+		}
+		defer idb.Close()
+
+		pages, err := idb.Wiki.List()
+		if err != nil {
+			return err
+		}
+		if len(pages) == 0 {
+			fmt.Println("No wiki pages yet. Create one with: arche wiki set <title>")
+			return nil
+		}
+		for _, p := range pages {
+			// p.Updated is a Unix timestamp; render date-only for the listing.
+			updated := time.Unix(p.Updated, 0).Format("2006-01-02")
+			fmt.Printf("%-40s  %s  %s\n", p.Title, updated, p.Author)
+		}
+		return nil
+	},
+}
+
+// wikiGetCmd implements `arche wiki get <title>`: prints the page content
+// verbatim, guaranteeing a trailing newline for shell-friendliness.
+var wikiGetCmd = &cobra.Command{
+	Use:   "get <title>",
+	Short: "Print a wiki page's Markdown content",
+	Args:  cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		idb, err := issuedb.Open(r.ArcheDir())
+		if err != nil {
+			return fmt.Errorf("open issuedb: %w", err)
+		}
+		defer idb.Close()
+
+		page, err := idb.Wiki.Get(args[0])
+		if err != nil {
+			return err
+		}
+		fmt.Print(page.Content)
+		// Append a newline only when the stored content lacks one.
+		if len(page.Content) > 0 && page.Content[len(page.Content)-1] != '\n' {
+			fmt.Println()
+		}
+		return nil
+	},
+}
+
+// wikiSetFile is the --file flag: read page content from this path instead of stdin.
+var wikiSetFile string
+
+// wikiSetCmd implements `arche wiki set <title>`: content comes from --file
+// when given, otherwise stdin; empty content is rejected. The author defaults
+// to the configured user name, falling back to "unknown".
+var wikiSetCmd = &cobra.Command{
+	Use:   "set <title>",
+	Short: "Create or update a wiki page",
+	Long: `Create or update the wiki page with the given title.
+
+Content is read from stdin unless --file is given:
+
+  echo "# Hello" | arche wiki set "Getting Started"
+  arche wiki set "Getting Started" --file docs/getting-started.md`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		var content []byte
+		var err error
+		if wikiSetFile != "" {
+			content, err = os.ReadFile(wikiSetFile)
+			if err != nil {
+				return fmt.Errorf("read file: %w", err)
+			}
+		} else {
+			content, err = io.ReadAll(os.Stdin)
+			if err != nil {
+				return fmt.Errorf("read stdin: %w", err)
+			}
+		}
+		if len(content) == 0 {
+			return fmt.Errorf("page content is empty")
+		}
+
+		idb, err := issuedb.Open(r.ArcheDir())
+		if err != nil {
+			return fmt.Errorf("open issuedb: %w", err)
+		}
+		defer idb.Close()
+
+		author := r.Cfg.User.Name
+		if author == "" {
+			author = "unknown"
+		}
+		title := args[0]
+		if err := idb.Wiki.Set(title, string(content), author); err != nil {
+			return err
+		}
+		fmt.Printf("Wiki page %q saved.\n", title)
+		return nil
+	},
+}
+
+// Wire up the wiki subcommands and the set command's --file flag.
+func init() {
+	wikiSetCmd.Flags().StringVarP(&wikiSetFile, "file", "f", "", "Read content from this file instead of stdin")
+
+	wikiCmd.AddCommand(wikiListCmd, wikiGetCmd, wikiSetCmd)
+}

internal/cli/cmd_worktree.go [A]
--- /dev/null
+++ b/internal/cli/cmd_worktree.go
@@ -1,0 +1,95 @@
+package cli
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// worktreeCmd is the parent command for linked-worktree management
+// (add/list/remove), attached in this file's init.
+var worktreeCmd = &cobra.Command{
+	Use:   "worktree",
+	Short: "Manage multiple working trees sharing one store",
+	Long: `Multiple working trees let you check out different bookmarks into
+separate directories while sharing the same store.db and issue tracker.
+
+Each linked worktree has its own HEAD and working copy, but all read and write
+objects from the central .arche/ directory of the main repository.`,
+}
+
+// worktreeAddBookmark is the --bookmark/-b flag: which bookmark to check out
+// into the new worktree (empty = HEAD).
+var worktreeAddBookmark string
+
+// worktreeAddCmd implements `arche worktree add <path>`: creation is
+// delegated to r.AddWorktree, then the result is reported.
+var worktreeAddCmd = &cobra.Command{
+	Use:   "add <path> [--bookmark|-b <name>]",
+	Short: "Create a new linked worktree",
+	Long: `Creates a new working tree at <path> and materializes the tip of
+<bookmark> (or HEAD if no bookmark is given) into that directory.
+
+A .arche-wt sentinel file is written into <path> so that arche commands
+run from inside that directory automatically use the shared store.`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		path := args[0]
+		if err := r.AddWorktree(path, worktreeAddBookmark); err != nil {
+			return err
+		}
+
+		fmt.Printf("Worktree created at %s\n", path)
+		if worktreeAddBookmark != "" {
+			fmt.Printf("  bookmark: %s\n", worktreeAddBookmark)
+		}
+		return nil
+	},
+}
+
+// worktreeListCmd implements `arche worktree list`: one line per registered
+// worktree with its name and path.
+var worktreeListCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all linked worktrees",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		wts, err := r.ListWorktrees()
+		if err != nil {
+			return err
+		}
+
+		if len(wts) == 0 {
+			fmt.Println("No linked worktrees.")
+			return nil
+		}
+
+		for _, wt := range wts {
+			fmt.Printf("%-20s  %s\n", wt.Name, wt.Path)
+		}
+		return nil
+	},
+}
+
+// worktreeRemoveCmd implements `arche worktree remove <name>`: it unregisters
+// the worktree via r.RemoveWorktree; per the Long text the working-copy files
+// themselves are left on disk.
+var worktreeRemoveCmd = &cobra.Command{
+	Use:   "remove <name>",
+	Short: "Unregister a linked worktree",
+	Long: `Removes the worktree registration and its .arche-wt sentinel file.
+The working-copy files at the worktree path are left in place.`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		r := openRepo()
+		defer r.Close()
+
+		name := args[0]
+		if err := r.RemoveWorktree(name); err != nil {
+			return err
+		}
+
+		fmt.Printf("Worktree %q removed.\n", name)
+		return nil
+	},
+}
+
+// Wire up the worktree subcommands and the add command's --bookmark flag.
+func init() {
+	worktreeAddCmd.Flags().StringVarP(&worktreeAddBookmark, "bookmark", "b", "",
+		"bookmark to check out into the new worktree (default: HEAD)")
+	worktreeCmd.AddCommand(worktreeAddCmd, worktreeListCmd, worktreeRemoveCmd)
+}

internal/cli/resolve.go [A]
--- /dev/null
+++ b/internal/cli/resolve.go
@@ -1,0 +1,175 @@
+package cli
+
+import (
+	"database/sql"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+)
+
+// resolveRef resolves a user-supplied revision string to a commit ID, trying
+// in order: "@" or "" (HEAD), "@-N" (N-th first-parent ancestor of HEAD),
+// "ch:<id>" (explicit change ID), a bare string in the change-ID alphabet,
+// a hex hash prefix of at least 4 characters, and finally a bookmark name.
+// The first successful interpretation wins.
+func resolveRef(r *repo.Repo, ref string) ([32]byte, error) {
+	if ref == "@" || ref == "" {
+		_, id, err := r.HeadCommit()
+		return id, err
+	}
+	if strings.HasPrefix(ref, "@-") {
+		// "@-" alone means one step back; "@-N" parses N.
+		n := 1
+		suffix := ref[2:]
+		if suffix != "" {
+			if _, err := fmt.Sscanf(suffix, "%d", &n); err != nil {
+				return object.ZeroID, fmt.Errorf("invalid relative ref %q", ref)
+			}
+		}
+		_, commitID, err := r.HeadCommit()
+		if err != nil {
+			return object.ZeroID, err
+		}
+		return nthAncestor(r, commitID, n)
+	}
+
+	if strings.HasPrefix(ref, "ch:") {
+		// Explicit change-ID form: strip the prefix and look it up directly.
+		bare := ref[3:]
+		id, err := r.Store.GetChangeCommit(bare)
+		if err != nil {
+			if err == sql.ErrNoRows {
+				return object.ZeroID, fmt.Errorf("change ID %q not found", ref)
+			}
+			return object.ZeroID, err
+		}
+		return id, nil
+	}
+
+	// Bare change ID: only a miss (ErrNoRows) falls through to later forms.
+	if isChangeIDChars(ref) {
+		id, err := r.Store.GetChangeCommit(ref)
+		if err == nil {
+			return id, nil
+		}
+		if err != sql.ErrNoRows {
+			return object.ZeroID, err
+		}
+	}
+
+	// Hex commit-hash prefix (minimum 4 chars to limit ambiguity).
+	if isHexString(ref) && len(ref) >= 4 {
+		id, err := resolveHashPrefix(r, ref)
+		if err == nil {
+			return id, nil
+		}
+	}
+
+	// Last resort: treat ref as a bookmark name.
+	bm, err := r.Store.GetBookmark(ref)
+	if err == nil && bm != nil {
+		return bm.CommitID, nil
+	}
+
+	return object.ZeroID, fmt.Errorf("unknown commit reference %q", ref)
+}
+
+// nthAncestor walks n steps up the first-parent chain starting at id and
+// returns the commit reached. It fails if a commit along the way cannot be
+// read, or if a root commit is hit before n steps complete (the error reports
+// how far the walk got).
+func nthAncestor(r *repo.Repo, id [32]byte, n int) ([32]byte, error) {
+	cur, remaining := id, n
+	for remaining > 0 {
+		c, err := r.ReadCommit(cur)
+		if err != nil {
+			return object.ZeroID, err
+		}
+		if len(c.Parents) == 0 {
+			return object.ZeroID, fmt.Errorf("commit %s has no parent (went %d/%d)", object.Short(cur), n-remaining, n)
+		}
+		cur = c.Parents[0]
+		remaining--
+	}
+	return cur, nil
+}
+
+// resolveHashPrefix resolves a hex prefix to a unique commit ID among all
+// commits reachable from HEAD and the bookmarks. It errors when the prefix is
+// not valid hex, matches no commit, or matches more than one.
+func resolveHashPrefix(r *repo.Repo, prefix string) ([32]byte, error) {
+	// Validate that the prefix is well-formed hex (odd lengths are padded so
+	// DecodeString accepts them). Only the validation result is needed: the
+	// match below is done on the hex *string* form, so the decoded bytes are
+	// discarded. (The original bound them to an otherwise-unused variable.)
+	p := prefix
+	if len(p)%2 != 0 {
+		p = p + "0"
+	}
+	if _, err := hex.DecodeString(p); err != nil {
+		return object.ZeroID, err
+	}
+
+	var found [32]byte
+	var count int
+
+	allChanges, err := listAllCommitIDs(r)
+	if err != nil {
+		return object.ZeroID, err
+	}
+	for _, id := range allChanges {
+		h := fmt.Sprintf("%x", id)
+		if strings.HasPrefix(h, prefix) {
+			found = id
+			count++
+		}
+	}
+	if count == 0 {
+		return object.ZeroID, fmt.Errorf("hash prefix %q not found", prefix)
+	}
+	if count > 1 {
+		return object.ZeroID, fmt.Errorf("ambiguous hash prefix %q matches %d commits", prefix, count)
+	}
+	return found, nil
+}
+
+// listAllCommitIDs returns every commit reachable from HEAD and from every
+// bookmark, discovered by a breadth-first walk over parent edges. A commit
+// whose object cannot be read is still included but not expanded further.
+func listAllCommitIDs(r *repo.Repo) ([][32]byte, error) {
+	bms, err := r.Store.ListBookmarks()
+	if err != nil {
+		return nil, err
+	}
+
+	seen := make(map[[32]byte]bool)
+	var queue [][32]byte
+
+	// HEAD is optional here: a resolve failure just means fewer roots.
+	_, headID, err := r.HeadCommit()
+	if err == nil {
+		queue = append(queue, headID)
+	}
+	for _, bm := range bms {
+		queue = append(queue, bm.CommitID)
+	}
+
+	var all [][32]byte
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		if seen[id] {
+			continue
+		}
+		seen[id] = true
+		all = append(all, id)
+		c, err := r.ReadCommit(id)
+		if err != nil {
+			// Unreadable commit: keep the ID, skip its parents.
+			continue
+		}
+		for _, p := range c.Parents {
+			if !seen[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+	return all, nil
+}
+
+// isHexString reports whether every rune of s is a hexadecimal digit in
+// either case. The empty string vacuously qualifies.
+func isHexString(s string) bool {
+	for _, c := range s {
+		switch {
+		case c >= '0' && c <= '9':
+		case c >= 'a' && c <= 'f':
+		case c >= 'A' && c <= 'F':
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+// isChangeIDChars reports whether s looks like a bare change ID: at least
+// four characters, all drawn from the lowercase change-ID alphabet (which
+// omits vowels other than a/e and the easily-confused letters i, l, o, u).
+func isChangeIDChars(s string) bool {
+	const alpha = "abcdefghjkmnpqrstvwxyz"
+	if len(s) < 4 {
+		return false
+	}
+	for _, c := range s {
+		if !strings.ContainsRune(alpha, c) {
+			return false
+		}
+	}
+	return true
+}

internal/cli/root.go [A]
--- /dev/null
+++ b/internal/cli/root.go
@@ -1,0 +1,79 @@
+package cli
+
+import (
+	"fmt"
+	"os"
+
+	"arche/internal/repo"
+
+	"github.com/spf13/cobra"
+)
+
+var version = "0.1.0"
+
+func isTerminal(f *os.File) bool {
+	fi, err := f.Stat()
+	if err != nil {
+		return false
+	}
+	return (fi.Mode() & os.ModeCharDevice) != 0
+}
+
+var Root = &cobra.Command{
+	Use:           "arche",
+	Short:         "Arche - a modern distributed version control system",
+	SilenceUsage:  true,
+	SilenceErrors: true,
+	Version:       version,
+}
+
+func init() {
+	Root.AddCommand(
+		initCmd,
+		snapCmd,
+		statusCmd,
+		diffCmd,
+		logCmd,
+		coCmd,
+		undoCmd,
+		opLogCmd,
+		bookmarkCmd,
+		mergeCmd,
+		rebaseCmd,
+		resolveCmd,
+		splitCmd,
+		foldCmd,
+		phaseCmd,
+		syncCmd,
+		uiCmd,
+		serveCmd,
+		hooksCmd,
+		cloneCmd,
+		gitImportCmd,
+		bundleCmd,
+		squashCmd,
+		gcCmd,
+		worktreeCmd,
+		watchCmd,
+		wikiCmd,
+		lockCmd,
+		bisectCmd,
+		explainCmd,
+		stackCmd,
+		grepCmd,
+	)
+}
+
+func openRepo() *repo.Repo {
+	wd, err := os.Getwd()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "arche: %v\n", err)
+		os.Exit(1)
+	}
+	r, err := repo.Open(wd)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "arche: %v\n", err)
+		os.Exit(1)
+	}
+	return r
+}

internal/diff/diff.go [A]
--- /dev/null
+++ b/internal/diff/diff.go
@@ -1,0 +1,224 @@
+package diff
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+
+	"github.com/sergi/go-diff/diffmatchpatch"
+)
+
+// FileDiff describes one file's change between two trees: its path, a
+// one-rune status ('A' added, 'D' deleted, 'M' modified), and a rendered
+// unified-diff patch.
+type FileDiff struct {
+	Path   string
+	Status rune
+	Patch  string
+}
+
+// TreeDiff computes per-file diffs between trees A and B. Either side may be
+// object.ZeroID, which is treated as an empty tree. Blob read failures while
+// rendering patches degrade to empty content (best-effort). The result is
+// sorted by path so output is deterministic despite map iteration.
+func TreeDiff(r *repo.Repo, treeA, treeB [32]byte) ([]FileDiff, error) {
+	aFiles := make(map[string][32]byte)
+	bFiles := make(map[string][32]byte)
+
+	if treeA != object.ZeroID {
+		if err := flattenTree(r, treeA, "", aFiles); err != nil {
+			return nil, fmt.Errorf("diff tree A: %w", err)
+		}
+	}
+	if treeB != object.ZeroID {
+		if err := flattenTree(r, treeB, "", bFiles); err != nil {
+			return nil, fmt.Errorf("diff tree B: %w", err)
+		}
+	}
+
+	var out []FileDiff
+	dmp := diffmatchpatch.New()
+
+	// Pass 1: files only in A are deletions.
+	for path, aID := range aFiles {
+		if _, ok := bFiles[path]; !ok {
+			content, _ := readBlob(r, aID)
+			out = append(out, FileDiff{
+				Path:   path,
+				Status: 'D',
+				Patch:  unifiedDiff(dmp, path, content, ""),
+			})
+		}
+	}
+
+	// Pass 2: files only in B are additions.
+	for path, bID := range bFiles {
+		if _, ok := aFiles[path]; !ok {
+			content, _ := readBlob(r, bID)
+			out = append(out, FileDiff{
+				Path:   path,
+				Status: 'A',
+				Patch:  unifiedDiff(dmp, path, "", content),
+			})
+		}
+	}
+
+	// Pass 3: files present in both with differing object IDs. Content is
+	// compared as well, so entries whose IDs differ but whose bytes match
+	// produce no entry.
+	for path, aID := range aFiles {
+		bID, ok := bFiles[path]
+		if !ok {
+			continue
+		}
+		if aID == bID {
+			continue
+		}
+		aContent, _ := readBlob(r, aID)
+		bContent, _ := readBlob(r, bID)
+		if aContent == bContent {
+			continue
+		}
+		out = append(out, FileDiff{
+			Path:   path,
+			Status: 'M',
+			Patch:  unifiedDiff(dmp, path, aContent, bContent),
+		})
+	}
+
+	sortFileDiffs(out)
+	return out, nil
+}
+
+// CommitDiff returns the file-level diff introduced by commitID relative to
+// its first parent. A root commit (no parents) is diffed against the empty
+// tree.
+func CommitDiff(r *repo.Repo, commitID [32]byte) ([]FileDiff, error) {
+	c, err := r.ReadCommit(commitID)
+	if err != nil {
+		return nil, err
+	}
+	var parentTree [32]byte
+	if len(c.Parents) > 0 {
+		p, perr := r.ReadCommit(c.Parents[0])
+		if perr != nil {
+			return nil, perr
+		}
+		parentTree = p.TreeID
+	}
+	return TreeDiff(r, parentTree, c.TreeID)
+}
+
+// FlattenTree is the exported entry point for flattenTree: it fills out with
+// every file path under treeID mapped to its blob object ID.
+func FlattenTree(r *repo.Repo, treeID [32]byte, out map[string][32]byte) error {
+	return flattenTree(r, treeID, "", out)
+}
+
+// UnifiedDiff renders a unified diff of before→after for path using a fresh
+// diff-match-patch instance; see unifiedDiff for the output format.
+func UnifiedDiff(path, before, after string) string {
+	dmp := diffmatchpatch.New()
+	return unifiedDiff(dmp, path, before, after)
+}
+
+// unifiedDiff renders a unified-style diff of before→after for path.
+// Binary content (any NUL byte on either side) short-circuits to a one-line
+// marker. Identical inputs yield "". The result is a single whole-file hunk
+// whose header counts every line and which includes all equal lines (no
+// context trimming).
+func unifiedDiff(dmp *diffmatchpatch.DiffMatchPatch, path, before, after string) string {
+	if !isPrintable(before) || !isPrintable(after) {
+		if before == "" {
+			return fmt.Sprintf("--- /dev/null\n+++ b/%s\n(binary file added)\n", path)
+		}
+		if after == "" {
+			return fmt.Sprintf("--- a/%s\n+++ /dev/null\n(binary file deleted)\n", path)
+		}
+		return fmt.Sprintf("--- a/%s\n+++ b/%s\n(binary file modified)\n", path, path)
+	}
+
+	diffs := dmp.DiffMain(before, after, true)
+	dmp.DiffCleanupSemantic(diffs)
+
+	// An all-equal diff means no textual change: return empty rather than an
+	// empty-bodied hunk.
+	hasChange := false
+	for _, d := range diffs {
+		if d.Type != diffmatchpatch.DiffEqual {
+			hasChange = true
+			break
+		}
+	}
+	if !hasChange {
+		return ""
+	}
+
+	var sb strings.Builder
+	// Conventional a/ and b/ prefixes, with /dev/null for add/delete.
+	aPath, bPath := "a/"+path, "b/"+path
+	if before == "" {
+		aPath = "/dev/null"
+	}
+	if after == "" {
+		bPath = "/dev/null"
+	}
+	fmt.Fprintf(&sb, "--- %s\n+++ %s\n", aPath, bPath)
+	beforeLines := len(splitLines(before))
+	afterLines := len(splitLines(after))
+	fmt.Fprintf(&sb, "@@ -1,%d +1,%d @@\n", beforeLines, afterLines)
+
+	for _, diff := range diffs {
+		for _, line := range splitLines(diff.Text) {
+			switch diff.Type {
+			case diffmatchpatch.DiffEqual:
+				fmt.Fprintf(&sb, " %s\n", line)
+			case diffmatchpatch.DiffInsert:
+				fmt.Fprintf(&sb, "+%s\n", line)
+			case diffmatchpatch.DiffDelete:
+				fmt.Fprintf(&sb, "-%s\n", line)
+			}
+		}
+	}
+	return sb.String()
+}
+
+// splitLines splits s on '\n', treating a trailing newline as a terminator
+// rather than introducing a final empty line. The empty string yields nil.
+func splitLines(s string) []string {
+	if s == "" {
+		return nil
+	}
+	return strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+}
+
+// isPrintable reports whether s contains no NUL byte; content with an
+// embedded NUL is treated as binary by the diff formatter.
+func isPrintable(s string) bool {
+	return strings.IndexByte(s, 0) < 0
+}
+
+// readBlob loads the blob with the given object ID and returns its bytes as
+// a string for diffing.
+func readBlob(r *repo.Repo, id [32]byte) (string, error) {
+	content, err := r.ReadBlob(id)
+	if err != nil {
+		return "", err
+	}
+	return string(content), nil
+}
+
+// flattenTree recursively records every non-directory entry under treeID
+// into out, keyed by the slash-joined path relative to prefix. A zero treeID
+// is a no-op.
+func flattenTree(r *repo.Repo, treeID [32]byte, prefix string, out map[string][32]byte) error {
+	if treeID == object.ZeroID {
+		return nil
+	}
+	t, err := r.ReadTree(treeID)
+	if err != nil {
+		return err
+	}
+	for _, e := range t.Entries {
+		rel := join(prefix, e.Name)
+		switch e.Mode {
+		case object.ModeDir:
+			// Subtree: recurse with the extended path prefix.
+			if err := flattenTree(r, e.ObjectID, rel, out); err != nil {
+				return err
+			}
+		default:
+			// Any non-directory mode is recorded by its object ID.
+			out[rel] = e.ObjectID
+		}
+	}
+	return nil
+}
+
+// join concatenates a directory prefix and an entry name with '/', except
+// that an empty prefix yields the name alone.
+func join(prefix, name string) string {
+	if prefix != "" {
+		return prefix + "/" + name
+	}
+	return name
+}
+
+// sortFileDiffs orders diffs by path ascending so output is deterministic
+// regardless of map iteration order in TreeDiff. Uses the standard library's
+// stable sort instead of the previous hand-rolled O(n²) insertion sort.
+func sortFileDiffs(diffs []FileDiff) {
+	sort.SliceStable(diffs, func(i, j int) bool {
+		return diffs[i].Path < diffs[j].Path
+	})
+}

internal/diff/diff_test.go [A]
--- /dev/null
+++ b/internal/diff/diff_test.go
@@ -1,0 +1,118 @@
+package diff
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/sergi/go-diff/diffmatchpatch"
+)
+
+func TestSplitLines_Empty(t *testing.T) {
+	lines := splitLines("")
+	if len(lines) != 0 {
+		t.Errorf("expected 0 lines, got %d", len(lines))
+	}
+}
+
+func TestSplitLines_Single(t *testing.T) {
+	lines := splitLines("hello")
+	if len(lines) != 1 || lines[0] != "hello" {
+		t.Errorf("unexpected: %v", lines)
+	}
+}
+
+func TestSplitLines_Multiple(t *testing.T) {
+	lines := splitLines("a\nb\nc\n")
+	if len(lines) != 3 {
+		t.Fatalf("expected 3, got %d: %v", len(lines), lines)
+	}
+	if lines[0] != "a" || lines[1] != "b" || lines[2] != "c" {
+		t.Errorf("unexpected lines: %v", lines)
+	}
+}
+
+func TestSplitLines_NoTrailingNewline(t *testing.T) {
+	lines := splitLines("a\nb")
+	if len(lines) != 2 {
+		t.Fatalf("expected 2, got %d: %v", len(lines), lines)
+	}
+}
+
+func TestIsPrintable_Text(t *testing.T) {
+	if !isPrintable("hello world\n") {
+		t.Error("plain text should be printable")
+	}
+}
+
+func TestIsPrintable_NullByte(t *testing.T) {
+	if isPrintable("binary\x00data") {
+		t.Error("string with null byte should not be printable")
+	}
+}
+
+func TestIsPrintable_Empty(t *testing.T) {
+	if !isPrintable("") {
+		t.Error("empty string should be printable")
+	}
+}
+
+func TestUnifiedDiff_Equal(t *testing.T) {
+	dmp := diffmatchpatch.New()
+	result := unifiedDiff(dmp, "file.txt", "same content\n", "same content\n")
+	if result != "" {
+		t.Errorf("identical files should produce empty diff, got %q", result)
+	}
+}
+
+func TestUnifiedDiff_AddedFile(t *testing.T) {
+	dmp := diffmatchpatch.New()
+	result := unifiedDiff(dmp, "new.txt", "", "hello\n")
+	if !strings.HasPrefix(result, "--- /dev/null") {
+		t.Errorf("added file should start with --- /dev/null, got: %q", result)
+	}
+	if !strings.Contains(result, "+hello") {
+		t.Errorf("missing +hello in: %q", result)
+	}
+}
+
+func TestUnifiedDiff_DeletedFile(t *testing.T) {
+	dmp := diffmatchpatch.New()
+	result := unifiedDiff(dmp, "old.txt", "goodbye\n", "")
+	if !strings.Contains(result, "+++ /dev/null") {
+		t.Errorf("deleted file should have +++ /dev/null, got: %q", result)
+	}
+	if !strings.Contains(result, "-goodbye") {
+		t.Errorf("missing -goodbye in: %q", result)
+	}
+}
+
+func TestUnifiedDiff_Modified(t *testing.T) {
+	dmp := diffmatchpatch.New()
+	result := unifiedDiff(dmp, "mod.txt", "before\n", "after\n")
+	if !strings.HasPrefix(result, "--- a/mod.txt") {
+		t.Errorf("modified file diff header: %q", result)
+	}
+	if !strings.Contains(result, "+++ b/mod.txt") {
+		t.Errorf("missing +++ header: %q", result)
+	}
+}
+
+func TestUnifiedDiff_Binary(t *testing.T) {
+	dmp := diffmatchpatch.New()
+	result := unifiedDiff(dmp, "data.bin", "text", "\x00binary")
+	if !strings.Contains(result, "binary") {
+		t.Errorf("binary diff should mention binary: %q", result)
+	}
+}
+
+func TestSortFileDiffs(t *testing.T) {
+	diffs := []FileDiff{
+		{Path: "z/file.go", Status: 'M'},
+		{Path: "a/main.go", Status: 'A'},
+		{Path: "m/lib.go", Status: 'D'},
+	}
+	sortFileDiffs(diffs)
+	if diffs[0].Path != "a/main.go" || diffs[1].Path != "m/lib.go" || diffs[2].Path != "z/file.go" {
+		t.Errorf("sort order wrong: %v", diffs)
+	}
+}

internal/diff/hunks.go [A]
--- /dev/null
+++ b/internal/diff/hunks.go
@@ -1,0 +1,259 @@
+package diff
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/sergi/go-diff/diffmatchpatch"
+)
+
+// ContextLines is the number of unchanged lines kept around each change
+// when grouping diff output into hunks.
+const ContextLines = 3
+
+// HunkLineKind is the one-character marker of a hunk line, matching the
+// unified-diff prefixes ' ', '+' and '-'.
+type HunkLineKind byte
+
+const (
+	LineEqual  HunkLineKind = ' '
+	LineAdd    HunkLineKind = '+'
+	LineRemove HunkLineKind = '-'
+)
+
+// HunkLine is one line of a hunk: its text (trailing newline retained, when
+// present) and its kind marker.
+type HunkLine struct {
+	Content string
+	Kind    HunkLineKind
+}
+
+// Hunk is a contiguous group of diff lines with 1-based start line numbers
+// and line counts for the old and new side.
+type Hunk struct {
+	OldStart int
+	OldCount int
+	NewStart int
+	NewCount int
+	Lines    []HunkLine
+}
+
+// Header renders the unified-diff hunk header, e.g. "@@ -3,7 +3,8 @@".
+// Counts are always written, even when they are 1.
+func (h Hunk) Header() string {
+	return strings.Join([]string{
+		"@@ -",
+		itoa(h.OldStart), ",", itoa(h.OldCount),
+		" +",
+		itoa(h.NewStart), ",", itoa(h.NewCount),
+		" @@",
+	}, "")
+}
+
+// FileHunkDiff is a per-file diff: the path, its status rune, the full old
+// and new contents, and the hunks computed between them.
+type FileHunkDiff struct {
+	Path       string
+	Status     rune
+	OldContent string
+	NewContent string
+	Hunks      []Hunk
+}
+
+// ComputeFileHunks assembles a FileHunkDiff for path from the old and new
+// contents, computing the hunks eagerly. status is stored verbatim (tests in
+// this package use 'A', 'M', 'D').
+func ComputeFileHunks(path, oldContent, newContent string, status rune) FileHunkDiff {
+	fhd := FileHunkDiff{
+		Path:       path,
+		Status:     status,
+		OldContent: oldContent,
+		NewContent: newContent,
+	}
+	fhd.Hunks = computeHunks(oldContent, newContent)
+	return fhd
+}
+
+// computeHunks computes unified-diff style hunks between oldText and newText
+// using a line-granularity diff, then groups runs of changed lines with up to
+// ContextLines of surrounding context, merging groups whose context regions
+// touch or overlap.
+func computeHunks(oldText, newText string) []Hunk {
+	// Line-mode diff: encode each distinct line as a rune, diff the runes,
+	// then map back to lines.
+	dmp := diffmatchpatch.New()
+	chars1, chars2, lineArray := dmp.DiffLinesToChars(oldText, newText)
+	diffs := dmp.DiffMain(chars1, chars2, false)
+	diffs = dmp.DiffCharsToLines(diffs, lineArray)
+
+	// Flatten the diff into one tagged entry per line.
+	type rawLine struct {
+		kind    HunkLineKind
+		content string
+	}
+	var flat []rawLine
+	for _, d := range diffs {
+		lines := splitLinesFull(d.Text)
+		switch d.Type {
+		case diffmatchpatch.DiffEqual:
+			for _, l := range lines {
+				flat = append(flat, rawLine{LineEqual, l})
+			}
+		case diffmatchpatch.DiffInsert:
+			for _, l := range lines {
+				flat = append(flat, rawLine{LineAdd, l})
+			}
+		case diffmatchpatch.DiffDelete:
+			for _, l := range lines {
+				flat = append(flat, rawLine{LineRemove, l})
+			}
+		}
+	}
+
+	isChange := make([]bool, len(flat))
+	for i, l := range flat {
+		isChange[i] = l.kind != LineEqual
+	}
+
+	// Expand each run of changed lines by ContextLines on both sides, and
+	// merge a range into its predecessor when their context regions meet.
+	// Ends only grow as i advances, so merging keeps ranges sorted.
+	type hunkRange struct{ start, end int }
+	var ranges []hunkRange
+	i := 0
+	for i < len(flat) {
+		if !isChange[i] {
+			i++
+			continue
+		}
+		j := i
+		for j < len(flat) && isChange[j] {
+			j++
+		}
+		start := maxInt(0, i-ContextLines)
+		end := minInt(len(flat), j+ContextLines)
+		if len(ranges) > 0 && start <= ranges[len(ranges)-1].end {
+			ranges[len(ranges)-1].end = end
+		} else {
+			ranges = append(ranges, hunkRange{start, end})
+		}
+		i = j
+	}
+
+	var hunks []Hunk
+	for _, r := range ranges {
+		// Derive the 1-based start line numbers by replaying the lines before
+		// the range: removals advance only the old side, additions only new.
+		oldLine, newLine := 1, 1
+		for i := 0; i < r.start; i++ {
+			switch flat[i].kind {
+			case LineEqual:
+				oldLine++
+				newLine++
+			case LineRemove:
+				oldLine++
+			case LineAdd:
+				newLine++
+			}
+		}
+		// NOTE(review): for a zero-count side this start points at the line
+		// *after* the change, whereas git's hunk headers use the preceding
+		// line; ApplySelectedHunks relies on this convention (OldStart-1),
+		// so confirm before changing it.
+		var lines []HunkLine
+		oldCount, newCount := 0, 0
+		for i := r.start; i < r.end; i++ {
+			lines = append(lines, HunkLine{Content: flat[i].content, Kind: flat[i].kind})
+			switch flat[i].kind {
+			case LineEqual:
+				oldCount++
+				newCount++
+			case LineRemove:
+				oldCount++
+			case LineAdd:
+				newCount++
+			}
+		}
+		hunks = append(hunks, Hunk{
+			OldStart: oldLine,
+			OldCount: oldCount,
+			NewStart: newLine,
+			NewCount: newCount,
+			Lines:    lines,
+		})
+	}
+	return hunks
+}
+
+// ApplySelectedHunks rebuilds file content with only the selected hunks
+// applied: hunk i is applied iff selected[i] is true. Unselected hunks (and
+// indexes beyond len(selected), which previously caused a panic) leave the
+// corresponding old lines untouched. With no hunks at all the old content is
+// returned unchanged.
+func ApplySelectedHunks(fhd FileHunkDiff, selected []bool) string {
+	if len(fhd.Hunks) == 0 {
+		return fhd.OldContent
+	}
+
+	oldLines := splitLinesFull(fhd.OldContent)
+
+	// replacement: substitute count old lines starting at start (0-based)
+	// with newLines.
+	type replacement struct {
+		start, count int
+		newLines     []string
+	}
+	var reps []replacement
+	for i, h := range fhd.Hunks {
+		// Bounds-guard the selection: a selected slice shorter than
+		// fhd.Hunks treats the missing entries as unselected.
+		if i >= len(selected) || !selected[i] {
+			continue
+		}
+		// A hunk's new-side content is its equal + added lines.
+		var newLines []string
+		for _, l := range h.Lines {
+			if l.Kind == LineEqual || l.Kind == LineAdd {
+				newLines = append(newLines, l.Content)
+			}
+		}
+		reps = append(reps, replacement{
+			start:    h.OldStart - 1,
+			count:    h.OldCount,
+			newLines: newLines,
+		})
+	}
+
+	// Apply bottom-up so earlier replacement offsets stay valid.
+	sort.Slice(reps, func(i, j int) bool {
+		return reps[i].start > reps[j].start
+	})
+
+	result := make([]string, len(oldLines))
+	copy(result, oldLines)
+
+	for _, rep := range reps {
+		// Clamp in case a hunk extends past the end of the old content.
+		end := rep.start + rep.count
+		if end > len(result) {
+			end = len(result)
+		}
+		newResult := make([]string, 0, len(result)-rep.count+len(rep.newLines))
+		newResult = append(newResult, result[:rep.start]...)
+		newResult = append(newResult, rep.newLines...)
+		newResult = append(newResult, result[end:]...)
+		result = newResult
+	}
+
+	// Lines retain their newlines, so plain concatenation restores the file.
+	return strings.Join(result, "")
+}
+
+// splitLinesFull splits s into lines, each keeping its trailing '\n' (the
+// final line may lack one). The empty string yields nil.
+func splitLinesFull(s string) []string {
+	if s == "" {
+		return nil
+	}
+	var lines []string
+	for len(s) > 0 {
+		i := strings.Index(s, "\n")
+		if i < 0 {
+			lines = append(lines, s)
+			break
+		}
+		lines = append(lines, s[:i+1])
+		s = s[i+1:]
+	}
+	return lines
+}
+
+// maxInt returns the larger of a and b.
+func maxInt(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+// minInt returns the smaller of a and b.
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+// itoa formats n in decimal without importing strconv.
+// NOTE(review): negating the minimum int overflows and would yield "-";
+// callers in this file only pass non-negative line numbers and counts.
+func itoa(n int) string {
+	if n == 0 {
+		return "0"
+	}
+	var buf [20]byte
+	pos := len(buf)
+	neg := n < 0
+	if neg {
+		n = -n
+	}
+	for n > 0 {
+		pos--
+		buf[pos] = byte('0' + n%10)
+		n /= 10
+	}
+	if neg {
+		pos--
+		buf[pos] = '-'
+	}
+	return string(buf[pos:])
+}

internal/gitcompat/gitcompat.go [A]
--- /dev/null
+++ b/internal/gitcompat/gitcompat.go
@@ -1,0 +1,414 @@
+package gitcompat
+
+import (
+	"bufio"
+	"encoding/hex"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+)
+
+// mapFileName is the file under the arche dir holding the arche→git
+// commit-hash map (one "archeHex gitHash" pair per line).
+const mapFileName = "git_map"
+
+// IsGitRepo reports whether repoRoot contains a .git entry. Any stat success
+// counts, so a .git *file* (e.g. a worktree/submodule link) also matches.
+func IsGitRepo(repoRoot string) bool {
+	_, err := os.Stat(filepath.Join(repoRoot, ".git"))
+	return err == nil
+}
+
+// GitInit runs `git init -q` on dir, forwarding git's output to this
+// process's stdout/stderr.
+func GitInit(dir string) error {
+	cmd := exec.Command("git", "init", "-q", dir)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// EnsureGitIgnore makes sure repoRoot/.gitignore ignores the .arche
+// directory. An existing ".arche/" or ".arche" line leaves the file alone;
+// otherwise ".arche/" is appended (creating the file if needed), adding a
+// newline first when the existing content doesn't end with one.
+func EnsureGitIgnore(repoRoot string) error {
+	path := filepath.Join(repoRoot, ".gitignore")
+	var existing string
+	if data, err := os.ReadFile(path); err == nil {
+		existing = string(data)
+	}
+	for _, line := range strings.Split(existing, "\n") {
+		t := strings.TrimSpace(line)
+		if t == ".arche/" || t == ".arche" {
+			return nil
+		}
+	}
+	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if existing != "" && !strings.HasSuffix(existing, "\n") {
+		fmt.Fprintln(f)
+	}
+	_, err = fmt.Fprintln(f, ".arche/")
+	return err
+}
+
+// LoadMap reads the arche→git hash map from archeDir. A missing file is not
+// an error and yields an empty, non-nil map. Blank lines and '#'-prefixed
+// comments are skipped; only lines with exactly two whitespace-separated
+// fields are kept.
+func LoadMap(archeDir string) (map[string]string, error) {
+	path := filepath.Join(archeDir, mapFileName)
+	f, err := os.Open(path)
+	if os.IsNotExist(err) {
+		return map[string]string{}, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	m := map[string]string{}
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		line := sc.Text()
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+		parts := strings.Fields(line)
+		if len(parts) == 2 {
+			m[parts[0]] = parts[1]
+		}
+	}
+	return m, sc.Err()
+}
+
+// SaveMap rewrites the map file in full, with keys in sorted order for
+// stable, diff-friendly output.
+func SaveMap(archeDir string, m map[string]string) error {
+	path := filepath.Join(archeDir, mapFileName)
+	f, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		if _, err := fmt.Fprintf(f, "%s %s\n", k, m[k]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// MirrorCommit mirrors an arche commit into the git working copy at
+// repoRoot: it stages the entire tree (`git add -A`), creates a git commit
+// carrying the arche commit's message and author/committer identity and
+// timestamp, and records the arche→git hash pair in the map file. Returns
+// the resulting git commit hash.
+func MirrorCommit(repoRoot string, r *repo.Repo, commitID [32]byte) (string, error) {
+	c, err := r.ReadCommit(commitID)
+	if err != nil {
+		return "", fmt.Errorf("read arche commit: %w", err)
+	}
+	archeHex := hex.EncodeToString(commitID[:])
+
+	m, err := LoadMap(r.ArcheDir())
+	if err != nil {
+		return "", err
+	}
+
+	if out, err := exec.Command("git", "-C", repoRoot, "add", "-A").CombinedOutput(); err != nil {
+		return "", fmt.Errorf("git add -A: %w\n%s", err, out)
+	}
+
+	msg := c.Message
+	if msg == "" {
+		msg = "(empty)"
+	}
+	ts := c.Author.Timestamp.Format(time.RFC3339)
+
+	commitCmd := exec.Command("git", "-C", repoRoot, "commit", "--allow-empty", "-m", msg)
+	commitCmd.Env = append(os.Environ(),
+		"GIT_AUTHOR_NAME="+c.Author.Name,
+		"GIT_AUTHOR_EMAIL="+c.Author.Email,
+		"GIT_AUTHOR_DATE="+ts,
+		"GIT_COMMITTER_NAME="+c.Committer.Name,
+		"GIT_COMMITTER_EMAIL="+c.Committer.Email,
+		"GIT_COMMITTER_DATE="+ts,
+	)
+	// NOTE(review): the commit's error is deliberately discarded. If
+	// `git commit` fails, the rev-parse below resolves the *previous* HEAD
+	// and that stale hash gets recorded in the map — confirm this
+	// best-effort behavior is intended.
+	_, _ = commitCmd.Output()
+
+	headOut, err := exec.Command("git", "-C", repoRoot, "rev-parse", "HEAD").Output()
+	if err != nil {
+		return "", fmt.Errorf("git rev-parse HEAD: %w", err)
+	}
+	gitHash := strings.TrimSpace(string(headOut))
+
+	m[archeHex] = gitHash
+	if err := SaveMap(r.ArcheDir(), m); err != nil {
+		return "", err
+	}
+	return gitHash, nil
+}
+
+// CheckoutCommit moves the git working copy onto the git commit mirroring
+// the given arche commit. Arche commits with no recorded mirror are skipped
+// without error.
+func CheckoutCommit(repoRoot, archeDir string, commitID [32]byte) error {
+	m, err := LoadMap(archeDir)
+	if err != nil {
+		return err
+	}
+	archeHex := hex.EncodeToString(commitID[:])
+	gitHash, ok := m[archeHex]
+	if !ok {
+		// No mirror yet – skip silently.
+		return nil
+	}
+
+	out, err := exec.Command("git", "-C", repoRoot, "checkout", "-q", gitHash).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("git checkout: %w\n%s", err, out)
+	}
+	return nil
+}
+
+// SyncPush runs `git push <remote>` in repoRoot, streaming git's output.
+func SyncPush(repoRoot, remote string) error {
+	cmd := exec.Command("git", "-C", repoRoot, "push", remote)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// SyncPull runs `git pull --rebase <remote>` in repoRoot, streaming output.
+func SyncPull(repoRoot, remote string) error {
+	cmd := exec.Command("git", "-C", repoRoot, "pull", "--rebase", remote)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// ImportFromGit converts the git history at repoRoot into arche commits and
+// persists the resulting arche→git hash map.
+func ImportFromGit(repoRoot string, r *repo.Repo) error {
+	return importGitHistory(repoRoot, r, true)
+}
+
+// ImportFromGitOnce performs the same import but does not write the hash
+// map (one-shot conversion).
+func ImportFromGitOnce(repoRoot string, r *repo.Repo) error {
+	return importGitHistory(repoRoot, r, false)
+}
+
+// importGitHistory walks the git log oldest-first and recreates each commit
+// as an arche commit: blobs/trees are imported per commit inside one store
+// transaction, git parents are remapped through already-imported commits,
+// and every imported commit is marked PhasePublic. When writeMap is true the
+// arche→git hash map is persisted. Finally a bookmark named after the
+// checked-out git branch (falling back to "main") is pointed at the last
+// imported commit and HEAD is set to its change ID.
+func importGitHistory(repoRoot string, r *repo.Repo, writeMap bool) error {
+	// %x00-separated fields per record: hash, parents, author name, author
+	// email, author date (strict ISO-8601 via %aI), full body.
+	raw, err := exec.Command("git", "-C", repoRoot, "log",
+		"--reverse", "--topo-order",
+		"--format=%H%x00%P%x00%an%x00%ae%x00%aI%x00%B%x00",
+	).Output()
+	if err != nil {
+		return fmt.Errorf("git log: %w", err)
+	}
+
+	gitToArche := map[string][32]byte{}
+	archeMap := map[string]string{}
+	var lastArcheID [32]byte
+
+	records := strings.Split(string(raw), "\x00\n")
+	for _, rec := range records {
+		rec = strings.TrimSpace(rec)
+		if rec == "" {
+			continue
+		}
+		parts := strings.SplitN(rec, "\x00", 6)
+		// All six fields are required: parts[5] (the message) is read below,
+		// so a short record must be skipped. The previous `< 5` guard let a
+		// 5-field record through and panicked on parts[5].
+		if len(parts) < 6 {
+			continue
+		}
+		gitSHA := strings.TrimSpace(parts[0])
+		parents := strings.Fields(strings.TrimSpace(parts[1]))
+		authorName := parts[2]
+		authorEmail := parts[3]
+		authorDateStr := strings.TrimSpace(parts[4])
+		message := strings.TrimSpace(parts[5])
+		if message == "" {
+			message = "(imported from git)"
+		}
+
+		// Fall back to "now" when the author date fails to parse.
+		ts, err := time.Parse(time.RFC3339, authorDateStr)
+		if err != nil {
+			ts = time.Now()
+		}
+
+		// One store transaction per imported commit.
+		tx, err := r.Store.Begin()
+		if err != nil {
+			return err
+		}
+
+		treeID, err := importGitTreeInTx(repoRoot, r, tx, gitSHA)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return fmt.Errorf("import tree for %s: %w", gitSHA[:8], err)
+		}
+
+		// Remap git parents through already-imported commits; parents not
+		// seen in this log are silently dropped.
+		var archeParents [][32]byte
+		for _, p := range parents {
+			if aid, ok := gitToArche[p]; ok {
+				archeParents = append(archeParents, aid)
+			}
+		}
+
+		sig := object.Signature{Name: authorName, Email: authorEmail, Timestamp: ts}
+		changeID, err := r.Store.AllocChangeID(tx)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+
+		c := &object.Commit{
+			TreeID:    treeID,
+			Parents:   archeParents,
+			ChangeID:  changeID,
+			Author:    sig,
+			Committer: sig,
+			Message:   message,
+			Phase:     object.PhasePublic,
+		}
+		commitID, err := repo.WriteCommitTx(r.Store, tx, c)
+		if err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.SetChangeCommit(tx, changeID, commitID); err != nil {
+			r.Store.Rollback(tx)
+			return err
+		}
+		if err := r.Store.Commit(tx); err != nil {
+			return err
+		}
+
+		gitToArche[gitSHA] = commitID
+		archeMap[hex.EncodeToString(commitID[:])] = gitSHA
+		lastArcheID = commitID
+
+		fmt.Printf("  imported %s -> ch:%s\n", gitSHA[:8], changeID[:8])
+	}
+
+	if writeMap {
+		if err := SaveMap(r.ArcheDir(), archeMap); err != nil {
+			return err
+		}
+	}
+
+	// Empty history: nothing to bookmark or point HEAD at.
+	if lastArcheID == (object.ZeroID) {
+		return nil
+	}
+
+	lastCommit, err := r.ReadCommit(lastArcheID)
+	if err != nil {
+		return err
+	}
+
+	// Name the bookmark after the checked-out branch; --show-current prints
+	// nothing on a detached HEAD, hence the "main" fallback.
+	branchOut, _ := exec.Command("git", "-C", repoRoot, "branch", "--show-current").Output()
+	bmName := strings.TrimSpace(string(branchOut))
+	if bmName == "" {
+		bmName = "main"
+	}
+
+	bm := store.Bookmark{Name: bmName, CommitID: lastArcheID}
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return err
+	}
+	if err := r.Store.SetBookmark(tx, bm); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+	if err := r.Store.Commit(tx); err != nil {
+		return err
+	}
+
+	return r.WriteHead(object.FormatChangeID(lastCommit.ChangeID))
+}
+
+// importEntry is one file taken from `git ls-tree -r`: its repo-relative
+// path, the arche blob ID its content was stored under, and its entry mode.
+type importEntry struct {
+	relPath string
+	blobID  [32]byte
+	mode    object.EntryMode
+}
+
+// importGitTreeInTx lists every blob of a git commit, copies each blob's
+// content into the arche store inside tx, and assembles a nested arche tree.
+// Note: this spawns one `git cat-file` subprocess per file.
+func importGitTreeInTx(repoRoot string, r *repo.Repo, tx *store.Tx, gitCommitSHA string) ([32]byte, error) {
+	out, err := exec.Command("git", "-C", repoRoot, "ls-tree", "-r", gitCommitSHA).Output()
+	if err != nil {
+		return [32]byte{}, fmt.Errorf("git ls-tree: %w", err)
+	}
+
+	var entries []importEntry
+	for _, line := range strings.Split(string(out), "\n") {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		// ls-tree -r lines look like "<mode> <type> <sha>\t<path>".
+		tabIdx := strings.Index(line, "\t")
+		if tabIdx < 0 {
+			continue
+		}
+		meta := strings.Fields(line[:tabIdx])
+		if len(meta) < 3 {
+			continue
+		}
+		gitMode := meta[0]
+		gitBlobSHA := meta[2]
+		relPath := line[tabIdx+1:]
+
+		content, err := exec.Command("git", "-C", repoRoot, "cat-file", "blob", gitBlobSHA).Output()
+		if err != nil {
+			return [32]byte{}, fmt.Errorf("cat-file %s: %w", gitBlobSHA, err)
+		}
+
+		blob := &object.Blob{Content: content}
+		blobID, err := repo.WriteBlobTx(r.Store, tx, blob)
+		if err != nil {
+			return [32]byte{}, err
+		}
+
+		// Map git file modes to arche modes; anything unrecognized becomes a
+		// plain file.
+		mode := object.ModeFile
+		switch gitMode {
+		case "100755":
+			mode = object.ModeExec
+		case "120000":
+			mode = object.ModeSymlink
+		}
+
+		entries = append(entries, importEntry{relPath: relPath, blobID: blobID, mode: mode})
+	}
+
+	return buildNestedTree(r, tx, entries)
+}
+
+// buildNestedTree converts flat, slash-separated entries into a nested tree:
+// entries with no '/' become blob entries of this tree, the rest are grouped
+// by first path segment and built recursively (depth-first, sorted by dir).
+// NOTE(review): file entries keep their input order and subtrees are appended
+// after them — confirm object.Tree (or WriteTreeTx) canonicalizes entry
+// order before relying on tree hashes for equality.
+func buildNestedTree(r *repo.Repo, tx *store.Tx, entries []importEntry) ([32]byte, error) {
+	tree := &object.Tree{}
+
+	dirs := map[string][]importEntry{}
+	for _, e := range entries {
+		slash := strings.Index(e.relPath, "/")
+		if slash < 0 {
+			tree.Entries = append(tree.Entries, object.TreeEntry{
+				Name:     e.relPath,
+				Mode:     e.mode,
+				ObjectID: e.blobID,
+			})
+		} else {
+			// Strip the leading segment; the remainder recurses one level down.
+			dirName := e.relPath[:slash]
+			dirs[dirName] = append(dirs[dirName], importEntry{
+				relPath: e.relPath[slash+1:],
+				blobID:  e.blobID,
+				mode:    e.mode,
+			})
+		}
+	}
+
+	dirNames := make([]string, 0, len(dirs))
+	for d := range dirs {
+		dirNames = append(dirNames, d)
+	}
+	sort.Strings(dirNames)
+
+	for _, dirName := range dirNames {
+		subID, err := buildNestedTree(r, tx, dirs[dirName])
+		if err != nil {
+			return [32]byte{}, err
+		}
+		tree.Entries = append(tree.Entries, object.TreeEntry{
+			Name:     dirName,
+			Mode:     object.ModeDir,
+			ObjectID: subID,
+		})
+	}
+
+	return repo.WriteTreeTx(r.Store, tx, tree)
+}

internal/issuedb/db.go [A]
--- /dev/null
+++ b/internal/issuedb/db.go
@@ -1,0 +1,311 @@
+package issuedb
+
+import (
+	"database/sql"
+	_ "embed"
+	"encoding/hex"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	"arche/internal/issues"
+	"arche/internal/object"
+	"arche/internal/store"
+	"arche/internal/wiki"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+//go:embed sql/001_initial.sql
+var sql001 string
+
+//go:embed sql/002_meta_hwm.sql
+var sql002 string
+
+// DB bundles the issue and wiki stores, which share one SQLite database.
+type DB struct {
+	Issues *issues.Store
+	Wiki   *wiki.Store
+	db     *sql.DB
+}
+
+// Open opens (creating if necessary) the issue database in repoDir without
+// an object-store mirror.
+func Open(repoDir string) (*DB, error) {
+	return openDB(repoDir, nil)
+}
+
+// NewWithStore opens the issue database and wires st as the object store
+// used for mirroring and recovering issue events.
+func NewWithStore(repoDir string, st store.Store) (*DB, error) {
+	return openDB(repoDir, st)
+}
+
+// storeObjectSink adapts a store.Store to the issues.ObjectSink interface.
+type storeObjectSink struct {
+	st store.Store
+}
+
+// WriteIssueEventObject stores one encoded issue-event object under id,
+// skipping the write when the object already exists. Each write runs in its
+// own store transaction.
+func (s *storeObjectSink) WriteIssueEventObject(id [32]byte, raw []byte) error {
+	if ok, _ := s.st.HasObject(id); ok {
+		return nil
+	}
+	tx, err := s.st.Begin()
+	if err != nil {
+		return err
+	}
+	if err := s.st.WriteObject(tx, id, "issue-event", raw); err != nil {
+		s.st.Rollback(tx) //nolint:errcheck
+		return err
+	}
+	return s.st.Commit(tx)
+}
+
+// openDB opens repoDir/issues.db with WAL journaling and a 5s busy timeout,
+// applies schema migrations, and — when st is non-nil — attaches the object
+// sink, seeds per-issue parent hashes from existing rows, and recovers any
+// events present in st but missing from SQLite.
+func openDB(repoDir string, st store.Store) (*DB, error) {
+	path := filepath.Join(repoDir, "issues.db")
+
+	db, err := sql.Open("sqlite3", path+"?_journal_mode=WAL&_busy_timeout=5000")
+	if err != nil {
+		return nil, err
+	}
+
+	// Single connection: serializes all access through one SQLite handle.
+	db.SetMaxOpenConns(1)
+
+	if err := runMigrations(db); err != nil {
+		db.Close()
+		return nil, err
+	}
+
+	issueStore := issues.New(db)
+	if st != nil {
+		sink := &storeObjectSink{st: st}
+		issueStore.SetObjectSink(sink)
+		if err := seedParentHashes(db, issueStore, st); err != nil {
+			db.Close()
+			return nil, fmt.Errorf("issuedb: seed parent hashes: %w", err)
+		}
+		if err := recoverFromStore(db, st); err != nil {
+			db.Close()
+			return nil, fmt.Errorf("issuedb: recovery pass: %w", err)
+		}
+	}
+
+	return &DB{
+		Issues: issueStore,
+		Wiki:   wiki.New(db),
+		db:     db,
+	}, nil
+}
+
+// seedParentHashes replays all stored issue events in HLC order and, per
+// issue, rebuilds the hash chain each event object would have had (parent =
+// previous event's hash). The most recent hash actually present in objStore
+// is registered on st so that newly inserted events chain onto it.
+func seedParentHashes(idb *sql.DB, st *issues.Store, objStore store.Store) error {
+	rows, err := idb.Query(
+		`SELECT issue_id, hlc_ms, hlc_seq, kind, payload, author
+		   FROM issue_events
+		  ORDER BY hlc_ms ASC, hlc_seq ASC`,
+	)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	type evt struct {
+		issueID string
+		hlcMS   int64
+		hlcSeq  int
+		kind    string
+		payload []byte
+		author  string
+	}
+	var evts []evt
+	for rows.Next() {
+		var e evt
+		var payload string
+		if err := rows.Scan(&e.issueID, &e.hlcMS, &e.hlcSeq, &e.kind, &payload, &e.author); err != nil {
+			return err
+		}
+		e.payload = []byte(payload)
+		evts = append(evts, e)
+	}
+	if err := rows.Err(); err != nil {
+		return err
+	}
+
+	// Recompute the chain hash per issue; `last` only advances when the
+	// computed object actually exists in the store.
+	// NOTE(review): when an event object is missing from the store the chain
+	// is rebuilt as if that event never existed (its successor links to the
+	// last object that was found) — confirm this matches how the sink
+	// originally wrote parents.
+	last := map[string][32]byte{}
+	for _, e := range evts {
+		obj := &object.IssueEventObject{
+			IssueID: e.issueID,
+			Kind:    e.kind,
+			Payload: e.payload,
+			Author:  e.author,
+			HLCMS:   e.hlcMS,
+			HLCSeq:  e.hlcSeq,
+		}
+		if prev, ok := last[e.issueID]; ok {
+			obj.Parents = [][32]byte{prev}
+		}
+		id := object.HashIssueEvent(obj)
+		if ok, _ := objStore.HasObject(id); ok {
+			last[e.issueID] = id
+		}
+	}
+
+	for issueID, h := range last {
+		st.SetParentHash(issueID, h)
+	}
+	return nil
+}
+
+// recoverFromStore scans all issue-event objects in st and inserts into
+// SQLite any event strictly newer than the persisted high-water mark (meta
+// keys hwm_ms/hwm_seq) that is not already present, then advances the mark.
+// Unreadable or undecodable objects are skipped (best effort).
+func recoverFromStore(idb *sql.DB, st store.Store) error {
+	// Missing meta rows leave the mark at (0, 0), i.e. consider everything.
+	var hwmMS int64
+	var hwmSeq int
+	row := idb.QueryRow(`SELECT value FROM meta WHERE key='hwm_ms'`)
+	var hwmMSStr string
+	if err := row.Scan(&hwmMSStr); err == nil {
+		hwmMS, _ = strconv.ParseInt(hwmMSStr, 10, 64)
+	}
+	row = idb.QueryRow(`SELECT value FROM meta WHERE key='hwm_seq'`)
+	var hwmSeqStr string
+	if err := row.Scan(&hwmSeqStr); err == nil {
+		hwmSeqVal, _ := strconv.ParseInt(hwmSeqStr, 10, 64)
+		hwmSeq = int(hwmSeqVal)
+	}
+
+	allIDs, err := st.ListObjectsByKind(string(object.KindIssueEvent))
+	if err != nil {
+		return err
+	}
+
+	var newEvs []issues.IssueEvent
+	var newHWM struct {
+		ms  int64
+		seq int
+	}
+	newHWM.ms = hwmMS
+	newHWM.seq = hwmSeq
+
+	for _, id := range allIDs {
+		_, raw, err := st.ReadObject(id)
+		if err != nil {
+			continue
+		}
+		obj, err := object.DecodeIssueEvent(raw)
+		if err != nil {
+			continue
+		}
+		// At or below the high-water mark: already processed in a prior pass.
+		if obj.HLCMS < hwmMS || (obj.HLCMS == hwmMS && obj.HLCSeq <= hwmSeq) {
+			continue
+		}
+		eventID := hex.EncodeToString(id[:])
+		// Existence pre-check; the INSERT below is also OR IGNORE.
+		var exists int
+		err = idb.QueryRow(`SELECT COUNT(*) FROM issue_events WHERE event_id=?`, eventID).Scan(&exists)
+		if err != nil || exists > 0 {
+			continue
+		}
+		newEvs = append(newEvs, issues.IssueEvent{
+			EventID: eventID,
+			IssueID: obj.IssueID,
+			HLCMS:   obj.HLCMS,
+			HLCSeq:  obj.HLCSeq,
+			Kind:    obj.Kind,
+			Payload: obj.Payload,
+			Author:  obj.Author,
+			// Original wall-clock creation time is unknown for recovered
+			// events; stored as 0.
+			Created: 0,
+		})
+		if obj.HLCMS > newHWM.ms || (obj.HLCMS == newHWM.ms && obj.HLCSeq > newHWM.seq) {
+			newHWM.ms = obj.HLCMS
+			newHWM.seq = obj.HLCSeq
+		}
+	}
+
+	// Nothing new: leave the high-water mark untouched.
+	if len(newEvs) == 0 {
+		return nil
+	}
+
+	tx, err := idb.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, ev := range newEvs {
+		_, err := tx.Exec(
+			`INSERT OR IGNORE INTO issue_events (event_id,issue_id,hlc_ms,hlc_seq,kind,payload,author,created) VALUES (?,?,?,?,?,?,?,?)`,
+			ev.EventID, ev.IssueID, ev.HLCMS, ev.HLCSeq, ev.Kind, string(ev.Payload), ev.Author, ev.Created,
+		)
+		if err != nil {
+			tx.Rollback() //nolint:errcheck
+			return fmt.Errorf("recovery: insert event %s: %w", ev.EventID, err)
+		}
+	}
+
+	if _, err := tx.Exec(`INSERT OR REPLACE INTO meta (key,value) VALUES ('hwm_ms',?)`, strconv.FormatInt(newHWM.ms, 10)); err != nil {
+		tx.Rollback() //nolint:errcheck
+		return err
+	}
+
+	if _, err := tx.Exec(`INSERT OR REPLACE INTO meta (key,value) VALUES ('hwm_seq',?)`, strconv.Itoa(newHWM.seq)); err != nil {
+		tx.Rollback() //nolint:errcheck
+		return err
+	}
+
+	return tx.Commit()
+}
+
+// Close closes the SQLite handle shared by the issue and wiki stores.
+func (d *DB) Close() error {
+	return d.db.Close()
+}
+
+// migration is one schema migration: its version and its SQL text.
+type migration struct {
+	version int
+	sql     string
+}
+
+// all lists every migration in apply order.
+var all = []migration{
+	{1, sql001},
+	{2, sql002},
+}
+
+// runMigrations applies any pending migrations from `all`, tracking applied
+// versions in the schema_migrations table. Each migration runs in its own
+// transaction together with its bookkeeping row, so a failed migration
+// leaves the recorded schema state consistent.
+func runMigrations(db *sql.DB) error {
+	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS schema_migrations (
+		version    INTEGER PRIMARY KEY,
+		applied_at INTEGER NOT NULL
+	)`); err != nil {
+		return err
+	}
+
+	rows, err := db.Query("SELECT version FROM schema_migrations")
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	applied := map[int]bool{}
+	for rows.Next() {
+		var v int
+		if err := rows.Scan(&v); err != nil {
+			return err
+		}
+		applied[v] = true
+	}
+	// Surface iteration errors (previously unchecked): a silently truncated
+	// read could otherwise re-run an already-applied migration.
+	if err := rows.Err(); err != nil {
+		return err
+	}
+
+	for _, m := range all {
+		if applied[m.version] {
+			continue
+		}
+
+		tx, err := db.Begin()
+		if err != nil {
+			return err
+		}
+
+		if _, err := tx.Exec(m.sql); err != nil {
+			tx.Rollback() //nolint:errcheck
+			return fmt.Errorf("issuedb migrate v%d: %w", m.version, err)
+		}
+
+		if _, err := tx.Exec("INSERT INTO schema_migrations (version, applied_at) VALUES (?,?)", m.version, time.Now().Unix()); err != nil {
+			tx.Rollback() //nolint:errcheck
+			return fmt.Errorf("issuedb migrate v%d record: %w", m.version, err)
+		}
+
+		if err := tx.Commit(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}

internal/issuedb/issuedb_test.go [A]
--- /dev/null
+++ b/internal/issuedb/issuedb_test.go
@@ -1,0 +1,99 @@
+package issuedb_test
+
+import (
+	"testing"
+
+	"arche/internal/issuedb"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// TestIssueDB_Open_CreatesTables: opening a fresh directory must yield
+// non-nil issue and wiki stores.
+func TestIssueDB_Open_CreatesTables(t *testing.T) {
+	dir := t.TempDir()
+	db, err := issuedb.Open(dir)
+	if err != nil {
+		t.Fatalf("Open: %v", err)
+	}
+	defer db.Close()
+
+	if db.Issues == nil {
+		t.Error("Issues store is nil")
+	}
+	if db.Wiki == nil {
+		t.Error("Wiki store is nil")
+	}
+}
+
+// TestIssueDB_IssuesAndWikiShareDB: issue and wiki operations both work
+// against the same opened database.
+func TestIssueDB_IssuesAndWikiShareDB(t *testing.T) {
+	dir := t.TempDir()
+	db, err := issuedb.Open(dir)
+	if err != nil {
+		t.Fatalf("Open: %v", err)
+	}
+	defer db.Close()
+
+	id, err := db.Issues.CreateIssue("Test issue", "body", "alice")
+	if err != nil {
+		t.Fatalf("CreateIssue: %v", err)
+	}
+
+	got, err := db.Issues.GetIssue(id)
+	if err != nil {
+		t.Fatalf("GetIssue: %v", err)
+	}
+	if got.Title != "Test issue" {
+		t.Errorf("title: want %q got %q", "Test issue", got.Title)
+	}
+
+	if err := db.Wiki.Set("home", "Welcome to the wiki", "alice"); err != nil {
+		t.Fatalf("Wiki.Set: %v", err)
+	}
+
+	page, err := db.Wiki.Get("home")
+	if err != nil {
+		t.Fatalf("Wiki.Get: %v", err)
+	}
+	if page.Content != "Welcome to the wiki" {
+		t.Errorf("wiki content mismatch: %+v", page)
+	}
+}
+
+// TestIssueDB_Reopen_Persists: data written before Close must be readable
+// after reopening the same directory.
+func TestIssueDB_Reopen_Persists(t *testing.T) {
+	dir := t.TempDir()
+
+	db1, err := issuedb.Open(dir)
+	if err != nil {
+		t.Fatalf("Open 1: %v", err)
+	}
+	id, err := db1.Issues.CreateIssue("Persist test", "", "bob")
+	if err != nil {
+		t.Fatalf("CreateIssue: %v", err)
+	}
+	db1.Close()
+
+	db2, err := issuedb.Open(dir)
+	if err != nil {
+		t.Fatalf("Open 2: %v", err)
+	}
+	defer db2.Close()
+
+	got, err := db2.Issues.GetIssue(id)
+	if err != nil {
+		t.Fatalf("GetIssue after reopen: %v", err)
+	}
+	if got.Title != "Persist test" {
+		t.Errorf("title after reopen: want %q got %q", "Persist test", got.Title)
+	}
+}
+
+// TestIssueDB_MigrationsIdempotent: opening the same directory twice must
+// not fail on already-applied migrations.
+func TestIssueDB_MigrationsIdempotent(t *testing.T) {
+	dir := t.TempDir()
+
+	for i := range 2 {
+		db, err := issuedb.Open(dir)
+		if err != nil {
+			t.Fatalf("Open iteration %d: %v", i, err)
+		}
+		db.Close()
+	}
+}

internal/issuedb/sql/001_initial.sql [A]
--- /dev/null
+++ b/internal/issuedb/sql/001_initial.sql
@@ -1,0 +1,19 @@
+-- Append-only log of issue events (event-sourced issue state).
+-- event_id is the unique event identifier; (hlc_ms, hlc_seq) is the hybrid
+-- logical clock used for ordering and last-writer-wins reduction.
+CREATE TABLE IF NOT EXISTS issue_events (
+    event_id TEXT    PRIMARY KEY,
+    issue_id TEXT    NOT NULL,
+    hlc_ms   INTEGER NOT NULL,
+    hlc_seq  INTEGER NOT NULL,
+    kind     TEXT    NOT NULL,
+    payload  TEXT    NOT NULL,
+    author   TEXT    NOT NULL DEFAULT '',
+    created  INTEGER NOT NULL
+);
+-- Per-issue lookup and clock-ordered replay.
+CREATE INDEX IF NOT EXISTS idx_ie_issue ON issue_events(issue_id);
+CREATE INDEX IF NOT EXISTS idx_ie_hlc   ON issue_events(hlc_ms, hlc_seq);
+
+-- Wiki pages keyed by title; updated in place.
+CREATE TABLE IF NOT EXISTS wiki_pages (
+    title   TEXT    PRIMARY KEY,
+    content TEXT    NOT NULL DEFAULT '',
+    author  TEXT    NOT NULL DEFAULT '',
+    updated INTEGER NOT NULL DEFAULT 0
+);

internal/issuedb/sql/002_meta_hwm.sql [A]
--- /dev/null
+++ b/internal/issuedb/sql/002_meta_hwm.sql
@@ -1,0 +1,7 @@
+-- Generic key/value metadata table.
+CREATE TABLE IF NOT EXISTS meta (
+    key   TEXT PRIMARY KEY,
+    value TEXT NOT NULL DEFAULT ''
+);
+
+-- Seed the event-recovery high-water mark at zero (see recoverFromStore).
+INSERT OR IGNORE INTO meta (key, value) VALUES ('hwm_ms',  '0');
+INSERT OR IGNORE INTO meta (key, value) VALUES ('hwm_seq', '0');

internal/issues/issues.go [A]
--- /dev/null
+++ b/internal/issues/issues.go
@@ -1,0 +1,507 @@
+package issues
+
+import (
+	"bytes"
+	"database/sql"
+	"encoding/json"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"arche/internal/object"
+
+	"github.com/google/uuid"
+)
+
+// HLC is a hybrid logical clock value: wall-clock milliseconds plus a
+// sequence number disambiguating events within the same millisecond.
+type HLC struct {
+	MS  int64
+	Seq int
+}
+
+// Less orders HLCs lexicographically by (MS, Seq).
+func (h HLC) Less(other HLC) bool {
+	if h.MS != other.MS {
+		return h.MS < other.MS
+	}
+	return h.Seq < other.Seq
+}
+
+// ObjectSink receives encoded issue-event objects keyed by content hash.
+type ObjectSink interface {
+	WriteIssueEventObject(id [32]byte, raw []byte) error
+}
+
+// Store is the event-sourced issue store backed by SQLite, with an optional
+// object-store mirror.
+type Store struct {
+	db       *sql.DB
+	mu       sync.Mutex // guards hlc and lastHash
+	hlc      HLC        // last issued clock value
+	sink     ObjectSink // optional mirror for event objects
+	lastHash map[string][32]byte // per-issue hash of the last mirrored event
+}
+
+// Issue is the reduced (materialized) view of one issue's event log.
+type Issue struct {
+	ID           string
+	Status       string
+	Title        string
+	Body         string
+	BodyConflict *BodyConflict
+	Comments     []Comment
+	Labels       []string
+	Refs         []string
+}
+
+// BodyConflict records a concurrent body edit surfaced for manual
+// resolution.
+type BodyConflict struct {
+	BaseEventID string
+	OurEdit     string
+	TheirEdit   string
+}
+
+// Comment is one comment event with its author and clock.
+type Comment struct {
+	EventID string
+	Text    string
+	Author  string
+	HLC     HLC
+}
+
+// IssueEvent is one row of the issue_events table.
+type IssueEvent struct {
+	EventID string
+	IssueID string
+	HLCMS   int64
+	HLCSeq  int
+	Kind    string
+	Payload []byte
+	Author  string
+	Created int64
+}
+
+// New creates a Store over db; no object sink is attached.
+func New(db *sql.DB) *Store { return &Store{db: db} }
+
+// SetObjectSink attaches sink for mirroring events and resets the per-issue
+// parent-hash cache.
+func (s *Store) SetObjectSink(sink ObjectSink) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.sink = sink
+	s.lastHash = make(map[string][32]byte)
+}
+
+// SetParentHash seeds the last-known event hash for issueID. It is a no-op
+// until SetObjectSink has initialized the cache.
+func (s *Store) SetParentHash(issueID string, h [32]byte) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.lastHash != nil {
+		s.lastHash[issueID] = h
+	}
+}
+
+// tickHLC advances the hybrid logical clock: the wall component is the max
+// of now, the local clock, and (when given) the remote clock's MS; the
+// sequence increments when the wall component stands still, else resets.
+// NOTE(review): remote.Seq is never consulted — currently harmless since
+// every caller in this file passes nil, but confirm before merging remote
+// clocks for real.
+func (s *Store) tickHLC(remote *HLC) HLC {
+	now := time.Now().UnixMilli()
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	wall := now
+	if s.hlc.MS > wall {
+		wall = s.hlc.MS
+	}
+	if remote != nil && remote.MS > wall {
+		wall = remote.MS
+	}
+	if wall == s.hlc.MS {
+		s.hlc.Seq++
+	} else {
+		s.hlc.MS = wall
+		s.hlc.Seq = 0
+	}
+	return s.hlc
+}
+
+// newID returns a time-ordered UUIDv7 string.
+func newID() string {
+	return uuid.Must(uuid.NewV7()).String()
+}
+
+// insertEvent assigns the next HLC, JSON-encodes payload, persists one event
+// row, and — when a sink is attached — mirrors the event as a hash-chained
+// object whose parent is the issue's previously mirrored event hash.
+func (s *Store) insertEvent(issueID, kind string, payload any, author string) (IssueEvent, error) {
+	hlc := s.tickHLC(nil)
+	p, err := json.Marshal(payload)
+	if err != nil {
+		return IssueEvent{}, err
+	}
+	ev := IssueEvent{
+		EventID: newID(),
+		IssueID: issueID,
+		HLCMS:   hlc.MS,
+		HLCSeq:  hlc.Seq,
+		Kind:    kind,
+		Payload: p,
+		Author:  author,
+		Created: time.Now().Unix(),
+	}
+	_, err = s.db.Exec(
+		`INSERT INTO issue_events (event_id,issue_id,hlc_ms,hlc_seq,kind,payload,author,created) VALUES (?,?,?,?,?,?,?,?)`,
+		ev.EventID, ev.IssueID, ev.HLCMS, ev.HLCSeq, ev.Kind, string(ev.Payload), ev.Author, ev.Created,
+	)
+	if err != nil {
+		return IssueEvent{}, err
+	}
+
+	if s.sink != nil {
+		// Snapshot the parent hash under the lock.
+		s.mu.Lock()
+		var parents [][32]byte
+		if prev, ok := s.lastHash[issueID]; ok {
+			parents = [][32]byte{prev}
+		}
+		s.mu.Unlock()
+
+		obj := &object.IssueEventObject{
+			IssueID: issueID,
+			Kind:    kind,
+			Payload: p,
+			Author:  author,
+			HLCMS:   hlc.MS,
+			HLCSeq:  hlc.Seq,
+			Parents: parents,
+		}
+		var buf bytes.Buffer
+		object.EncodeIssueEvent(&buf, obj)
+		raw := buf.Bytes()
+		id := object.HashIssueEvent(obj)
+
+		// Best effort: a failed sink write is dropped and the parent hash is
+		// not advanced, so the next event chains onto the last success.
+		if writeErr := s.sink.WriteIssueEventObject(id, raw); writeErr == nil {
+			s.mu.Lock()
+			s.lastHash[issueID] = id
+			s.mu.Unlock()
+		}
+	}
+
+	return ev, nil
+}
+
+// CreateIssue creates a new issue as an event sequence: create, title,
+// status=open, and (when non-empty) body. Returns the new issue ID.
+func (s *Store) CreateIssue(title, body, author string) (string, error) {
+	id := newID()
+	if _, err := s.insertEvent(id, "create", map[string]string{"id": id}, author); err != nil {
+		return "", err
+	}
+	if _, err := s.insertEvent(id, "title", title, author); err != nil {
+		return "", err
+	}
+	if _, err := s.insertEvent(id, "status", "open", author); err != nil {
+		return "", err
+	}
+	if body != "" {
+		if _, err := s.insertEvent(id, "body", body, author); err != nil {
+			return "", err
+		}
+	}
+	return id, nil
+}
+
+// SetStatus records a last-writer-wins status change.
+func (s *Store) SetStatus(issueID, status, author string) error {
+	_, err := s.insertEvent(issueID, "status", status, author)
+	return err
+}
+
+// SetTitle records a last-writer-wins title change.
+func (s *Store) SetTitle(issueID, title, author string) error {
+	_, err := s.insertEvent(issueID, "title", title, author)
+	return err
+}
+
+// SetBody records a last-writer-wins body change.
+func (s *Store) SetBody(issueID, body, author string) error {
+	_, err := s.insertEvent(issueID, "body", body, author)
+	return err
+}
+
+// AddComment appends a comment event.
+func (s *Store) AddComment(issueID, text, author string) error {
+	_, err := s.insertEvent(issueID, "comment", text, author)
+	return err
+}
+
+// AddLabel adds a label with a fresh removal token: removal targets a
+// specific token, so a later add (new token) of the same label is unaffected.
+func (s *Store) AddLabel(issueID, label, author string) error {
+	token := newID()
+	_, err := s.insertEvent(issueID, "label_add", map[string]string{"label": label, "token": token}, author)
+	return err
+}
+
+// RemoveLabel tombstones one label-add by its token.
+func (s *Store) RemoveLabel(issueID, token, author string) error {
+	_, err := s.insertEvent(issueID, "label_rm", token, author)
+	return err
+}
+
+// AddRef attaches a commit reference to the issue.
+func (s *Store) AddRef(issueID, commitRef, author string) error {
+	_, err := s.insertEvent(issueID, "ref", commitRef, author)
+	return err
+}
+
+// loadEvents returns all of issueID's events ordered by HLC (ms, then seq).
+func (s *Store) loadEvents(issueID string) ([]IssueEvent, error) {
+	rows, err := s.db.Query(
+		`SELECT event_id,issue_id,hlc_ms,hlc_seq,kind,payload,author,created FROM issue_events WHERE issue_id=? ORDER BY hlc_ms,hlc_seq`,
+		issueID,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var evs []IssueEvent
+	for rows.Next() {
+		var ev IssueEvent
+		var payload string
+		if err := rows.Scan(&ev.EventID, &ev.IssueID, &ev.HLCMS, &ev.HLCSeq, &ev.Kind, &payload, &ev.Author, &ev.Created); err != nil {
+			return nil, err
+		}
+		ev.Payload = []byte(payload)
+		evs = append(evs, ev)
+	}
+	return evs, rows.Err()
+}
+
+// Reduce folds an issue's event log (assumed already ordered by HLC) into
+// its current state: status/title/body are last-writer-wins registers,
+// comments accumulate in event order, labels form a token-based add/remove
+// set, refs accumulate. Payload unmarshal errors leave the corresponding
+// value untouched.
+func Reduce(evs []IssueEvent) Issue {
+	var iss Issue
+
+	// lwwEntry tracks the winning (clock, value) of one LWW register.
+	type lwwEntry struct {
+		hlcMS  int64
+		hlcSeq int
+		value  string
+		evID   string
+	}
+	var statusLWW, titleLWW, bodyLWW lwwEntry
+
+	type labelAdd struct{ label, token string }
+	var labelAdds []labelAdd
+	removedTokens := map[string]bool{}
+
+	for _, ev := range evs {
+		hlcMS, hlcSeq := ev.HLCMS, ev.HLCSeq
+		// beats: strictly newer clock wins; on an exact (ms,seq) tie the
+		// first-seen value is kept.
+		// NOTE(review): the tie-break does not use event IDs, so replicas
+		// that order equal-clock events differently could disagree — confirm
+		// whether a deterministic tiebreaker is needed.
+		beats := func(cur lwwEntry) bool {
+			if hlcMS != cur.hlcMS {
+				return hlcMS > cur.hlcMS
+			}
+			return hlcSeq > cur.hlcSeq
+		}
+		switch ev.Kind {
+		case "create":
+			var m map[string]string
+			json.Unmarshal(ev.Payload, &m) //nolint:errcheck
+			iss.ID = m["id"]
+		case "status":
+			var v string
+			json.Unmarshal(ev.Payload, &v) //nolint:errcheck
+			if beats(statusLWW) {
+				statusLWW = lwwEntry{hlcMS, hlcSeq, v, ev.EventID}
+			}
+		case "title":
+			var v string
+			json.Unmarshal(ev.Payload, &v) //nolint:errcheck
+			if beats(titleLWW) {
+				titleLWW = lwwEntry{hlcMS, hlcSeq, v, ev.EventID}
+			}
+		case "body":
+			var v string
+			json.Unmarshal(ev.Payload, &v) //nolint:errcheck
+			if beats(bodyLWW) {
+				bodyLWW = lwwEntry{hlcMS, hlcSeq, v, ev.EventID}
+			}
+		case "body_conflict":
+			var bc BodyConflict
+			json.Unmarshal(ev.Payload, &bc) //nolint:errcheck
+			iss.BodyConflict = &bc
+		case "comment":
+			var text string
+			json.Unmarshal(ev.Payload, &text) //nolint:errcheck
+			iss.Comments = append(iss.Comments, Comment{
+				EventID: ev.EventID,
+				Text:    text,
+				Author:  ev.Author,
+				HLC:     HLC{ev.HLCMS, ev.HLCSeq},
+			})
+		case "label_add":
+			var m map[string]string
+			json.Unmarshal(ev.Payload, &m) //nolint:errcheck
+			if m["label"] != "" && m["token"] != "" {
+				labelAdds = append(labelAdds, labelAdd{m["label"], m["token"]})
+			}
+		case "label_rm":
+			var token string
+			json.Unmarshal(ev.Payload, &token) //nolint:errcheck
+			removedTokens[token] = true
+		case "ref":
+			var ref string
+			json.Unmarshal(ev.Payload, &ref) //nolint:errcheck
+			if ref != "" {
+				iss.Refs = append(iss.Refs, ref)
+			}
+		}
+	}
+
+	iss.Status = statusLWW.value
+	// Issues with no status event default to "open".
+	if iss.Status == "" {
+		iss.Status = "open"
+	}
+	iss.Title = titleLWW.value
+	iss.Body = bodyLWW.value
+
+	// A label is present if any of its add-tokens has not been removed.
+	labelPresent := map[string]bool{}
+	for _, la := range labelAdds {
+		if !removedTokens[la.token] {
+			labelPresent[la.label] = true
+		}
+	}
+	for l := range labelPresent {
+		iss.Labels = append(iss.Labels, l)
+	}
+	sort.Strings(iss.Labels)
+
+	return iss
+}
+
+// GetIssue replays the issue's event log into its current state.
+// An unknown ID yields a "not found" error rather than an empty issue.
+func (s *Store) GetIssue(issueID string) (Issue, error) {
+	evs, err := s.loadEvents(issueID)
+	if err != nil {
+		return Issue{}, err
+	}
+	if len(evs) == 0 {
+		return Issue{}, fmt.Errorf("issue not found: %s", issueID)
+	}
+	iss := Reduce(evs)
+	// Reduce only learns the ID from a "create" event; set it explicitly so a
+	// log missing its create event still yields a usable issue.
+	iss.ID = issueID
+	return iss, nil
+}
+
+// IssueStub is the lightweight projection used by list views.
+type IssueStub struct {
+	ID     string
+	Status string
+	Title  string
+}
+
+// ListIssues returns a stub (id, status, title) for every issue that has a
+// "create" event, newest creation first (by rowid of the create row).
+//
+// NOTE(review): this replays the full event log of every issue — one query
+// per issue. Fine for small stores; revisit if listing becomes hot.
+func (s *Store) ListIssues() ([]IssueStub, error) {
+	rows, err := s.db.Query(
+		`SELECT DISTINCT issue_id FROM issue_events WHERE kind='create' ORDER BY rowid DESC`,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var ids []string
+	for rows.Next() {
+		var id string
+		// A failed scan used to be ignored, silently producing a bogus
+		// empty-ID stub; surface the error instead (as loadEvents does).
+		if err := rows.Scan(&id); err != nil {
+			return nil, err
+		}
+		ids = append(ids, id)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	var stubs []IssueStub
+	for _, id := range ids {
+		evs, err := s.loadEvents(id)
+		if err != nil {
+			return nil, err
+		}
+		iss := Reduce(evs)
+		iss.ID = id
+		stubs = append(stubs, IssueStub{
+			ID:     iss.ID,
+			Status: iss.Status,
+			Title:  iss.Title,
+		})
+	}
+	return stubs, nil
+}
+
+// MergeEvents idempotently imports a batch of (possibly remote) events.
+// INSERT OR IGNORE makes replays harmless: an event_id already present is a
+// no-op. The local HLC is advanced past every incoming timestamp so events
+// created locally afterwards sort after everything merged. Issues that gained
+// a genuinely new "body" event are then re-checked for edit conflicts.
+func (s *Store) MergeEvents(evs []IssueEvent) error {
+	// Issues whose body changed in this merge; conflict-checked below.
+	bodyIssues := map[string]bool{}
+
+	for _, ev := range evs {
+		res, err := s.db.Exec(
+			`INSERT OR IGNORE INTO issue_events (event_id,issue_id,hlc_ms,hlc_seq,kind,payload,author,created) VALUES (?,?,?,?,?,?,?,?)`,
+			ev.EventID, ev.IssueID, ev.HLCMS, ev.HLCSeq, ev.Kind, string(ev.Payload), ev.Author, ev.Created,
+		)
+		if err != nil {
+			return err
+		}
+		s.mu.Lock()
+		remote := HLC{ev.HLCMS, ev.HLCSeq}
+		// Receive rule: if the remote clock is not behind ours, adopt it and
+		// bump the sequence so our next local event strictly follows it.
+		if !remote.Less(s.hlc) {
+			s.hlc = remote
+			s.hlc.Seq++
+		}
+		s.mu.Unlock()
+
+		if ev.Kind == "body" {
+			// RowsAffected > 0 means the insert was not a duplicate.
+			if n, _ := res.RowsAffected(); n > 0 {
+				bodyIssues[ev.IssueID] = true
+			}
+		}
+	}
+
+	for issueID := range bodyIssues {
+		if err := s.detectBodyConflict(issueID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// detectBodyConflict records a body_conflict event when an issue carries two
+// or more competing "body" edits. Only the two most recent edits (by HLC) are
+// compared: bodies[0] (newest) becomes OurEdit, bodies[1] becomes TheirEdit,
+// and the recorded BaseEventID is the second-newest edit's event ID.
+//
+// NOTE(review): once any body_conflict event exists for the issue, no further
+// conflicts are ever recorded (the COUNT(*) guard below) — confirm that later
+// concurrent edits are intentionally left to LWW after the first conflict.
+func (s *Store) detectBodyConflict(issueID string) error {
+	rows, err := s.db.Query(
+		`SELECT event_id, hlc_ms, hlc_seq, payload FROM issue_events
+		 WHERE issue_id=? AND kind='body' ORDER BY hlc_ms DESC, hlc_seq DESC`,
+		issueID,
+	)
+	if err != nil {
+		return err
+	}
+	type bodyEv struct {
+		eventID string
+		hlcMS   int64
+		hlcSeq  int
+		text    string
+	}
+	var bodies []bodyEv
+	for rows.Next() {
+		var b bodyEv
+		var raw string
+		if err := rows.Scan(&b.eventID, &b.hlcMS, &b.hlcSeq, &raw); err != nil {
+			rows.Close()
+			return err
+		}
+		json.Unmarshal([]byte(raw), &b.text) //nolint:errcheck
+		bodies = append(bodies, b)
+	}
+	rows.Close()
+	if err := rows.Err(); err != nil {
+		return err
+	}
+	// Fewer than two body edits: nothing can conflict.
+	if len(bodies) < 2 {
+		return nil
+	}
+
+	// Record at most one conflict per issue (query error deliberately
+	// ignored: existing stays 0 and we fall through to recording).
+	var existing int
+	_ = s.db.QueryRow(
+		`SELECT COUNT(*) FROM issue_events WHERE issue_id=? AND kind='body_conflict'`,
+		issueID,
+	).Scan(&existing)
+	if existing > 0 {
+		return nil
+	}
+
+	bc := BodyConflict{
+		BaseEventID: bodies[1].eventID,
+		OurEdit:     bodies[0].text,
+		TheirEdit:   bodies[1].text,
+	}
+	_, err = s.insertEvent(issueID, "body_conflict", bc, "")
+	return err
+}
+
+// AllEvents returns the entire event log across all issues in HLC order,
+// suitable for feeding into another store's MergeEvents.
+func (s *Store) AllEvents() ([]IssueEvent, error) {
+	rows, err := s.db.Query(
+		`SELECT event_id,issue_id,hlc_ms,hlc_seq,kind,payload,author,created FROM issue_events ORDER BY hlc_ms,hlc_seq`,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var evs []IssueEvent
+	for rows.Next() {
+		var ev IssueEvent
+		var payload string
+		// The scan error was previously discarded; a failure would have
+		// pushed a zero-valued event into a sync stream. Check it, matching
+		// loadEvents.
+		if err := rows.Scan(&ev.EventID, &ev.IssueID, &ev.HLCMS, &ev.HLCSeq, &ev.Kind, &payload, &ev.Author, &ev.Created); err != nil {
+			return nil, err
+		}
+		ev.Payload = []byte(payload)
+		evs = append(evs, ev)
+	}
+	return evs, rows.Err()
+}

internal/issues/issues_test.go [A]
--- /dev/null
+++ b/internal/issues/issues_test.go
@@ -1,0 +1,266 @@
+package issues_test
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+	"time"
+
+	"arche/internal/issues"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// buildEvent constructs a minimal IssueEvent for tests. The EventID embeds
+// hlcMS as well as hlcSeq: previously two events of the same kind differing
+// only in hlcMS (e.g. in TestReduce_StatusLWW) shared an EventID, which would
+// collide in a store whose event_id column is a primary key.
+func buildEvent(issueID, kind string, payload []byte, hlcMS int64, hlcSeq int) issues.IssueEvent {
+	return issues.IssueEvent{
+		EventID: fmt.Sprintf("%s-%d-%d", kind, hlcMS, hlcSeq),
+		IssueID: issueID,
+		HLCMS:   hlcMS,
+		HLCSeq:  hlcSeq,
+		Kind:    kind,
+		Payload: payload,
+		Author:  "test",
+		Created: time.Now().Unix(),
+	}
+}
+
+// mustJSON encodes s as a JSON string literal. encoding/json escapes quotes,
+// backslashes and control characters correctly; the previous hand-rolled
+// `"` + s + `"` concatenation produced invalid JSON for such inputs.
+func mustJSON(s string) []byte {
+	b, err := json.Marshal(s)
+	if err != nil {
+		// Marshalling a plain string cannot fail.
+		panic(err)
+	}
+	return b
+}
+
+// Title/status/body events each land in their field after a single pass.
+func TestReduce_BasicIssue(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "create", []byte(`{"id":"id1"}`), 1, 0),
+		buildEvent("id1", "title", mustJSON("hello"), 2, 0),
+		buildEvent("id1", "status", mustJSON("open"), 3, 0),
+		buildEvent("id1", "body", mustJSON("first body"), 4, 0),
+	}
+	iss := issues.Reduce(evs)
+	if iss.Title != "hello" {
+		t.Errorf("title: want %q got %q", "hello", iss.Title)
+	}
+	if iss.Status != "open" {
+		t.Errorf("status: want open got %q", iss.Status)
+	}
+	if iss.Body != "first body" {
+		t.Errorf("body: want %q got %q", "first body", iss.Body)
+	}
+}
+
+// The later HLC timestamp wins even when its event appears first in the slice.
+func TestReduce_StatusLWW(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "status", mustJSON("closed"), 10, 0),
+		buildEvent("id1", "status", mustJSON("open"), 5, 0),
+	}
+	iss := issues.Reduce(evs)
+	if iss.Status != "closed" {
+		t.Errorf("LWW: want closed got %q", iss.Status)
+	}
+}
+
+// Equal hlc_ms: the higher hlc_seq breaks the tie.
+func TestReduce_TitleLWW_SameMS_SeqBreaks(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "title", mustJSON("first"), 10, 0),
+		buildEvent("id1", "title", mustJSON("second"), 10, 1),
+	}
+	iss := issues.Reduce(evs)
+	if iss.Title != "second" {
+		t.Errorf("seq tiebreak: want %q got %q", "second", iss.Title)
+	}
+}
+
+// Comments are append-only and keep their input order.
+func TestReduce_Comments_AppendOrder(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "comment", mustJSON("first comment"), 1, 0),
+		buildEvent("id1", "comment", mustJSON("second comment"), 2, 0),
+		buildEvent("id1", "comment", mustJSON("third comment"), 3, 0),
+	}
+	iss := issues.Reduce(evs)
+	if len(iss.Comments) != 3 {
+		t.Fatalf("want 3 comments, got %d", len(iss.Comments))
+	}
+	if iss.Comments[0].Text != "first comment" {
+		t.Errorf("comment[0]: want %q got %q", "first comment", iss.Comments[0].Text)
+	}
+	if iss.Comments[2].Text != "third comment" {
+		t.Errorf("comment[2]: want %q got %q", "third comment", iss.Comments[2].Text)
+	}
+}
+
+// Removing by token deletes exactly the add that minted that token.
+func TestReduce_Labels_ORSet(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "label_add", []byte(`{"label":"bug","token":"tok1"}`), 1, 0),
+		buildEvent("id1", "label_add", []byte(`{"label":"urgent","token":"tok2"}`), 2, 0),
+		buildEvent("id1", "label_rm", mustJSON("tok1"), 3, 0),
+	}
+	iss := issues.Reduce(evs)
+	if len(iss.Labels) != 1 {
+		t.Fatalf("want 1 label, got %d: %v", len(iss.Labels), iss.Labels)
+	}
+	if iss.Labels[0] != "urgent" {
+		t.Errorf("label: want urgent got %q", iss.Labels[0])
+	}
+}
+
+// OR-set semantics: a second add with its own token survives removal of the first.
+func TestReduce_Labels_AddTwiceRemoveOnce(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "label_add", []byte(`{"label":"bug","token":"tok1"}`), 1, 0),
+		buildEvent("id1", "label_add", []byte(`{"label":"bug","token":"tok2"}`), 2, 0),
+		buildEvent("id1", "label_rm", mustJSON("tok1"), 3, 0),
+	}
+	iss := issues.Reduce(evs)
+	if len(iss.Labels) != 1 || iss.Labels[0] != "bug" {
+		t.Errorf("OR-set: bug should survive remove of tok1; got %v", iss.Labels)
+	}
+}
+
+// Refs accumulate; there is no removal event for them.
+func TestReduce_Refs_GrowOnly(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "ref", mustJSON("abc123"), 1, 0),
+		buildEvent("id1", "ref", mustJSON("def456"), 2, 0),
+	}
+	iss := issues.Reduce(evs)
+	if len(iss.Refs) != 2 {
+		t.Fatalf("want 2 refs, got %d", len(iss.Refs))
+	}
+}
+
+// A body_conflict event surfaces on the reduced issue.
+func TestReduce_BodyConflict(t *testing.T) {
+	bc := `{"BaseEventID":"base","OurEdit":"ours","TheirEdit":"theirs"}`
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "body_conflict", []byte(bc), 5, 0),
+	}
+	iss := issues.Reduce(evs)
+	if iss.BodyConflict == nil {
+		t.Fatal("expected BodyConflict, got nil")
+	}
+	if iss.BodyConflict.OurEdit != "ours" {
+		t.Errorf("OurEdit: want ours got %q", iss.BodyConflict.OurEdit)
+	}
+}
+
+// Issues with no status event default to "open".
+func TestReduce_EmptyStatus_DefaultsOpen(t *testing.T) {
+	evs := []issues.IssueEvent{
+		buildEvent("id1", "create", []byte(`{"id":"id1"}`), 1, 0),
+	}
+	iss := issues.Reduce(evs)
+	if iss.Status != "open" {
+		t.Errorf("default status: want open got %q", iss.Status)
+	}
+}
+
+// openTestStore creates an in-memory store and registers its cleanup.
+func openTestStore(t *testing.T) *issues.Store {
+	t.Helper()
+	s, err := issues.OpenForTesting()
+	if err != nil {
+		t.Fatalf("open store: %v", err)
+	}
+	t.Cleanup(func() { s.Close() })
+	return s
+}
+
+// Round-trip: create an issue, read it back through event replay.
+func TestStore_CreateAndGet(t *testing.T) {
+	s := openTestStore(t)
+
+	id, err := s.CreateIssue("fix the bug", "steps to reproduce", "alice")
+	if err != nil {
+		t.Fatalf("CreateIssue: %v", err)
+	}
+
+	iss, err := s.GetIssue(id)
+	if err != nil {
+		t.Fatalf("GetIssue: %v", err)
+	}
+	if iss.Title != "fix the bug" {
+		t.Errorf("title: want %q got %q", "fix the bug", iss.Title)
+	}
+	if iss.Status != "open" {
+		t.Errorf("status: want open got %q", iss.Status)
+	}
+	if iss.Body != "steps to reproduce" {
+		t.Errorf("body: want %q got %q", "steps to reproduce", iss.Body)
+	}
+}
+
+// A status event overrides the default "open".
+func TestStore_SetStatus(t *testing.T) {
+	s := openTestStore(t)
+	id, _ := s.CreateIssue("bug", "", "alice")
+
+	if err := s.SetStatus(id, "closed", "bob"); err != nil {
+		t.Fatalf("SetStatus: %v", err)
+	}
+	iss, _ := s.GetIssue(id)
+	if iss.Status != "closed" {
+		t.Errorf("want closed got %q", iss.Status)
+	}
+}
+
+// A comment event materializes on replay with its text intact.
+func TestStore_AddComment(t *testing.T) {
+	s := openTestStore(t)
+	id, _ := s.CreateIssue("bug", "", "alice")
+
+	if err := s.AddComment(id, "this is a comment", "bob"); err != nil {
+		t.Fatalf("AddComment: %v", err)
+	}
+	iss, _ := s.GetIssue(id)
+	if len(iss.Comments) != 1 {
+		t.Fatalf("want 1 comment, got %d", len(iss.Comments))
+	}
+	if iss.Comments[0].Text != "this is a comment" {
+		t.Errorf("comment text: want %q got %q", "this is a comment", iss.Comments[0].Text)
+	}
+}
+
+// Every created issue appears in the listing.
+func TestStore_ListIssues(t *testing.T) {
+	s := openTestStore(t)
+	s.CreateIssue("issue A", "", "alice") //nolint:errcheck
+	s.CreateIssue("issue B", "", "bob")   //nolint:errcheck
+
+	stubs, err := s.ListIssues()
+	if err != nil {
+		t.Fatalf("ListIssues: %v", err)
+	}
+	if len(stubs) != 2 {
+		t.Errorf("want 2 issues, got %d", len(stubs))
+	}
+}
+
+// Merging a store's own events back into itself must be a no-op.
+func TestStore_MergeEvents_Idempotent(t *testing.T) {
+	s := openTestStore(t)
+	id, _ := s.CreateIssue("bug", "body", "alice")
+
+	all, err := s.AllEvents()
+	if err != nil {
+		t.Fatalf("AllEvents: %v", err)
+	}
+
+	if err := s.MergeEvents(all); err != nil {
+		t.Fatalf("MergeEvents idempotent: %v", err)
+	}
+
+	iss, _ := s.GetIssue(id)
+	if iss.Title != "bug" {
+		t.Errorf("title after idempotent merge: want bug got %q", iss.Title)
+	}
+}
+
+// Two stores exchanging full logs converge to the union of their comments.
+func TestStore_MergeEvents_Union(t *testing.T) {
+	s1 := openTestStore(t)
+	s2 := openTestStore(t)
+
+	id, _ := s1.CreateIssue("shared bug", "", "alice")
+	s1.AddComment(id, "from s1", "alice") //nolint:errcheck
+
+	evs1, _ := s1.AllEvents()
+	s2.MergeEvents(evs1) //nolint:errcheck
+
+	s2.AddComment(id, "from s2", "bob") //nolint:errcheck
+
+	evs2, _ := s2.AllEvents()
+	s1.MergeEvents(evs2) //nolint:errcheck
+
+	iss1, _ := s1.GetIssue(id)
+	iss2, _ := s2.GetIssue(id)
+	if len(iss1.Comments) != 2 {
+		t.Errorf("s1 wants 2 comments, got %d", len(iss1.Comments))
+	}
+	if len(iss2.Comments) != 2 {
+		t.Errorf("s2 wants 2 comments, got %d", len(iss2.Comments))
+	}
+}

internal/issues/testing.go [A]
--- /dev/null
+++ b/internal/issues/testing.go
@@ -1,0 +1,35 @@
+package issues
+
+import (
+	"database/sql"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// OpenForTesting returns a Store backed by an in-memory SQLite database with
+// the issue_events schema pre-created. Test use only.
+func OpenForTesting() (*Store, error) {
+	db, err := sql.Open("sqlite3", ":memory:")
+	if err != nil {
+		return nil, err
+	}
+	// Each sqlite3 connection gets its own :memory: database; cap the pool at
+	// one connection so every query sees the same data.
+	db.SetMaxOpenConns(1)
+	_, err = db.Exec(`
+CREATE TABLE issue_events (
+    event_id TEXT PRIMARY KEY,
+    issue_id TEXT NOT NULL,
+    hlc_ms   INTEGER NOT NULL,
+    hlc_seq  INTEGER NOT NULL,
+    kind     TEXT NOT NULL,
+    payload  TEXT NOT NULL,
+    author   TEXT NOT NULL DEFAULT '',
+    created  INTEGER NOT NULL
+);
+CREATE INDEX idx_ie_issue ON issue_events(issue_id);
+`)
+	if err != nil {
+		db.Close()
+		return nil, err
+	}
+	// Remaining Store fields (mutex, HLC) start at their zero values.
+	return &Store{db: db}, nil
+}
+
+// Close releases the underlying database handle.
+func (s *Store) Close() error { return s.db.Close() }

internal/markdown/markdown.go [A]
--- /dev/null
+++ b/internal/markdown/markdown.go
@@ -1,0 +1,33 @@
+package markdown
+
+import (
+	"bytes"
+	"html/template"
+
+	"github.com/yuin/goldmark"
+	"github.com/yuin/goldmark/extension"
+	"github.com/yuin/goldmark/parser"
+	"github.com/yuin/goldmark/renderer/html"
+)
+
+// md is the shared goldmark instance: GitHub-flavored Markdown plus
+// footnotes, auto-generated heading IDs, hard line breaks, XHTML output.
+var md = goldmark.New(
+	goldmark.WithExtensions(
+		extension.GFM,
+		extension.Footnote,
+	),
+	goldmark.WithParserOptions(
+		parser.WithAutoHeadingID(),
+	),
+	goldmark.WithRendererOptions(
+		html.WithHardWraps(),
+		html.WithXHTML(),
+	),
+)
+
+// Render converts Markdown source to HTML. On a conversion error the source
+// is HTML-escaped and wrapped in <pre> so something readable is always shown.
+//
+// NOTE(review): the return type template.HTML bypasses html/template's
+// auto-escaping; html.WithUnsafe is not enabled here, so goldmark should be
+// filtering raw HTML in the source — confirm before rendering untrusted input.
+func Render(src string) template.HTML {
+	var buf bytes.Buffer
+	if err := md.Convert([]byte(src), &buf); err != nil {
+		return template.HTML("<pre>" + template.HTMLEscapeString(src) + "</pre>")
+	}
+	return template.HTML(buf.String())
+}

internal/merge/merge.go [A]
--- /dev/null
+++ b/internal/merge/merge.go
@@ -1,0 +1,306 @@
+package merge
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+
+	"github.com/sergi/go-diff/diffmatchpatch"
+)
+
+// MergeResult is the outcome of a textual three-way merge. On a clean merge
+// only Content/Clean are set; on conflict Content holds marker-delimited text
+// and Ours/Theirs carry the two competing versions verbatim.
+type MergeResult struct {
+	Content string
+	Clean   bool
+	Ours    string
+	Theirs  string
+}
+
+// MergeText merges two edits of base. The trivial cases (identical edits, or
+// one side unchanged from base) resolve immediately. Otherwise the ours-diff
+// is computed as a patch against base and applied to theirs; if every patch
+// hunk applies, the merge is considered clean. Anything else degrades to a
+// whole-text conflict with git-style markers — there is no hunk-level
+// conflict granularity.
+func MergeText(base, ours, theirs string) MergeResult {
+	if ours == theirs {
+		return MergeResult{Content: ours, Clean: true}
+	}
+	if base == ours {
+		return MergeResult{Content: theirs, Clean: true}
+	}
+	if base == theirs {
+		return MergeResult{Content: ours, Clean: true}
+	}
+
+	dmp := diffmatchpatch.New()
+
+	// Express "what ours changed" as a patch and replay it onto theirs.
+	patches := dmp.PatchMake(base, ours)
+	result, applied := dmp.PatchApply(patches, theirs)
+
+	allApplied := true
+	for _, ok := range applied {
+		if !ok {
+			allApplied = false
+			break
+		}
+	}
+
+	// The marker check is a safety net: presumably it only fires when the
+	// inputs themselves already contained conflict markers.
+	if allApplied && !strings.Contains(result, "<<<<<<<") {
+		return MergeResult{Content: result, Clean: true}
+	}
+
+	conflict := fmt.Sprintf(
+		"<<<<<<< ours\n%s=======\n%s>>>>>>> theirs\n",
+		ensureNewline(ours),
+		ensureNewline(theirs),
+	)
+	return MergeResult{
+		Content: conflict,
+		Clean:   false,
+		Ours:    ours,
+		Theirs:  theirs,
+	}
+}
+
+// TreeMergeResult is the outcome of a three-way tree merge: the merged tree's
+// ID plus the (sorted) paths that could not be merged cleanly.
+type TreeMergeResult struct {
+	TreeID    [32]byte
+	Conflicts []string
+}
+
+// Trees performs a three-way merge of two trees against a common base.
+// Per path:
+//   - identical on both sides         -> taken as-is;
+//   - changed on one side only        -> the changed side wins;
+//   - changed on both sides           -> textual merge, else a conflict object;
+//   - delete/modify on opposite sides -> always a conflict object.
+// All new objects are written inside a single store transaction; any error
+// rolls it back so nothing is persisted.
+func Trees(r *repo.Repo, base, ours, theirs [32]byte) (*TreeMergeResult, error) {
+	baseFiles := make(map[string][32]byte)
+	oursFiles := make(map[string][32]byte)
+	theirsFiles := make(map[string][32]byte)
+	oursMode := make(map[string]object.EntryMode)
+	theirsMode := make(map[string]object.EntryMode)
+
+	if base != object.ZeroID {
+		if err := flattenTree(r, base, "", baseFiles, nil); err != nil {
+			return nil, fmt.Errorf("merge base: %w", err)
+		}
+	}
+	if err := flattenTree(r, ours, "", oursFiles, oursMode); err != nil {
+		return nil, fmt.Errorf("merge ours: %w", err)
+	}
+	if err := flattenTree(r, theirs, "", theirsFiles, theirsMode); err != nil {
+		return nil, fmt.Errorf("merge theirs: %w", err)
+	}
+
+	// Union of every path seen on any side; a missing map entry reads as the
+	// zero ID, meaning "absent on that side".
+	allPaths := make(map[string]bool)
+	for p := range oursFiles {
+		allPaths[p] = true
+	}
+	for p := range theirsFiles {
+		allPaths[p] = true
+	}
+	for p := range baseFiles {
+		allPaths[p] = true
+	}
+
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return nil, err
+	}
+
+	var mergedFiles []mergedFile
+	var conflictPaths []string
+
+	for path := range allPaths {
+		bBase := baseFiles[path]
+		bOurs := oursFiles[path]
+		bTheirs := theirsFiles[path]
+
+		switch {
+		case bOurs == bTheirs:
+			// Same on both sides (or deleted on both): keep if present.
+			if bOurs != object.ZeroID {
+				mergedFiles = append(mergedFiles, mergedFile{path: path, blobID: bOurs, mode: oursMode[path]})
+			}
+
+		case bBase == bOurs:
+			// Only theirs changed it (or deleted it).
+			if bTheirs != object.ZeroID {
+				mergedFiles = append(mergedFiles, mergedFile{path: path, blobID: bTheirs, mode: theirsMode[path]})
+			}
+
+		case bBase == bTheirs:
+			// Only ours changed it (or deleted it).
+			if bOurs != object.ZeroID {
+				mergedFiles = append(mergedFiles, mergedFile{path: path, blobID: bOurs, mode: oursMode[path]})
+			}
+
+		default:
+			// Both sides diverged from base.
+			if bOurs == object.ZeroID || bTheirs == object.ZeroID {
+				// Delete/modify conflict: no textual merge possible.
+				conflictPaths = append(conflictPaths, path)
+				conf := &object.Conflict{
+					Ours:   object.ConflictSide{BlobID: bOurs},
+					Theirs: object.ConflictSide{BlobID: bTheirs},
+				}
+				if bBase != object.ZeroID {
+					conf.Base = &object.ConflictSide{BlobID: bBase}
+				}
+				conflictID, err := repo.WriteConflictTx(r.Store, tx, conf)
+				if err != nil {
+					r.Store.Rollback(tx)
+					return nil, err
+				}
+				if err := r.Store.AddConflict(tx, path); err != nil {
+					r.Store.Rollback(tx)
+					return nil, err
+				}
+				mergedFiles = append(mergedFiles, mergedFile{path: path, blobID: conflictID, mode: object.ModeFile})
+				continue
+			}
+
+			// Read errors were previously discarded here, silently merging
+			// against empty content; surface them instead.
+			baseContent, err := readBlobStr(r, bBase)
+			if err != nil {
+				r.Store.Rollback(tx)
+				return nil, err
+			}
+			oursContent, err := readBlobStr(r, bOurs)
+			if err != nil {
+				r.Store.Rollback(tx)
+				return nil, err
+			}
+			theirsContent, err := readBlobStr(r, bTheirs)
+			if err != nil {
+				r.Store.Rollback(tx)
+				return nil, err
+			}
+
+			result := MergeText(baseContent, oursContent, theirsContent)
+			if !result.Clean {
+				conflictPaths = append(conflictPaths, path)
+				conf := &object.Conflict{
+					Ours:   object.ConflictSide{BlobID: bOurs},
+					Theirs: object.ConflictSide{BlobID: bTheirs},
+				}
+				if bBase != object.ZeroID {
+					conf.Base = &object.ConflictSide{BlobID: bBase}
+				}
+				conflictID, err := repo.WriteConflictTx(r.Store, tx, conf)
+				if err != nil {
+					r.Store.Rollback(tx)
+					return nil, err
+				}
+				if err := r.Store.AddConflict(tx, path); err != nil {
+					r.Store.Rollback(tx)
+					return nil, err
+				}
+				mergedFiles = append(mergedFiles, mergedFile{path: path, blobID: conflictID, mode: oursMode[path]})
+			} else {
+				id, err := writeBlobTx(r, tx, []byte(result.Content))
+				if err != nil {
+					r.Store.Rollback(tx)
+					return nil, err
+				}
+				mergedFiles = append(mergedFiles, mergedFile{path: path, blobID: id, mode: oursMode[path]})
+			}
+		}
+	}
+
+	rootID, err := buildMergeTree(r, tx, mergedFiles)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return nil, err
+	}
+
+	if err := r.Store.Commit(tx); err != nil {
+		return nil, err
+	}
+	// Map iteration above is randomized; sort so callers get a stable,
+	// reproducible conflict list.
+	sort.Strings(conflictPaths)
+	return &TreeMergeResult{TreeID: rootID, Conflicts: conflictPaths}, nil
+}
+
+// readBlobStr reads a blob's content as a string; the zero ID reads as "".
+func readBlobStr(r *repo.Repo, id [32]byte) (string, error) {
+	if id == object.ZeroID {
+		return "", nil
+	}
+	content, err := r.ReadBlob(id)
+	if err != nil {
+		return "", err
+	}
+	return string(content), nil
+}
+
+// writeBlobTx hashes and stores content as a blob object inside tx and
+// returns its content address. (The blob is encoded twice — once for the
+// stored bytes and once inside HashBlob — acceptable for merge-sized blobs.)
+func writeBlobTx(r *repo.Repo, tx *store.Tx, content []byte) ([32]byte, error) {
+	b := &object.Blob{Content: content}
+	var buf bytes.Buffer
+	object.EncodeBlob(&buf, b)
+	id := object.HashBlob(b)
+	return id, r.Store.WriteObject(tx, id, string(object.KindBlob), buf.Bytes())
+}
+
+// mergedFile is one flat path -> blob binding produced by the merge loop.
+type mergedFile struct {
+	path   string
+	blobID [32]byte
+	mode   object.EntryMode
+}
+
+// buildMergeTree assembles the flat file list into nested tree objects,
+// writing each directory bottom-up inside tx and returning the root tree ID.
+// Entries are sorted by name before writing so the encoding is canonical.
+//
+// NOTE(review): a path that is both a file and a directory prefix of another
+// path (e.g. "a" and "a/b") would descend into a file node whose children map
+// is nil and panic on assignment; assumed impossible for well-formed trees.
+func buildMergeTree(r *repo.Repo, tx *store.Tx, files []mergedFile) ([32]byte, error) {
+	type node struct {
+		isFile   bool
+		blobID   [32]byte
+		mode     object.EntryMode
+		children map[string]*node
+	}
+	root := &node{children: make(map[string]*node)}
+
+	// Build the directory trie: intermediate segments become directory nodes,
+	// the final segment becomes a file leaf.
+	for _, f := range files {
+		parts := strings.Split(f.path, "/")
+		cur := root
+		for i, part := range parts {
+			if i == len(parts)-1 {
+				cur.children[part] = &node{isFile: true, blobID: f.blobID, mode: f.mode}
+			} else {
+				if _, ok := cur.children[part]; !ok {
+					cur.children[part] = &node{children: make(map[string]*node)}
+				}
+				cur = cur.children[part]
+			}
+		}
+	}
+
+	// Depth-first write: children first so each directory knows its sub-IDs.
+	var writeNode func(n *node) ([32]byte, error)
+	writeNode = func(n *node) ([32]byte, error) {
+		var entries []object.TreeEntry
+		for name, child := range n.children {
+			if child.isFile {
+				entries = append(entries, object.TreeEntry{Name: name, Mode: child.mode, ObjectID: child.blobID})
+			} else {
+				subID, err := writeNode(child)
+				if err != nil {
+					return object.ZeroID, err
+				}
+				entries = append(entries, object.TreeEntry{Name: name, Mode: object.ModeDir, ObjectID: subID})
+			}
+		}
+		sort.Slice(entries, func(i, j int) bool { return entries[i].Name < entries[j].Name })
+		t := &object.Tree{Entries: entries}
+		id, err := repo.WriteTreeTx(r.Store, tx, t)
+		return id, err
+	}
+
+	return writeNode(root)
+}
+
+// flattenTree walks treeID recursively, recording every non-directory entry
+// into blobs keyed by slash-joined path (and its mode into modes when modes
+// is non-nil). The zero tree ID flattens to nothing.
+func flattenTree(r *repo.Repo, treeID [32]byte, prefix string, blobs map[string][32]byte, modes map[string]object.EntryMode) error {
+	if treeID == object.ZeroID {
+		return nil
+	}
+	t, err := r.ReadTree(treeID)
+	if err != nil {
+		return err
+	}
+	for _, e := range t.Entries {
+		rel := joinPath(prefix, e.Name)
+		if e.Mode == object.ModeDir {
+			if err := flattenTree(r, e.ObjectID, rel, blobs, modes); err != nil {
+				return err
+			}
+		} else {
+			blobs[rel] = e.ObjectID
+			if modes != nil {
+				modes[rel] = e.Mode
+			}
+		}
+	}
+	return nil
+}
+
+// joinPath joins a path segment onto prefix with "/", avoiding a leading
+// slash when prefix is empty (the tree root).
+func joinPath(prefix, name string) string {
+	if prefix == "" {
+		return name
+	}
+	return prefix + "/" + name
+}
+
+// ensureNewline guarantees a trailing "\n" on non-empty text so conflict
+// marker lines stay on their own lines.
+func ensureNewline(s string) string {
+	if s != "" && !strings.HasSuffix(s, "\n") {
+		return s + "\n"
+	}
+	return s
+}

internal/merge/merge_test.go [A]
--- /dev/null
+++ b/internal/merge/merge_test.go
@@ -1,0 +1,92 @@
+package merge_test
+
+import (
+	"strings"
+	"testing"
+
+	"arche/internal/merge"
+)
+
+// Identical edits merge trivially to either side.
+func TestMergeText_Equal(t *testing.T) {
+	r := merge.MergeText("base", "same", "same")
+	if !r.Clean {
+		t.Error("equal ours/theirs should be clean")
+	}
+	if r.Content != "same" {
+		t.Errorf("got %q, want %q", r.Content, "same")
+	}
+}
+
+// Ours unchanged from base: theirs' edit is taken wholesale.
+func TestMergeText_FastForwardOurs(t *testing.T) {
+	r := merge.MergeText("base", "base", "theirs-edit")
+	if !r.Clean {
+		t.Error("base==ours should be clean fast-forward")
+	}
+	if r.Content != "theirs-edit" {
+		t.Errorf("got %q, want %q", r.Content, "theirs-edit")
+	}
+}
+
+// Theirs unchanged from base: ours' edit is taken wholesale.
+func TestMergeText_FastForwardTheirs(t *testing.T) {
+	r := merge.MergeText("base", "ours-edit", "base")
+	if !r.Clean {
+		t.Error("base==theirs should be clean fast-forward")
+	}
+	if r.Content != "ours-edit" {
+		t.Errorf("got %q, want %q", r.Content, "ours-edit")
+	}
+}
+
+// Edits to different lines should usually merge; a conflict is tolerated
+// (logged, not failed) because MergeText's patch application makes no
+// hunk-level guarantee.
+func TestMergeText_NonConflictingEdits(t *testing.T) {
+	base := "line1\nline2\nline3\n"
+	ours := "line1\nLINE2-OURS\nline3\n"
+	theirs := "line1\nline2\nLINE3-THEIRS\n"
+	r := merge.MergeText(base, ours, theirs)
+	if !r.Clean {
+		t.Logf("merge produced conflict (patch may not have applied cleanly): %q", r.Content)
+		return
+	}
+	if !strings.Contains(r.Content, "LINE2-OURS") {
+		t.Error("merged content missing LINE2-OURS")
+	}
+	if !strings.Contains(r.Content, "LINE3-THEIRS") {
+		t.Error("merged content missing LINE3-THEIRS")
+	}
+}
+
+// When both sides rewrite the same line, a conflict should carry markers and
+// preserve both versions verbatim. A clean merge is tolerated (logged) for
+// the same reason as above.
+func TestMergeText_Conflict(t *testing.T) {
+	base := "shared line\n"
+	ours := "ours changed this\n"
+	theirs := "theirs changed this\n"
+	r := merge.MergeText(base, ours, theirs)
+	if r.Clean {
+		t.Logf("no conflict detected; merged content: %q", r.Content)
+		return
+	}
+	if !strings.Contains(r.Content, "<<<<<<<") {
+		t.Errorf("expected conflict markers in: %q", r.Content)
+	}
+	if r.Ours != ours {
+		t.Errorf("Ours: got %q, want %q", r.Ours, ours)
+	}
+	if r.Theirs != theirs {
+		t.Errorf("Theirs: got %q, want %q", r.Theirs, theirs)
+	}
+}
+
+// Fully identical inputs short-circuit before any diffing.
+func TestMergeText_AllSameAsBase(t *testing.T) {
+	r := merge.MergeText("content", "content", "content")
+	if !r.Clean {
+		t.Error("identical base/ours/theirs should be clean")
+	}
+	if r.Content != "content" {
+		t.Errorf("got %q, want %q", r.Content, "content")
+	}
+}
+
+// Two additions onto an empty base must never produce an empty clean result.
+func TestMergeText_EmptyBase(t *testing.T) {
+	r := merge.MergeText("", "add-ours\n", "add-theirs\n")
+	if r.Content == "" && r.Clean {
+		t.Error("merging two non-empty strings into empty base should produce non-empty result")
+	}
+}

internal/object/changeid.go [A]
--- /dev/null
+++ b/internal/object/changeid.go
@@ -1,0 +1,33 @@
+package object
+
+import (
+	"crypto/rand"
+	"fmt"
+	"strings"
+)
+
+const unambiguousAlphabet = "abcdefghjkmnpqrstvwxyz"
+
+// NewChangeID returns a random identifier of the given length drawn from the
+// 22-letter unambiguous alphabet above.
+//
+// Rejection sampling keeps the distribution uniform: the previous
+// byte%len(alphabet) mapping was biased because 256 is not a multiple of 22,
+// making the first 256%22 = 14 letters measurably more likely.
+func NewChangeID(length int) string {
+	al := byte(len(unambiguousAlphabet))
+	// Largest multiple of al representable in a byte; random bytes at or
+	// above this threshold are rejected and redrawn.
+	limit := byte(256 - (256 % int(al)))
+	out := make([]byte, length)
+	buf := make([]byte, length)
+	i := 0
+	for i < length {
+		if _, err := rand.Read(buf); err != nil {
+			panic(fmt.Sprintf("change ID: rand.Read failed: %v", err))
+		}
+		for _, b := range buf {
+			if b >= limit {
+				continue // rejected: would bias toward early letters
+			}
+			out[i] = unambiguousAlphabet[b%al]
+			i++
+			if i == length {
+				break
+			}
+		}
+	}
+	return string(out)
+}
+
+// FormatChangeID renders a raw change ID in its display form with the "ch:"
+// prefix.
+func FormatChangeID(id string) string {
+	return "ch:" + id
+}
+
+// StripChangeIDPrefix removes the "ch:" display prefix when present,
+// accepting both formatted ("ch:abc") and raw ("abc") forms.
+func StripChangeIDPrefix(id string) string {
+	// Identical behavior to the hand-rolled length/slice check, via stdlib.
+	return strings.TrimPrefix(id, "ch:")
+}

internal/object/encode.go [A]
--- /dev/null
+++ b/internal/object/encode.go
@@ -1,0 +1,478 @@
+package object
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"sort"
+	"time"
+
+	"github.com/zeebo/blake3"
+)
+
+// HashBlob returns the BLAKE3-256 digest of the canonical blob encoding.
+func HashBlob(b *Blob) [32]byte {
+	var buf bytes.Buffer
+	EncodeBlob(&buf, b)
+	return blake3.Sum256(buf.Bytes())
+}
+
+// HashTree returns the BLAKE3-256 digest of the canonical tree encoding.
+func HashTree(t *Tree) [32]byte {
+	var buf bytes.Buffer
+	EncodeTree(&buf, t)
+	return blake3.Sum256(buf.Bytes())
+}
+
+// HashCommit returns the BLAKE3-256 digest of the canonical commit encoding
+// (including any embedded signature).
+func HashCommit(c *Commit) [32]byte {
+	var buf bytes.Buffer
+	EncodeCommit(&buf, c)
+	return blake3.Sum256(buf.Bytes())
+}
+
+// HashConflict returns the BLAKE3-256 digest of the canonical conflict encoding.
+func HashConflict(c *Conflict) [32]byte {
+	var buf bytes.Buffer
+	EncodeConflict(&buf, c)
+	return blake3.Sum256(buf.Bytes())
+}
+
+// HashObsolete returns the BLAKE3-256 digest of the canonical obsolete-marker encoding.
+func HashObsolete(o *ObsoleteMarker) [32]byte {
+	var buf bytes.Buffer
+	EncodeObsolete(&buf, o)
+	return blake3.Sum256(buf.Bytes())
+}
+
+// EncodeBlob writes the canonical binary form of b to w:
+// magic "arche-blob\0", big-endian u64 content length, raw content bytes.
+func EncodeBlob(w *bytes.Buffer, b *Blob) {
+	w.WriteString("arche-blob\x00")
+	writeUint64(w, uint64(len(b.Content)))
+	w.Write(b.Content)
+}
+
+// EncodeTree writes the canonical binary form of t to w. Entries are copied
+// and sorted by name first so logically equal trees hash identically
+// regardless of input order; property keys are likewise sorted per entry.
+// Wire format: "arche-tree\0", u32 entry count, then per entry: mode byte,
+// u16 name length + name, 32-byte object ID, u16 prop count, and per prop
+// u16 key length + key, u16 value length + value (all big-endian).
+// NOTE(review): the u16 length fields silently truncate oversized
+// names/keys/values — presumably callers validate sizes upstream; confirm.
+func EncodeTree(w *bytes.Buffer, t *Tree) {
+	entries := make([]TreeEntry, len(t.Entries))
+	copy(entries, t.Entries)
+	sort.Slice(entries, func(i, j int) bool { return entries[i].Name < entries[j].Name })
+
+	w.WriteString("arche-tree\x00")
+	writeUint32(w, uint32(len(entries)))
+	for _, e := range entries {
+		w.WriteByte(byte(e.Mode))
+		writeUint16(w, uint16(len(e.Name)))
+		w.WriteString(e.Name)
+		w.Write(e.ObjectID[:])
+
+		keys := make([]string, 0, len(e.Props))
+		for k := range e.Props {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		writeUint16(w, uint16(len(keys)))
+		for _, k := range keys {
+			v := e.Props[k]
+			writeUint16(w, uint16(len(k)))
+			w.WriteString(k)
+			writeUint16(w, uint16(len(v)))
+			w.WriteString(v)
+		}
+	}
+}
+
+// EncodeCommit writes the canonical binary form of c to w:
+// magic "arche-commit\0", 32-byte tree ID, u8 parent count + 32-byte parent
+// IDs, u8 change-ID length + change ID, phase byte, author and committer
+// signatures, u32 message length + message, and — only when a signature is
+// present — a 1 flag byte followed by u16 signature length + signature.
+// An unsigned commit simply ends after the message (no 0 flag byte).
+// NOTE(review): u8 fields silently truncate >255 parents or a >255-byte
+// change ID — confirm those bounds are enforced by callers.
+func EncodeCommit(w *bytes.Buffer, c *Commit) {
+	w.WriteString("arche-commit\x00")
+	w.Write(c.TreeID[:])
+	w.WriteByte(byte(len(c.Parents)))
+	for _, p := range c.Parents {
+		w.Write(p[:])
+	}
+	w.WriteByte(byte(len(c.ChangeID)))
+	w.WriteString(c.ChangeID)
+	w.WriteByte(byte(c.Phase))
+	encodeSignature(w, c.Author)
+	encodeSignature(w, c.Committer)
+	writeUint32(w, uint32(len(c.Message)))
+	w.WriteString(c.Message)
+	if len(c.CommitSig) > 0 {
+		w.WriteByte(1)
+		writeUint16(w, uint16(len(c.CommitSig)))
+		w.Write(c.CommitSig)
+	}
+}
+
+// CommitBodyForSigning returns the canonical encoding of c with CommitSig
+// blanked, i.e. the exact byte sequence a signature covers. Works on a
+// shallow copy so the caller's commit is not modified.
+func CommitBodyForSigning(c *Commit) []byte {
+	tmp := *c
+	tmp.CommitSig = nil
+	var buf bytes.Buffer
+	EncodeCommit(&buf, &tmp)
+	return buf.Bytes()
+}
+
+// EncodeConflict writes the canonical binary form of c to w:
+// magic "arche-conflict\0", a presence byte (1/0) optionally followed by the
+// base side's commit+blob IDs, then ours and theirs commit+blob IDs
+// (each ID 32 bytes).
+func EncodeConflict(w *bytes.Buffer, c *Conflict) {
+	w.WriteString("arche-conflict\x00")
+	if c.Base != nil {
+		w.WriteByte(1)
+		w.Write(c.Base.CommitID[:])
+		w.Write(c.Base.BlobID[:])
+	} else {
+		w.WriteByte(0)
+	}
+	w.Write(c.Ours.CommitID[:])
+	w.Write(c.Ours.BlobID[:])
+	w.Write(c.Theirs.CommitID[:])
+	w.Write(c.Theirs.BlobID[:])
+}
+
+// EncodeObsolete writes the canonical binary form of o to w:
+// magic "arche-obsolete\0", 32-byte predecessor ID, u8 successor count +
+// 32-byte successor IDs, u8 reason length + reason, big-endian i64 timestamp.
+// NOTE(review): u8 fields cap successors and reason at 255 — confirm bounds.
+func EncodeObsolete(w *bytes.Buffer, o *ObsoleteMarker) {
+	w.WriteString("arche-obsolete\x00")
+	w.Write(o.Predecessor[:])
+	w.WriteByte(byte(len(o.Successors)))
+	for _, s := range o.Successors {
+		w.Write(s[:])
+	}
+	w.WriteByte(byte(len(o.Reason)))
+	w.WriteString(o.Reason)
+	writeInt64(w, o.Timestamp)
+}
+
+// DecodeBlob parses the canonical blob encoding produced by EncodeBlob,
+// returning an error on a bad magic prefix or truncated input. The content
+// is copied so the returned Blob does not alias data.
+func DecodeBlob(data []byte) (*Blob, error) {
+	prefix := "arche-blob\x00"
+	if !bytes.HasPrefix(data, []byte(prefix)) {
+		return nil, fmt.Errorf("invalid blob prefix")
+	}
+	data = data[len(prefix):]
+	if len(data) < 8 {
+		return nil, fmt.Errorf("blob: length field truncated")
+	}
+	l := binary.BigEndian.Uint64(data[:8])
+	data = data[8:]
+	if uint64(len(data)) < l {
+		return nil, fmt.Errorf("blob: content truncated (want %d bytes, got %d)", l, len(data))
+	}
+	content := make([]byte, l)
+	copy(content, data[:l])
+	return &Blob{Content: content}, nil
+}
+
+// DecodeTree parses the canonical tree encoding produced by EncodeTree.
+// Every length field is bounds-checked before use, and after parsing the
+// entries are verified to be strictly sorted by name (which also rejects
+// duplicate names) so only canonical encodings are accepted.
+func DecodeTree(data []byte) (*Tree, error) {
+	prefix := "arche-tree\x00"
+	if !bytes.HasPrefix(data, []byte(prefix)) {
+		return nil, fmt.Errorf("invalid tree prefix")
+	}
+	data = data[len(prefix):]
+	if len(data) < 4 {
+		return nil, fmt.Errorf("tree: entry count truncated")
+	}
+	n := binary.BigEndian.Uint32(data[:4])
+	data = data[4:]
+
+	entries := make([]TreeEntry, 0, n)
+	for i := uint32(0); i < n; i++ {
+		if len(data) < 1 {
+			return nil, fmt.Errorf("tree entry %d: truncated at mode", i)
+		}
+		mode := EntryMode(data[0])
+		data = data[1:]
+
+		if len(data) < 2 {
+			return nil, fmt.Errorf("tree entry %d: name len truncated", i)
+		}
+		nameLen := binary.BigEndian.Uint16(data[:2])
+		data = data[2:]
+		if len(data) < int(nameLen) {
+			return nil, fmt.Errorf("tree entry %d: name truncated", i)
+		}
+		name := string(data[:nameLen])
+		data = data[nameLen:]
+
+		if len(data) < 32 {
+			return nil, fmt.Errorf("tree entry %d: object ID truncated", i)
+		}
+		var oid [32]byte
+		copy(oid[:], data[:32])
+		data = data[32:]
+
+		if len(data) < 2 {
+			return nil, fmt.Errorf("tree entry %d: props count truncated", i)
+		}
+		numProps := binary.BigEndian.Uint16(data[:2])
+		data = data[2:]
+
+		props := make(map[string]string, numProps)
+		for j := uint16(0); j < numProps; j++ {
+			if len(data) < 2 {
+				return nil, fmt.Errorf("tree entry %d prop %d: key len truncated", i, j)
+			}
+			kl := binary.BigEndian.Uint16(data[:2])
+			data = data[2:]
+			if len(data) < int(kl) {
+				return nil, fmt.Errorf("tree entry %d prop %d: key truncated", i, j)
+			}
+			k := string(data[:kl])
+			data = data[kl:]
+
+			if len(data) < 2 {
+				return nil, fmt.Errorf("tree entry %d prop %d: val len truncated", i, j)
+			}
+			vl := binary.BigEndian.Uint16(data[:2])
+			data = data[2:]
+			if len(data) < int(vl) {
+				return nil, fmt.Errorf("tree entry %d prop %d: val truncated", i, j)
+			}
+			props[k] = string(data[:vl])
+			data = data[vl:]
+		}
+
+		entries = append(entries, TreeEntry{Name: name, Mode: mode, ObjectID: oid, Props: props})
+	}
+
+	// Strict ordering check: rejects both out-of-order and duplicate names.
+	for i := 1; i < len(entries); i++ {
+		if entries[i].Name <= entries[i-1].Name {
+			return nil, fmt.Errorf("tree entries not correctly sorted at index %d", i)
+		}
+	}
+	return &Tree{Entries: entries}, nil
+}
+
+// DecodeCommit parses the canonical commit encoding produced by
+// EncodeCommit, mirroring its field order exactly. The trailing signature
+// block is optional: it is read only when a flag byte of 1 follows the
+// message, matching the encoder which emits nothing for unsigned commits.
+func DecodeCommit(data []byte) (*Commit, error) {
+	prefix := "arche-commit\x00"
+	if !bytes.HasPrefix(data, []byte(prefix)) {
+		return nil, fmt.Errorf("invalid commit prefix")
+	}
+	data = data[len(prefix):]
+
+	if len(data) < 32 {
+		return nil, fmt.Errorf("commit: tree ID truncated")
+	}
+	var treeID [32]byte
+	copy(treeID[:], data[:32])
+	data = data[32:]
+
+	if len(data) < 1 {
+		return nil, fmt.Errorf("commit: parent count truncated")
+	}
+	np := int(data[0])
+	data = data[1:]
+	parents := make([][32]byte, np)
+	for i := range parents {
+		if len(data) < 32 {
+			return nil, fmt.Errorf("commit: parent %d truncated", i)
+		}
+		copy(parents[i][:], data[:32])
+		data = data[32:]
+	}
+
+	if len(data) < 1 {
+		return nil, fmt.Errorf("commit: change ID len truncated")
+	}
+	cidLen := int(data[0])
+	data = data[1:]
+	if len(data) < cidLen {
+		return nil, fmt.Errorf("commit: change ID truncated")
+	}
+	changeID := string(data[:cidLen])
+	data = data[cidLen:]
+
+	if len(data) < 1 {
+		return nil, fmt.Errorf("commit: phase truncated")
+	}
+	phase := Phase(data[0])
+	data = data[1:]
+
+	author, data, err := decodeSignature(data)
+	if err != nil {
+		return nil, fmt.Errorf("commit author: %w", err)
+	}
+	committer, data, err := decodeSignature(data)
+	if err != nil {
+		return nil, fmt.Errorf("commit committer: %w", err)
+	}
+
+	if len(data) < 4 {
+		return nil, fmt.Errorf("commit: message len truncated")
+	}
+	ml := binary.BigEndian.Uint32(data[:4])
+	data = data[4:]
+	if uint32(len(data)) < ml {
+		return nil, fmt.Errorf("commit: message truncated")
+	}
+	msg := string(data[:ml])
+	data = data[ml:]
+
+	// Optional signature: only present when a 1 flag byte follows the message.
+	var commitSig []byte
+	if len(data) >= 1 && data[0] == 1 {
+		data = data[1:]
+		if len(data) < 2 {
+			return nil, fmt.Errorf("commit: signature length truncated")
+		}
+		sigLen := binary.BigEndian.Uint16(data[:2])
+		data = data[2:]
+		if len(data) < int(sigLen) {
+			return nil, fmt.Errorf("commit: signature truncated")
+		}
+		commitSig = make([]byte, sigLen)
+		copy(commitSig, data[:sigLen])
+	}
+
+	return &Commit{
+		TreeID:    treeID,
+		Parents:   parents,
+		ChangeID:  changeID,
+		Author:    author,
+		Committer: committer,
+		Message:   msg,
+		Phase:     phase,
+		CommitSig: commitSig,
+	}, nil
+}
+
+// DecodeConflict parses the canonical conflict encoding produced by
+// EncodeConflict: an optional base side (flag byte 1) followed by the fixed
+// 128 bytes of ours/theirs commit and blob IDs.
+func DecodeConflict(data []byte) (*Conflict, error) {
+	prefix := "arche-conflict\x00"
+	if !bytes.HasPrefix(data, []byte(prefix)) {
+		return nil, fmt.Errorf("invalid conflict prefix")
+	}
+	data = data[len(prefix):]
+
+	if len(data) < 1 {
+		return nil, fmt.Errorf("conflict: base flag truncated")
+	}
+	hasBase := data[0] == 1
+	data = data[1:]
+
+	c := &Conflict{}
+	if hasBase {
+		if len(data) < 64 {
+			return nil, fmt.Errorf("conflict: base side truncated")
+		}
+		var base ConflictSide
+		copy(base.CommitID[:], data[:32])
+		copy(base.BlobID[:], data[32:64])
+		data = data[64:]
+		c.Base = &base
+	}
+
+	if len(data) < 128 {
+		return nil, fmt.Errorf("conflict: ours/theirs truncated")
+	}
+	copy(c.Ours.CommitID[:], data[:32])
+	copy(c.Ours.BlobID[:], data[32:64])
+	copy(c.Theirs.CommitID[:], data[64:96])
+	copy(c.Theirs.BlobID[:], data[96:128])
+	return c, nil
+}
+
+// DecodeObsolete parses the canonical obsolete-marker encoding produced by
+// EncodeObsolete, validating every length field before reading.
+func DecodeObsolete(data []byte) (*ObsoleteMarker, error) {
+	prefix := "arche-obsolete\x00"
+	if !bytes.HasPrefix(data, []byte(prefix)) {
+		return nil, fmt.Errorf("invalid obsolete prefix")
+	}
+	data = data[len(prefix):]
+
+	if len(data) < 32 {
+		return nil, fmt.Errorf("obsolete: predecessor truncated")
+	}
+	var pred [32]byte
+	copy(pred[:], data[:32])
+	data = data[32:]
+
+	if len(data) < 1 {
+		return nil, fmt.Errorf("obsolete: successor count truncated")
+	}
+	ns := int(data[0])
+	data = data[1:]
+	succs := make([][32]byte, ns)
+	for i := range succs {
+		if len(data) < 32 {
+			return nil, fmt.Errorf("obsolete: successor %d truncated", i)
+		}
+		copy(succs[i][:], data[:32])
+		data = data[32:]
+	}
+
+	if len(data) < 1 {
+		return nil, fmt.Errorf("obsolete: reason len truncated")
+	}
+	rl := int(data[0])
+	data = data[1:]
+	if len(data) < rl {
+		return nil, fmt.Errorf("obsolete: reason truncated")
+	}
+	reason := string(data[:rl])
+	data = data[rl:]
+
+	if len(data) < 8 {
+		return nil, fmt.Errorf("obsolete: timestamp truncated")
+	}
+	ts := int64(binary.BigEndian.Uint64(data[:8]))
+
+	return &ObsoleteMarker{
+		Predecessor: pred,
+		Successors:  succs,
+		Reason:      reason,
+		Timestamp:   ts,
+	}, nil
+}
+
+// encodeSignature writes a Signature as u16 name length + name, u16 email
+// length + email, and the timestamp as big-endian i64 Unix nanoseconds.
+func encodeSignature(w *bytes.Buffer, s Signature) {
+	writeUint16(w, uint16(len(s.Name)))
+	w.WriteString(s.Name)
+	writeUint16(w, uint16(len(s.Email)))
+	w.WriteString(s.Email)
+	writeInt64(w, s.Timestamp.UnixNano())
+}
+
+// decodeSignature parses one Signature from the front of data, mirroring
+// encodeSignature, and returns the remaining unconsumed bytes. The
+// timestamp is normalized to UTC so decoded commits compare consistently.
+func decodeSignature(data []byte) (Signature, []byte, error) {
+	if len(data) < 2 {
+		return Signature{}, nil, fmt.Errorf("name len truncated")
+	}
+	nl := binary.BigEndian.Uint16(data[:2])
+	data = data[2:]
+	if len(data) < int(nl) {
+		return Signature{}, nil, fmt.Errorf("name truncated")
+	}
+	name := string(data[:nl])
+	data = data[nl:]
+
+	if len(data) < 2 {
+		return Signature{}, nil, fmt.Errorf("email len truncated")
+	}
+	el := binary.BigEndian.Uint16(data[:2])
+	data = data[2:]
+	if len(data) < int(el) {
+		return Signature{}, nil, fmt.Errorf("email truncated")
+	}
+	email := string(data[:el])
+	data = data[el:]
+
+	if len(data) < 8 {
+		return Signature{}, nil, fmt.Errorf("timestamp truncated")
+	}
+	unixNs := int64(binary.BigEndian.Uint64(data[:8]))
+	data = data[8:]
+
+	return Signature{
+		Name:      name,
+		Email:     email,
+		Timestamp: time.Unix(0, unixNs).UTC(),
+	}, data, nil
+}
+
+// writeUint64 appends v to w in big-endian byte order.
+func writeUint64(w *bytes.Buffer, v uint64) {
+	var b [8]byte
+	binary.BigEndian.PutUint64(b[:], v)
+	w.Write(b[:])
+}
+
+// writeUint32 appends v to w in big-endian byte order.
+func writeUint32(w *bytes.Buffer, v uint32) {
+	var b [4]byte
+	binary.BigEndian.PutUint32(b[:], v)
+	w.Write(b[:])
+}
+
+// writeUint16 appends v to w in big-endian byte order.
+func writeUint16(w *bytes.Buffer, v uint16) {
+	var b [2]byte
+	binary.BigEndian.PutUint16(b[:], v)
+	w.Write(b[:])
+}
+
+// writeInt64 appends v to w as a big-endian two's-complement 8-byte value.
+func writeInt64(w *bytes.Buffer, v int64) {
+	var b [8]byte
+	binary.BigEndian.PutUint64(b[:], uint64(v))
+	w.Write(b[:])
+}

internal/object/encode_test.go [A]
--- /dev/null
+++ b/internal/object/encode_test.go
@@ -1,0 +1,417 @@
+package object_test
+
+import (
+	"bytes"
+	"testing"
+	"time"
+
+	"arche/internal/object"
+)
+
+func TestBlobRoundtrip(t *testing.T) {
+	b := &object.Blob{Content: []byte("hello Arche!\n")}
+	var buf bytes.Buffer
+	object.EncodeBlob(&buf, b)
+	got, err := object.DecodeBlob(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeBlob: %v", err)
+	}
+	if !bytes.Equal(got.Content, b.Content) {
+		t.Errorf("content mismatch: got %q, want %q", got.Content, b.Content)
+	}
+}
+
+func TestBlobEmpty(t *testing.T) {
+	b := &object.Blob{Content: []byte{}}
+	var buf bytes.Buffer
+	object.EncodeBlob(&buf, b)
+	got, err := object.DecodeBlob(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeBlob: %v", err)
+	}
+	if !bytes.Equal(got.Content, b.Content) {
+		t.Error("empty blob roundtrip failed")
+	}
+}
+
+func TestBlobHashDeterministic(t *testing.T) {
+	b := &object.Blob{Content: []byte("deterministic")}
+	h1 := object.HashBlob(b)
+	h2 := object.HashBlob(b)
+	if h1 != h2 {
+		t.Error("blob hash is non-deterministic")
+	}
+}
+
+func TestBlobDifferentContentDifferentHash(t *testing.T) {
+	h1 := object.HashBlob(&object.Blob{Content: []byte("a")})
+	h2 := object.HashBlob(&object.Blob{Content: []byte("b")})
+	if h1 == h2 {
+		t.Error("different blobs have same hash")
+	}
+}
+
+func TestBlobNullBytes(t *testing.T) {
+	b := &object.Blob{Content: []byte{0x00, 0x01, 0x02, 0xFF}}
+	var buf bytes.Buffer
+	object.EncodeBlob(&buf, b)
+	got, err := object.DecodeBlob(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeBlob: %v", err)
+	}
+	if !bytes.Equal(got.Content, b.Content) {
+		t.Error("binary content mismatch")
+	}
+}
+
+func TestTreeRoundtrip(t *testing.T) {
+	var id1, id2 [32]byte
+	id1[0] = 0xAA
+	id2[0] = 0xBB
+
+	tree := &object.Tree{
+		Entries: []object.TreeEntry{
+			{Name: "zebra.txt", Mode: object.ModeFile, ObjectID: id2},
+			{
+				Name:     "alpha.go",
+				Mode:     object.ModeExec,
+				ObjectID: id1,
+				Props:    map[string]string{"svn:eol-style": "LF"},
+			},
+		},
+	}
+	var buf bytes.Buffer
+	object.EncodeTree(&buf, tree)
+	got, err := object.DecodeTree(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeTree: %v", err)
+	}
+	if len(got.Entries) != 2 {
+		t.Fatalf("entries: got %d, want 2", len(got.Entries))
+	}
+	if got.Entries[0].Name != "alpha.go" {
+		t.Errorf("first entry: got %q, want alpha.go", got.Entries[0].Name)
+	}
+	if got.Entries[1].Name != "zebra.txt" {
+		t.Errorf("second entry: got %q, want zebra.txt", got.Entries[1].Name)
+	}
+	if got.Entries[0].ObjectID != id1 {
+		t.Error("ObjectID mismatch for alpha.go")
+	}
+	if got.Entries[0].Props["svn:eol-style"] != "LF" {
+		t.Errorf("prop mismatch: got %q", got.Entries[0].Props["svn:eol-style"])
+	}
+}
+
+func TestTreeEmpty(t *testing.T) {
+	tree := &object.Tree{Entries: nil}
+	var buf bytes.Buffer
+	object.EncodeTree(&buf, tree)
+	got, err := object.DecodeTree(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeTree: %v", err)
+	}
+	if len(got.Entries) != 0 {
+		t.Errorf("expected empty tree, got %d entries", len(got.Entries))
+	}
+}
+
+func TestTreeHashDeterministic(t *testing.T) {
+	var id [32]byte
+	id[0] = 0x42
+	tree := &object.Tree{
+		Entries: []object.TreeEntry{
+			{Name: "file.txt", Mode: object.ModeFile, ObjectID: id},
+		},
+	}
+	h1 := object.HashTree(tree)
+	h2 := object.HashTree(tree)
+	if h1 != h2 {
+		t.Error("tree hash is non-deterministic")
+	}
+}
+
+// TestTreeSortedEntriesProduceSameHash checks canonicalization: two trees
+// with the same entries in different input order must hash identically,
+// because EncodeTree sorts entries by name before hashing.
+func TestTreeSortedEntriesProduceSameHash(t *testing.T) {
+	var id1, id2 [32]byte
+	id1[0] = 1
+	id2[0] = 2
+	t1 := &object.Tree{Entries: []object.TreeEntry{
+		{Name: "a.go", Mode: object.ModeFile, ObjectID: id1},
+		{Name: "b.go", Mode: object.ModeFile, ObjectID: id2},
+	}}
+	t2 := &object.Tree{Entries: []object.TreeEntry{
+		{Name: "b.go", Mode: object.ModeFile, ObjectID: id2},
+		{Name: "a.go", Mode: object.ModeFile, ObjectID: id1},
+	}}
+	if object.HashTree(t1) != object.HashTree(t2) {
+		t.Error("trees with same entries in different order should produce same hash (encoder sorts)")
+	}
+}
+
+func TestTreeWithSubdir(t *testing.T) {
+	var fileID [32]byte
+	fileID[0] = 0xCC
+	tree := &object.Tree{
+		Entries: []object.TreeEntry{
+			{Name: "src", Mode: object.ModeDir, ObjectID: fileID},
+			{Name: "README.md", Mode: object.ModeFile, ObjectID: fileID},
+		},
+	}
+	var buf bytes.Buffer
+	object.EncodeTree(&buf, tree)
+	got, err := object.DecodeTree(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeTree: %v", err)
+	}
+	if len(got.Entries) != 2 {
+		t.Fatalf("expected 2 entries, got %d", len(got.Entries))
+	}
+}
+
+func TestCommitRoundtrip(t *testing.T) {
+	var treeID [32]byte
+	treeID[0] = 0x11
+	now := time.Unix(1700000000, 0).UTC()
+	c := &object.Commit{
+		TreeID:    treeID,
+		Parents:   nil,
+		ChangeID:  "kptxoyvr",
+		Author:    object.Signature{Name: "Alice", Email: "alice@example.com", Timestamp: now},
+		Committer: object.Signature{Name: "Bob", Email: "bob@example.com", Timestamp: now},
+		Message:   "initial commit",
+		Phase:     object.PhaseDraft,
+	}
+	var buf bytes.Buffer
+	object.EncodeCommit(&buf, c)
+	got, err := object.DecodeCommit(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeCommit: %v", err)
+	}
+	if got.TreeID != c.TreeID {
+		t.Error("TreeID mismatch")
+	}
+	if got.ChangeID != c.ChangeID {
+		t.Errorf("ChangeID: got %q, want %q", got.ChangeID, c.ChangeID)
+	}
+	if got.Author.Name != c.Author.Name {
+		t.Errorf("Author.Name: got %q, want %q", got.Author.Name, c.Author.Name)
+	}
+	if got.Author.Email != c.Author.Email {
+		t.Errorf("Author.Email: got %q, want %q", got.Author.Email, c.Author.Email)
+	}
+	if !got.Author.Timestamp.Equal(c.Author.Timestamp) {
+		t.Errorf("Author.Timestamp: got %v, want %v", got.Author.Timestamp, c.Author.Timestamp)
+	}
+	if got.Message != c.Message {
+		t.Errorf("Message: got %q, want %q", got.Message, c.Message)
+	}
+	if got.Phase != c.Phase {
+		t.Errorf("Phase: got %v, want %v", got.Phase, c.Phase)
+	}
+}
+
+func TestCommitWithParents(t *testing.T) {
+	var p1, p2 [32]byte
+	p1[0] = 1
+	p2[0] = 2
+	now := time.Unix(1700000000, 0).UTC()
+	c := &object.Commit{
+		Parents:   [][32]byte{p1, p2},
+		ChangeID:  "abcdefgh",
+		Author:    object.Signature{Name: "Test", Email: "t@t.com", Timestamp: now},
+		Committer: object.Signature{Name: "Test", Email: "t@t.com", Timestamp: now},
+		Phase:     object.PhasePublic,
+	}
+	var buf bytes.Buffer
+	object.EncodeCommit(&buf, c)
+	got, err := object.DecodeCommit(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeCommit: %v", err)
+	}
+	if len(got.Parents) != 2 {
+		t.Fatalf("parents: got %d, want 2", len(got.Parents))
+	}
+	if got.Parents[0] != p1 || got.Parents[1] != p2 {
+		t.Error("parent IDs mismatch")
+	}
+}
+
+func TestCommitHashDeterministic(t *testing.T) {
+	now := time.Unix(1700000000, 0).UTC()
+	c := &object.Commit{
+		ChangeID:  "aaaaaaaa",
+		Author:    object.Signature{Name: "X", Email: "x@x.com", Timestamp: now},
+		Committer: object.Signature{Name: "X", Email: "x@x.com", Timestamp: now},
+		Message:   "msg",
+		Phase:     object.PhaseDraft,
+	}
+	h1 := object.HashCommit(c)
+	h2 := object.HashCommit(c)
+	if h1 != h2 {
+		t.Error("commit hash non-deterministic")
+	}
+}
+
+func TestObsoleteMarkerRoundtrip(t *testing.T) {
+	var pred, succ [32]byte
+	pred[0] = 0x01
+	succ[0] = 0x02
+	o := &object.ObsoleteMarker{
+		Predecessor: pred,
+		Successors:  [][32]byte{succ},
+		Reason:      "rebase",
+		Timestamp:   1700000000,
+	}
+	var buf bytes.Buffer
+	object.EncodeObsolete(&buf, o)
+	got, err := object.DecodeObsolete(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeObsolete: %v", err)
+	}
+	if got.Predecessor != o.Predecessor {
+		t.Error("Predecessor mismatch")
+	}
+	if len(got.Successors) != 1 || got.Successors[0] != succ {
+		t.Error("Successors mismatch")
+	}
+	if got.Reason != o.Reason {
+		t.Errorf("Reason: got %q, want %q", got.Reason, o.Reason)
+	}
+	if got.Timestamp != o.Timestamp {
+		t.Errorf("Timestamp: got %d, want %d", got.Timestamp, o.Timestamp)
+	}
+}
+
+func TestObsoleteMarkerMultipleSuccessors(t *testing.T) {
+	var pred, s1, s2 [32]byte
+	pred[0] = 1
+	s1[0] = 2
+	s2[0] = 3
+	o := &object.ObsoleteMarker{
+		Predecessor: pred,
+		Successors:  [][32]byte{s1, s2},
+		Reason:      "split",
+		Timestamp:   0,
+	}
+	var buf bytes.Buffer
+	object.EncodeObsolete(&buf, o)
+	got, err := object.DecodeObsolete(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeObsolete: %v", err)
+	}
+	if len(got.Successors) != 2 {
+		t.Fatalf("successors: got %d, want 2", len(got.Successors))
+	}
+}
+
+func TestConflictRoundtrip(t *testing.T) {
+	var c1, c2, b1, b2 [32]byte
+	c1[0] = 1
+	c2[0] = 2
+	b1[0] = 3
+	b2[0] = 4
+	conf := &object.Conflict{
+		Ours:   object.ConflictSide{CommitID: c1, BlobID: b1},
+		Theirs: object.ConflictSide{CommitID: c2, BlobID: b2},
+	}
+	var buf bytes.Buffer
+	object.EncodeConflict(&buf, conf)
+	got, err := object.DecodeConflict(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeConflict: %v", err)
+	}
+	if got.Base != nil {
+		t.Error("expected nil Base")
+	}
+	if got.Ours.CommitID != conf.Ours.CommitID {
+		t.Error("Ours.CommitID mismatch")
+	}
+	if got.Theirs.BlobID != conf.Theirs.BlobID {
+		t.Error("Theirs.BlobID mismatch")
+	}
+}
+
+func TestConflictWithBase(t *testing.T) {
+	var cb, bb [32]byte
+	cb[0] = 3
+	bb[0] = 6
+	conf := &object.Conflict{
+		Base:   &object.ConflictSide{CommitID: cb, BlobID: bb},
+		Ours:   object.ConflictSide{},
+		Theirs: object.ConflictSide{},
+	}
+	var buf bytes.Buffer
+	object.EncodeConflict(&buf, conf)
+	got, err := object.DecodeConflict(buf.Bytes())
+	if err != nil {
+		t.Fatalf("DecodeConflict: %v", err)
+	}
+	if got.Base == nil {
+		t.Fatal("expected non-nil Base")
+	}
+	if got.Base.CommitID != cb {
+		t.Error("Base.CommitID mismatch")
+	}
+	if got.Base.BlobID != bb {
+		t.Error("Base.BlobID mismatch")
+	}
+}
+
+func TestNewChangeIDLength(t *testing.T) {
+	for _, n := range []int{8, 10, 12} {
+		id := object.NewChangeID(n)
+		if len(id) != n {
+			t.Errorf("NewChangeID(%d): got length %d", n, len(id))
+		}
+	}
+}
+
+// TestNewChangeIDAlphabet verifies every generated character comes from the
+// 22-letter unambiguous alphabet (duplicated here since the constant is
+// unexported).
+func TestNewChangeIDAlphabet(t *testing.T) {
+	const alphabet = "abcdefghjkmnpqrstvwxyz"
+	id := object.NewChangeID(8)
+	for _, c := range id {
+		found := false
+		for _, a := range alphabet {
+			if c == a {
+				found = true
+				break
+			}
+		}
+		if !found {
+			t.Errorf("character %q not in unambiguous alphabet", c)
+		}
+	}
+}
+
+func TestFormatStripChangeID(t *testing.T) {
+	raw := "kptxoyvr"
+	formatted := object.FormatChangeID(raw)
+	if formatted != "ch:kptxoyvr" {
+		t.Errorf("FormatChangeID: got %q, want %q", formatted, "ch:kptxoyvr")
+	}
+	stripped := object.StripChangeIDPrefix(formatted)
+	if stripped != raw {
+		t.Errorf("StripChangeIDPrefix: got %q, want %q", stripped, raw)
+	}
+	stripped2 := object.StripChangeIDPrefix(raw)
+	if stripped2 != raw {
+		t.Errorf("StripChangeIDPrefix (no prefix): got %q, want %q", stripped2, raw)
+	}
+}
+
+// TestPhaseString covers all defined phases plus an out-of-range value,
+// which must stringify as "unknown".
+func TestPhaseString(t *testing.T) {
+	cases := []struct {
+		phase object.Phase
+		want  string
+	}{
+		{object.PhaseDraft, "draft"},
+		{object.PhasePublic, "public"},
+		{object.PhaseSecret, "secret"},
+		{object.Phase(99), "unknown"},
+	}
+	for _, tc := range cases {
+		if got := tc.phase.String(); got != tc.want {
+			t.Errorf("Phase(%d).String(): got %q, want %q", tc.phase, got, tc.want)
+		}
+	}
+}

internal/object/issue_event.go [A]
--- /dev/null
+++ b/internal/object/issue_event.go
@@ -1,0 +1,139 @@
+package object
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+
+	"github.com/zeebo/blake3"
+)
+
+// IssueEventObject is one event in an issue's append-only history, hashed
+// and encoded like the other content-addressed objects.
+type IssueEventObject struct {
+	IssueID string     // identifier of the issue this event belongs to
+	Kind    string     // event type tag (encoded with a u8 length)
+	Payload []byte     // opaque event body; interpretation depends on Kind
+	Author  string     // event author (encoded with a u8 length)
+	HLCMS   int64      // hybrid-logical-clock wall component, milliseconds
+	HLCSeq  int        // hybrid-logical-clock sequence component
+	Parents [][32]byte // hashes of preceding events (DAG edges)
+}
+
+func EncodeIssueEvent(w *bytes.Buffer, ev *IssueEventObject) {
+	w.WriteString("arche-issue\x00")
+	writeUint16(w, uint16(len(ev.IssueID)))
+	w.WriteString(ev.IssueID)
+	w.WriteByte(byte(len(ev.Kind)))
+	w.WriteString(ev.Kind)
+	writeUint32(w, uint32(len(ev.Payload)))
+	w.Write(ev.Payload)
+	w.WriteByte(byte(len(ev.Author)))
+	w.WriteString(ev.Author)
+	writeInt64(w, ev.HLCMS)
+	var seqBuf [4]byte
+	binary.BigEndian.PutUint32(seqBuf[:], uint32(ev.HLCSeq))
+	w.Write(seqBuf[:])
+	w.WriteByte(byte(len(ev.Parents)))
+	for _, p := range ev.Parents {
+		w.Write(p[:])
+	}
+}
+
+// HashIssueEvent returns the BLAKE3-256 digest of the canonical
+// issue-event encoding.
+func HashIssueEvent(ev *IssueEventObject) [32]byte {
+	var buf bytes.Buffer
+	EncodeIssueEvent(&buf, ev)
+	return blake3.Sum256(buf.Bytes())
+}
+
+// DecodeIssueEvent parses the canonical issue-event encoding produced by
+// EncodeIssueEvent. The two closures consume length-prefixed strings from
+// the shared cursor d (u16- and u8-length variants), keeping each field
+// read bounds-checked.
+func DecodeIssueEvent(data []byte) (*IssueEventObject, error) {
+	const prefix = "arche-issue\x00"
+	if !bytes.HasPrefix(data, []byte(prefix)) {
+		return nil, fmt.Errorf("invalid issue-event prefix")
+	}
+	d := data[len(prefix):]
+
+	// readStr16 consumes a u16-length-prefixed string, advancing d.
+	readStr16 := func() (string, error) {
+		if len(d) < 2 {
+			return "", fmt.Errorf("issue-event: truncated string length")
+		}
+		l := int(binary.BigEndian.Uint16(d[:2]))
+		d = d[2:]
+		if len(d) < l {
+			return "", fmt.Errorf("issue-event: string truncated")
+		}
+		s := string(d[:l])
+		d = d[l:]
+		return s, nil
+	}
+
+	// readStr8 consumes a u8-length-prefixed string, advancing d.
+	readStr8 := func() (string, error) {
+		if len(d) < 1 {
+			return "", fmt.Errorf("issue-event: truncated byte-length string")
+		}
+		l := int(d[0])
+		d = d[1:]
+		if len(d) < l {
+			return "", fmt.Errorf("issue-event: byte-length string truncated")
+		}
+		s := string(d[:l])
+		d = d[l:]
+		return s, nil
+	}
+
+	issueID, err := readStr16()
+	if err != nil {
+		return nil, fmt.Errorf("issue-event decode IssueID: %w", err)
+	}
+
+	kind, err := readStr8()
+	if err != nil {
+		return nil, fmt.Errorf("issue-event decode Kind: %w", err)
+	}
+
+	if len(d) < 4 {
+		return nil, fmt.Errorf("issue-event: payload length truncated")
+	}
+	payLen := int(binary.BigEndian.Uint32(d[:4]))
+	d = d[4:]
+	if len(d) < payLen {
+		return nil, fmt.Errorf("issue-event: payload truncated")
+	}
+	payload := make([]byte, payLen)
+	copy(payload, d[:payLen])
+	d = d[payLen:]
+
+	author, err := readStr8()
+	if err != nil {
+		return nil, fmt.Errorf("issue-event decode Author: %w", err)
+	}
+
+	if len(d) < 12 {
+		return nil, fmt.Errorf("issue-event: HLC fields truncated")
+	}
+	hlcMS := int64(binary.BigEndian.Uint64(d[:8]))
+	hlcSeq := int(binary.BigEndian.Uint32(d[8:12]))
+	d = d[12:]
+
+	if len(d) < 1 {
+		return nil, fmt.Errorf("issue-event: numParents truncated")
+	}
+	numParents := int(d[0])
+	d = d[1:]
+	if len(d) < numParents*32 {
+		return nil, fmt.Errorf("issue-event: parents truncated")
+	}
+	parents := make([][32]byte, numParents)
+	for i := range parents {
+		copy(parents[i][:], d[:32])
+		d = d[32:]
+	}
+
+	return &IssueEventObject{
+		IssueID: issueID,
+		Kind:    kind,
+		Payload: payload,
+		Author:  author,
+		HLCMS:   hlcMS,
+		HLCSeq:  hlcSeq,
+		Parents: parents,
+	}, nil
+}

internal/object/object.go [A]
--- /dev/null
+++ b/internal/object/object.go
@@ -1,0 +1,106 @@
+package object
+
+import "time"
+
+type Kind string
+
+const (
+	KindBlob       Kind = "blob"
+	KindTree       Kind = "tree"
+	KindCommit     Kind = "commit"
+	KindConflict   Kind = "conflict"
+	KindObsolete   Kind = "obsolete"
+	KindIssueEvent Kind = "issue-event"
+)
+
+type EntryMode uint8
+
+const (
+	ModeFile    EntryMode = 0
+	ModeExec    EntryMode = 1
+	ModeSymlink EntryMode = 2
+	ModeDir     EntryMode = 3
+)
+
+type Phase uint8
+
+const (
+	PhaseDraft  Phase = 0
+	PhasePublic Phase = 1
+	PhaseSecret Phase = 2
+)
+
+// String returns the lowercase human-readable name of the phase;
+// values outside the defined set yield "unknown".
+func (p Phase) String() string {
+	names := [...]string{
+		PhaseDraft:  "draft",
+		PhasePublic: "public",
+		PhaseSecret: "secret",
+	}
+	if int(p) < len(names) {
+		return names[p]
+	}
+	return "unknown"
+}
+
+var ZeroID [32]byte
+
+type Blob struct {
+	Content []byte
+}
+
+type TreeEntry struct {
+	Name     string
+	Mode     EntryMode
+	ObjectID [32]byte
+	Props    map[string]string
+}
+
+type Tree struct {
+	Entries []TreeEntry
+}
+
+type Signature struct {
+	Name      string
+	Email     string
+	Timestamp time.Time
+}
+
+type Commit struct {
+	TreeID    [32]byte
+	Parents   [][32]byte
+	ChangeID  string
+	Author    Signature
+	Committer Signature
+	Message   string
+	Phase     Phase
+	CommitSig []byte
+}
+
+type ConflictSide struct {
+	CommitID [32]byte
+	BlobID   [32]byte
+}
+
+type Conflict struct {
+	Base   *ConflictSide
+	Ours   ConflictSide
+	Theirs ConflictSide
+}
+
+type ObsoleteMarker struct {
+	Predecessor [32]byte
+	Successors  [][32]byte
+	Reason      string
+	Timestamp   int64
+}
+
+// Short returns the 12-character lowercase-hex abbreviation of an object
+// ID (its first 6 bytes), for display purposes.
+func Short(id [32]byte) string {
+	const digits = "0123456789abcdef"
+	var out [12]byte
+	for i, b := range id[:6] {
+		out[2*i], out[2*i+1] = digits[b>>4], digits[b&0x0f]
+	}
+	return string(out[:])
+}

internal/object/signing.go [A]
--- /dev/null
+++ b/internal/object/signing.go
@@ -1,0 +1,56 @@
+package object
+
+import (
+	"crypto/rand"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// SignCommitBody signs body with the SSH private key at keyFile and returns
+// the SSH wire-format signature blob plus the signer's SHA256 public-key
+// fingerprint. An empty keyFile falls back to FindDefaultSSHKey.
+// NOTE(review): encrypted (passphrase-protected) keys will fail in
+// ssh.ParsePrivateKey — confirm that is acceptable or add a passphrase path.
+func SignCommitBody(body []byte, keyFile string) (sig []byte, fingerprint string, err error) {
+	if keyFile == "" {
+		keyFile, err = FindDefaultSSHKey()
+		if err != nil {
+			return nil, "", err
+		}
+	}
+	pemData, err := os.ReadFile(keyFile)
+	if err != nil {
+		return nil, "", fmt.Errorf("read ssh key %s: %w", keyFile, err)
+	}
+	signer, err := ssh.ParsePrivateKey(pemData)
+	if err != nil {
+		return nil, "", fmt.Errorf("parse ssh key %s: %w", keyFile, err)
+	}
+	ssig, err := signer.Sign(rand.Reader, body)
+	if err != nil {
+		return nil, "", fmt.Errorf("ssh sign: %w", err)
+	}
+	blob := ssh.Marshal(ssig)
+	fp := ssh.FingerprintSHA256(signer.PublicKey())
+	return blob, fp, nil
+}
+
+// VerifyCommitSig checks sigBlob (an SSH wire-format signature, as produced
+// by SignCommitBody) against body using pubKey. A nil return means valid.
+func VerifyCommitSig(body, sigBlob []byte, pubKey ssh.PublicKey) error {
+	var sig ssh.Signature
+	if err := ssh.Unmarshal(sigBlob, &sig); err != nil {
+		return fmt.Errorf("unmarshal signature: %w", err)
+	}
+	return pubKey.Verify(body, &sig)
+}
+
+// FindDefaultSSHKey returns the first existing key path among the
+// conventional ~/.ssh names, preferring ed25519 over ecdsa over rsa.
+// NOTE(review): os.Stat succeeding only proves the path exists — a
+// directory or unreadable file would still be returned; confirm acceptable.
+func FindDefaultSSHKey() (string, error) {
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return "", fmt.Errorf("find home dir: %w", err)
+	}
+	for _, name := range []string{"id_ed25519", "id_ecdsa", "id_rsa"} {
+		p := filepath.Join(home, ".ssh", name)
+		if _, err := os.Stat(p); err == nil {
+			return p, nil
+		}
+	}
+	return "", fmt.Errorf("no SSH private key found in ~/.ssh; set sign.key in .arche/config.toml or use arche snap --key")
+}

internal/repo/config.go [A]
--- /dev/null
+++ b/internal/repo/config.go
@@ -1,0 +1,117 @@
+package repo
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/BurntSushi/toml"
+)
+
+type Config struct {
+	Storage StorageConfig  `toml:"storage"`
+	User    UserConfig     `toml:"user"`
+	UI      UIConfig       `toml:"ui"`
+	Serve   ServeConfig    `toml:"serve"`
+	Hooks   HooksConfig    `toml:"hooks"`
+	Git     GitConfig      `toml:"git"`
+	Sign    SignConfig     `toml:"sign"`
+	Remotes []RemoteConfig `toml:"remote"`
+}
+
+type StorageConfig struct {
+	PackThreshold int    `toml:"pack_threshold"`
+	PackSealSize  int    `toml:"pack_seal_size"`
+	Compression   string `toml:"compression"`
+}
+
+type UserConfig struct {
+	Name  string `toml:"name"`
+	Email string `toml:"email"`
+}
+
+type UIConfig struct {
+	Port int `toml:"port"`
+}
+
+type ServeConfig struct {
+	Port  int    `toml:"port"`
+	Token string `toml:"token"`
+}
+
+type RemoteConfig struct {
+	Name  string `toml:"name"`
+	URL   string `toml:"url"`
+	Token string `toml:"token"`
+}
+
+type HooksConfig struct {
+	PreSnap  []string `toml:"pre-snap"`
+	PostSnap []string `toml:"post-snap"`
+}
+
+type GitConfig struct {
+	Enabled bool   `toml:"enabled"`
+	Remote  string `toml:"remote"`
+}
+
+type SignConfig struct {
+	Auto    bool   `toml:"auto"`
+	KeyFile string `toml:"key"`
+}
+
+// DefaultConfig builds the baseline configuration used when no config file
+// exists: zstd-compressed storage with a 128 KiB pack threshold, user
+// identity derived from the environment, UI on 7070 and serve on 8765.
+// Fields not set here (e.g. PackSealSize) stay at their zero values —
+// presumably meaning "disabled"; confirm against the storage layer.
+func DefaultConfig() *Config {
+	return &Config{
+		Storage: StorageConfig{
+			PackThreshold: 128 * 1024,
+			Compression:   "zstd",
+		},
+		User: UserConfig{
+			Name:  gitConfigValue("user.name", "Unknown User"),
+			Email: gitConfigValue("user.email", "unknown@example.com"),
+		},
+		UI:    UIConfig{Port: 7070},
+		Serve: ServeConfig{Port: 8765},
+	}
+}
+
+// loadConfig reads the TOML config at path over DefaultConfig, so file
+// values override defaults and omitted keys keep them. A missing file is
+// not an error — the defaults are returned as-is.
+func loadConfig(path string) (*Config, error) {
+	cfg := DefaultConfig()
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		return cfg, nil
+	}
+	_, err := toml.DecodeFile(path, cfg)
+	return cfg, err
+}
+
+// writeConfig writes cfg as TOML to path, prepending a short header
+// comment. Unlike the usual defer-Close pattern, the Close error is
+// propagated: for a file we just wrote, Close can surface the final flush
+// failure, and silently dropping it could leave a truncated config behind
+// a nil error.
+func writeConfig(path string, cfg *Config) error {
+	f, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	if _, err := fmt.Fprintln(f, "# Arche repository configuration"); err != nil {
+		f.Close()
+		return err
+	}
+	if err := toml.NewEncoder(f).Encode(cfg); err != nil {
+		f.Close()
+		return err
+	}
+	return f.Close()
+}
+
+// gitConfigValue derives a default identity value for the given key.
+// Despite the name, it never consults git config: it reads git's
+// environment override variables (GIT_AUTHOR_NAME / GIT_AUTHOR_EMAIL) and
+// falls back to $USER and user@hostname, then to the supplied fallback.
+// Unrecognized keys always return fallback.
+func gitConfigValue(key, fallback string) string {
+	switch key {
+	case "user.name":
+		if v := os.Getenv("GIT_AUTHOR_NAME"); v != "" {
+			return v
+		}
+		if v := os.Getenv("USER"); v != "" {
+			return v
+		}
+	case "user.email":
+		if v := os.Getenv("GIT_AUTHOR_EMAIL"); v != "" {
+			return v
+		}
+		if host, _ := os.Hostname(); host != "" {
+			user := os.Getenv("USER")
+			if user != "" {
+				return user + "@" + host
+			}
+		}
+	}
+	return fallback
+}

internal/repo/repo.go [A]
--- /dev/null
+++ b/internal/repo/repo.go
@@ -1,0 +1,382 @@
+package repo
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/store"
+)
+
+// Repository layout: all metadata lives under .arche except the sentinel
+// file placed in linked worktrees.
+const (
+	archeDirName = ".arche"
+	storeFile    = "store.db" // object store database
+	packsDir     = "packs" // pack files
+	headFile     = "HEAD" // current change ID
+	configFile   = "config.toml"
+	worktreeFile = ".arche-wt" // sentinel: "<main root>\n<worktree name>\n"
+)
+
+// ErrNotARepo is returned when no .arche directory or worktree sentinel
+// exists in any ancestor of the starting path.
+var ErrNotARepo = errors.New("not an Arche repository (no .arche directory found)")
+
+// Repo is an open Arche repository: its root directory, backing object
+// store, and loaded configuration. When opened through a linked worktree,
+// Root points at the worktree directory while archeDir remains the main
+// repository's metadata directory.
+type Repo struct {
+	Root  string
+	Store store.Store
+	Cfg   *Config
+
+	archeDir     string // main repository's .arche directory
+	worktreeName string // non-empty when opened via a linked worktree
+}
+
+// ArcheDir returns the path of the repository's .arche metadata directory.
+func (r *Repo) ArcheDir() string { return r.archeDir }
+
+// headPath returns the location of the HEAD file: per-worktree when this
+// Repo was opened through a linked worktree, otherwise .arche/HEAD.
+func (r *Repo) headPath() string {
+	if r.worktreeName != "" {
+		return filepath.Join(r.archeDir, "worktrees", r.worktreeName, headFile)
+	}
+	return filepath.Join(r.archeDir, headFile)
+}
+
+// repoLocation is the result of walking up from a path: the main repository
+// root, the directory the walk matched (worktree root or main root), and
+// the worktree name when a sentinel file was found.
+type repoLocation struct {
+	mainRoot     string
+	root         string
+	worktreeName string
+}
+
+// findLocation walks from start toward the filesystem root, stopping at the
+// first directory that contains either a .arche directory (main repo) or a
+// worktree sentinel pointing back at the main repo. Returns ErrNotARepo
+// when the walk reaches the root without a match.
+func findLocation(start string) (*repoLocation, error) {
+	dir, err := filepath.Abs(start)
+	if err != nil {
+		return nil, err
+	}
+	for {
+		// Main repository: a real .arche directory (not a plain file).
+		if fi, err := os.Stat(filepath.Join(dir, archeDirName)); err == nil && fi.IsDir() {
+			return &repoLocation{mainRoot: dir, root: dir}, nil
+		}
+		// Linked worktree: sentinel holds "<main root>\n<worktree name>".
+		if data, err := os.ReadFile(filepath.Join(dir, worktreeFile)); err == nil {
+			parts := strings.SplitN(strings.TrimSpace(string(data)), "\n", 2)
+			// Both halves must be non-empty; a malformed sentinel is ignored
+			// and the walk continues upward.
+			if len(parts) == 2 && parts[0] != "" && parts[1] != "" {
+				return &repoLocation{
+					mainRoot:     parts[0],
+					root:         dir,
+					worktreeName: parts[1],
+				}, nil
+			}
+		}
+		parent := filepath.Dir(dir)
+		if parent == dir { // reached the filesystem root
+			return nil, ErrNotARepo
+		}
+		dir = parent
+	}
+}
+
+// FindRoot returns the main repository root for any path inside the repo,
+// resolving linked worktrees back to the primary checkout.
+func FindRoot(start string) (string, error) {
+	loc, err := findLocation(start)
+	if err != nil {
+		return "", err
+	}
+	return loc.mainRoot, nil
+}
+
+// Open locates and opens the repository containing dir. The store and
+// config are always loaded from the main repository; when dir is inside a
+// linked worktree the returned Repo's Root and worktree name are adjusted
+// so HEAD resolution is per-worktree.
+func Open(dir string) (*Repo, error) {
+	loc, err := findLocation(dir)
+	if err != nil {
+		return nil, err
+	}
+	r, err := openAt(loc.mainRoot)
+	if err != nil {
+		return nil, err
+	}
+	if loc.worktreeName != "" {
+		r.Root = loc.root
+		r.worktreeName = loc.worktreeName
+	}
+	return r, nil
+}
+
+// openAt opens the repository rooted at root: loads (or defaults) the
+// config, then opens the SQLite-backed object store with the configured
+// pack and compression settings.
+func openAt(root string) (*Repo, error) {
+	archeDir := filepath.Join(root, archeDirName)
+	dbPath := filepath.Join(archeDir, storeFile)
+	packDir := filepath.Join(archeDir, packsDir)
+
+	cfg, err := loadConfig(filepath.Join(archeDir, configFile))
+	if err != nil {
+		return nil, fmt.Errorf("load config: %w", err)
+	}
+
+	s, err := store.OpenSQLiteStore(dbPath, packDir, cfg.Storage.PackThreshold, cfg.Storage.PackSealSize, cfg.Storage.Compression)
+	if err != nil {
+		return nil, fmt.Errorf("open store: %w", err)
+	}
+
+	return &Repo{Root: root, archeDir: archeDir, Store: s, Cfg: cfg}, nil
+}
+
+// Init creates a new repository at path: the directory tree, a default
+// config file, the object store, and the initial empty draft commit
+// (via bootstrap). Fails if a .arche entry already exists there.
+func Init(path string) (*Repo, error) {
+	absPath, err := filepath.Abs(path)
+	if err != nil {
+		return nil, err
+	}
+
+	archeDir := filepath.Join(absPath, archeDirName)
+	// err == nil means *something* named .arche exists — refuse to clobber.
+	if _, err := os.Stat(archeDir); err == nil {
+		return nil, fmt.Errorf("already an Arche repository at %s", absPath)
+	}
+
+	if err := os.MkdirAll(absPath, 0o755); err != nil {
+		return nil, fmt.Errorf("create repo dir: %w", err)
+	}
+	if err := os.MkdirAll(archeDir, 0o755); err != nil {
+		return nil, fmt.Errorf("create .arche dir: %w", err)
+	}
+
+	// Config must exist before openAt so the store opens with the right
+	// pack/compression settings.
+	cfg := DefaultConfig()
+	if err := writeConfig(filepath.Join(archeDir, configFile), cfg); err != nil {
+		return nil, fmt.Errorf("write config: %w", err)
+	}
+
+	r, err := openAt(absPath)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := r.bootstrap(); err != nil {
+		r.Close()
+		return nil, fmt.Errorf("bootstrap: %w", err)
+	}
+
+	return r, nil
+}
+
+// bootstrap creates the repository's initial state inside one store
+// transaction: an empty tree, an empty draft commit on a freshly allocated
+// change ID, and an "init" operation-log entry; it then points HEAD at the
+// new change.
+func (r *Repo) bootstrap() error {
+	now := time.Now()
+	sig := r.authorSig(now)
+
+	emptyTree := &object.Tree{Entries: nil}
+	treeID := object.HashTree(emptyTree)
+
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return err
+	}
+
+	changeID, err := r.Store.AllocChangeID(tx)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+
+	c := &object.Commit{
+		TreeID:    treeID,
+		Parents:   nil, // root commit
+		ChangeID:  changeID,
+		Author:    sig,
+		Committer: sig,
+		Message:   "",
+		Phase:     object.PhaseDraft,
+	}
+	commitID := object.HashCommit(c)
+
+	if _, err := WriteTreeTx(r.Store, tx, emptyTree); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+	if _, err := WriteCommitTx(r.Store, tx, c); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+	if err := r.Store.SetChangeCommit(tx, changeID, commitID); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+
+	// Record the creation in the operation log so `undo`-style tooling has
+	// a baseline; Before is empty since nothing existed yet.
+	op := store.Operation{
+		Kind:      "init",
+		Timestamp: now.Unix(),
+		Before:    "{}",
+		After:     refSnapshot(changeID, commitID),
+	}
+	if _, err := r.Store.InsertOperation(tx, op); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+
+	if err := r.Store.Commit(tx); err != nil {
+		return err
+	}
+
+	// HEAD lives outside the store, so it is written after the tx commits.
+	return r.WriteHead(object.FormatChangeID(changeID))
+}
+
+// Head returns the contents of the HEAD file — a change ID, possibly with
+// its prefix — with surrounding whitespace trimmed.
+func (r *Repo) Head() (string, error) {
+	raw, err := os.ReadFile(r.headPath())
+	if err != nil {
+		return "", fmt.Errorf("read HEAD: %w", err)
+	}
+	head := strings.TrimSpace(string(raw))
+	return head, nil
+}
+
+// WriteHead atomically replaces the HEAD file with changeID by writing a
+// temporary file and renaming it into place.
+//
+// Fix: the original leaked the temporary file when the final rename failed;
+// it is now removed on that path.
+func (r *Repo) WriteHead(changeID string) error {
+	p := r.headPath()
+	tmp := p + ".tmp"
+	if err := os.WriteFile(tmp, []byte(changeID+"\n"), 0o644); err != nil {
+		return err
+	}
+	if err := os.Rename(tmp, p); err != nil {
+		os.Remove(tmp) //nolint:errcheck
+		return err
+	}
+	return nil
+}
+
+// HeadChangeID returns the current HEAD change ID with any prefix stripped.
+func (r *Repo) HeadChangeID() (string, error) {
+	raw, err := r.Head()
+	if err != nil {
+		return "", err
+	}
+	return object.StripChangeIDPrefix(raw), nil
+}
+
+// HeadCommit resolves HEAD's change ID to its current commit and returns
+// the decoded commit plus its ID. On a read failure after resolution the
+// commit ID is still returned alongside the error.
+func (r *Repo) HeadCommit() (*object.Commit, [32]byte, error) {
+	cid, err := r.HeadChangeID()
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+	commitID, err := r.Store.GetChangeCommit(cid)
+	if err != nil {
+		return nil, object.ZeroID, fmt.Errorf("resolve HEAD change %q: %w", cid, err)
+	}
+	c, err := r.ReadCommit(commitID)
+	if err != nil {
+		return nil, commitID, err
+	}
+	return c, commitID, nil
+}
+
+// ReadCommit reads and decodes the commit object with the given ID.
+func (r *Repo) ReadCommit(id [32]byte) (*object.Commit, error) {
+	_, raw, err := r.Store.ReadObject(id)
+	if err != nil {
+		return nil, err
+	}
+	return object.DecodeCommit(raw)
+}
+
+// ReadTree reads and decodes the tree object with the given ID.
+func (r *Repo) ReadTree(id [32]byte) (*object.Tree, error) {
+	_, raw, err := r.Store.ReadObject(id)
+	if err != nil {
+		return nil, err
+	}
+	return object.DecodeTree(raw)
+}
+
+// ReadBlob reads the blob with the given ID and returns its raw content.
+// NOTE(review): unlike readBlobContent in worktree.go, this does not verify
+// the stored kind before decoding — consider unifying.
+func (r *Repo) ReadBlob(id [32]byte) ([]byte, error) {
+	_, raw, err := r.Store.ReadObject(id)
+	if err != nil {
+		return nil, err
+	}
+	b, err := object.DecodeBlob(raw)
+	if err != nil {
+		return nil, err
+	}
+	return b.Content, nil
+}
+
+// Close releases the repository's backing store.
+func (r *Repo) Close() error {
+	return r.Store.Close()
+}
+
+// SaveConfig rewrites .arche/config.toml from the in-memory configuration.
+func (r *Repo) SaveConfig() error {
+	return writeConfig(filepath.Join(r.ArcheDir(), configFile), r.Cfg)
+}
+
+// authorSig builds a commit signature for the configured user at time t.
+func (r *Repo) authorSig(t time.Time) object.Signature {
+	sig := object.Signature{Timestamp: t}
+	sig.Name = r.Cfg.User.Name
+	sig.Email = r.Cfg.User.Email
+	return sig
+}
+
+// WriteBlobTx encodes b and writes it to the store within tx, returning its
+// content hash.
+func WriteBlobTx(s store.Store, tx *store.Tx, b *object.Blob) ([32]byte, error) {
+	id := object.HashBlob(b)
+	var buf bytes.Buffer
+	object.EncodeBlob(&buf, b)
+	return id, s.WriteObject(tx, id, string(object.KindBlob), buf.Bytes())
+}
+
+// WriteConflictTx encodes c and writes it to the store within tx, returning
+// its content hash.
+func WriteConflictTx(s store.Store, tx *store.Tx, c *object.Conflict) ([32]byte, error) {
+	id := object.HashConflict(c)
+	var buf bytes.Buffer
+	object.EncodeConflict(&buf, c)
+	return id, s.WriteObject(tx, id, string(object.KindConflict), buf.Bytes())
+}
+
+// ReadConflict reads and decodes the conflict object with the given ID.
+func (r *Repo) ReadConflict(id [32]byte) (*object.Conflict, error) {
+	_, raw, err := r.Store.ReadObject(id)
+	if err != nil {
+		return nil, err
+	}
+	return object.DecodeConflict(raw)
+}
+
+// WriteTreeTx encodes t and writes it to the store within tx, returning its
+// content hash.
+func WriteTreeTx(s store.Store, tx *store.Tx, t *object.Tree) ([32]byte, error) {
+	id := object.HashTree(t)
+	var buf bytes.Buffer
+	object.EncodeTree(&buf, t)
+	return id, s.WriteObject(tx, id, string(object.KindTree), buf.Bytes())
+}
+
+// WriteCommitTx encodes c and writes it to the store within tx, returning
+// its content hash.
+func WriteCommitTx(s store.Store, tx *store.Tx, c *object.Commit) ([32]byte, error) {
+	id := object.HashCommit(c)
+	var buf bytes.Buffer
+	object.EncodeCommit(&buf, c)
+	return id, s.WriteObject(tx, id, string(object.KindCommit), buf.Bytes())
+}
+
+// WriteObsoleteTx encodes o and writes it to the store within tx, returning
+// its content hash.
+func WriteObsoleteTx(s store.Store, tx *store.Tx, o *object.ObsoleteMarker) ([32]byte, error) {
+	id := object.HashObsolete(o)
+	var buf bytes.Buffer
+	object.EncodeObsolete(&buf, o)
+	return id, s.WriteObject(tx, id, string(object.KindObsolete), buf.Bytes())
+}
+
+// refSnapshot encodes the head change ID and tip commit as a small JSON
+// object for the operation log.
+func refSnapshot(changeID string, commitID [32]byte) string {
+	snapshot := map[string]string{}
+	snapshot["head"] = object.FormatChangeID(changeID)
+	snapshot["tip"] = fmt.Sprintf("%x", commitID)
+	encoded, _ := json.Marshal(snapshot)
+	return string(encoded)
+}
+
+// RefState is the JSON shape recorded in operation-log Before/After
+// snapshots: HEAD's change ID, the tip commit hash, and all bookmarks.
+type RefState struct {
+	Head      string            `json:"head"`
+	Tip       string            `json:"tip"`
+	Bookmarks map[string]string `json:"bookmarks,omitempty"`
+}
+
+// CaptureRefState serializes the current HEAD, its tip commit, and all
+// bookmarks as JSON for the operation log. Failures are deliberately
+// swallowed — "{}" is returned with a nil error — so callers can always
+// record a snapshot even on a partially broken repo.
+func (r *Repo) CaptureRefState() (string, error) {
+	changeID, err := r.Head()
+	if err != nil {
+		return "{}", nil
+	}
+	bare := object.StripChangeIDPrefix(changeID)
+	commitID, err := r.Store.GetChangeCommit(bare)
+	if err != nil {
+		return "{}", nil
+	}
+	// Bookmark listing is best-effort too; an error leaves the map empty.
+	bms, _ := r.Store.ListBookmarks()
+	bmMap := make(map[string]string, len(bms))
+	for _, bm := range bms {
+		bmMap[bm.Name] = fmt.Sprintf("%x", bm.CommitID)
+	}
+	s := RefState{
+		Head:      changeID,
+		Tip:       fmt.Sprintf("%x", commitID),
+		Bookmarks: bmMap,
+	}
+	b, _ := json.Marshal(s)
+	return string(b), nil
+}

internal/repo/repo_test.go [A]
--- /dev/null
+++ b/internal/repo/repo_test.go
@@ -1,0 +1,344 @@
+package repo_test
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"arche/internal/diff"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/wc"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// initTestRepo creates a fresh repository in a temp dir and registers its
+// cleanup with the test.
+func initTestRepo(t *testing.T) *repo.Repo {
+	t.Helper()
+	dir := t.TempDir()
+	r, err := repo.Init(dir)
+	if err != nil {
+		t.Fatalf("repo.Init: %v", err)
+	}
+	t.Cleanup(func() { r.Close() })
+	return r
+}
+
+// writeFile writes content to a repo-relative path, creating parent
+// directories as needed, and returns the relative name.
+func writeFile(t *testing.T, r *repo.Repo, name, content string) string {
+	t.Helper()
+	abs := filepath.Join(r.Root, name)
+	if err := os.MkdirAll(filepath.Dir(abs), 0o755); err != nil {
+		t.Fatalf("MkdirAll: %v", err)
+	}
+	if err := os.WriteFile(abs, []byte(content), 0o644); err != nil {
+		t.Fatalf("WriteFile %s: %v", name, err)
+	}
+	return name
+}
+
+// TestInit_HeadSet verifies that init writes a non-empty HEAD carrying the
+// "ch:" change-ID prefix.
+//
+// Fix: the original sliced head[:3] after only checking for emptiness, so a
+// 1-2 character HEAD would panic the test; strings.HasPrefix is safe for
+// any length.
+func TestInit_HeadSet(t *testing.T) {
+	r := initTestRepo(t)
+	head, err := r.Head()
+	if err != nil {
+		t.Fatalf("Head: %v", err)
+	}
+	if len(head) == 0 {
+		t.Error("HEAD is empty after init")
+	}
+	if !strings.HasPrefix(head, "ch:") {
+		t.Errorf("HEAD should start with ch:, got %q", head)
+	}
+}
+
+// TestInit_InitialCommitExists verifies the bootstrap commit is resolvable
+// from HEAD and starts in the draft phase.
+func TestInit_InitialCommitExists(t *testing.T) {
+	r := initTestRepo(t)
+	c, id, err := r.HeadCommit()
+	if err != nil {
+		t.Fatalf("HeadCommit: %v", err)
+	}
+	if id == object.ZeroID {
+		t.Error("initial commit has zero ID")
+	}
+	if c.Phase != object.PhaseDraft {
+		t.Errorf("initial commit phase: got %v, want draft", c.Phase)
+	}
+}
+
+// TestInit_NotARepo verifies that opening a path with no repository (and no
+// worktree sentinel) in any ancestor fails.
+func TestInit_NotARepo(t *testing.T) {
+	target := filepath.Join(t.TempDir(), "nonexistent")
+	if _, err := repo.Open(target); err == nil {
+		t.Error("expected error opening non-repo dir, got nil")
+	}
+}
+
+// TestSnap_EmptyRepo verifies that snapping an empty working copy produces
+// a non-zero commit and leaves HEAD on a "ch:"-prefixed change ID.
+//
+// Fix: like TestInit_HeadSet, the original indexed head[:3] without a
+// length guard and could panic; use strings.HasPrefix instead.
+func TestSnap_EmptyRepo(t *testing.T) {
+	r := initTestRepo(t)
+	w := wc.New(r)
+	_, id, err := w.Snap("first snap")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+	if id == object.ZeroID {
+		t.Error("snapped commit has zero ID")
+	}
+	head, err := r.Head()
+	if err != nil {
+		t.Fatalf("Head: %v", err)
+	}
+	if !strings.HasPrefix(head, "ch:") {
+		t.Errorf("HEAD after snap: %q", head)
+	}
+}
+
+// TestSnap_WithFiles snaps two files and checks the commit message, a
+// non-empty root tree, and that a second snap yields a distinct commit ID.
+func TestSnap_WithFiles(t *testing.T) {
+	r := initTestRepo(t)
+	writeFile(t, r, "hello.txt", "Hello, Arche!\n")
+	writeFile(t, r, "src/main.go", "package main\n")
+	w := wc.New(r)
+	snapped, snapID, err := w.Snap("add files")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+	if snapped.Message != "add files" {
+		t.Errorf("Message: got %q, want %q", snapped.Message, "add files")
+	}
+	tree, err := r.ReadTree(snapped.TreeID)
+	if err != nil {
+		t.Fatalf("ReadTree: %v", err)
+	}
+	if len(tree.Entries) == 0 {
+		t.Error("snapped tree is empty")
+	}
+	// NOTE(review): distinctness presumably comes from the new commit's
+	// parent/message/timestamp; if commit hashing has only second-level
+	// time resolution and ignores parents this could flake — confirm
+	// against object.HashCommit.
+	_, snapID2, err := w.Snap("no change snap")
+	if err != nil {
+		t.Fatalf("second Snap: %v", err)
+	}
+	if snapID2 == snapID {
+		t.Error("two snaps should produce distinct draft commit IDs")
+	}
+}
+
+// TestSnap_FilesStoredInTree verifies that snapped files appear as root
+// tree entries: the README file and the lib subdirectory.
+//
+// Fix: the original condition used &&, so the test only failed when BOTH
+// entries were missing; it now requires both README.md and lib to exist.
+func TestSnap_FilesStoredInTree(t *testing.T) {
+	r := initTestRepo(t)
+	writeFile(t, r, "README.md", "# Project\n")
+	writeFile(t, r, "lib/util.go", "package lib\n")
+	w := wc.New(r)
+	snapped, _, err := w.Snap("files snap")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+	tree, err := r.ReadTree(snapped.TreeID)
+	if err != nil {
+		t.Fatalf("ReadTree: %v", err)
+	}
+	names := map[string]bool{}
+	for _, e := range tree.Entries {
+		names[e.Name] = true
+	}
+	if !names["README.md"] || !names["lib"] {
+		t.Errorf("tree entries: %v", names)
+	}
+}
+
+// TestStatus_Added verifies that a file created after a snap is reported by
+// Status with the 'A' (added) status.
+func TestStatus_Added(t *testing.T) {
+	r := initTestRepo(t)
+	w := wc.New(r)
+	if _, _, err := w.Snap("base"); err != nil {
+		t.Fatalf("initial snap: %v", err)
+	}
+	writeFile(t, r, "new.txt", "new file")
+	changes, err := w.Status()
+	if err != nil {
+		t.Fatalf("Status: %v", err)
+	}
+	var found bool
+	for _, c := range changes {
+		if c.Path == "new.txt" && c.Status == 'A' {
+			found = true
+		}
+	}
+	if !found {
+		t.Errorf("expected new.txt as Added, got: %v", changes)
+	}
+}
+
+// TestStatus_Clean verifies that Status reports no changes immediately
+// after a snap has captured the working copy.
+//
+// Fix: the original discarded the change list and only checked the error,
+// so it never actually asserted that the status was clean.
+func TestStatus_Clean(t *testing.T) {
+	r := initTestRepo(t)
+	writeFile(t, r, "file.txt", "content")
+	w := wc.New(r)
+	if _, _, err := w.Snap("snap"); err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+	changes, err := w.Status()
+	if err != nil {
+		t.Fatalf("Status: %v", err)
+	}
+	if len(changes) != 0 {
+		t.Errorf("expected clean status after snap, got: %v", changes)
+	}
+}
+
+// TestBookmarks exercises the bookmark lifecycle: set inside a transaction,
+// read back, list, and delete.
+func TestBookmarks(t *testing.T) {
+	r := initTestRepo(t)
+	_, commitID, err := r.HeadCommit()
+	if err != nil {
+		t.Fatalf("HeadCommit: %v", err)
+	}
+	tx, err := r.Store.Begin()
+	if err != nil {
+		t.Fatalf("Begin: %v", err)
+	}
+	err = r.Store.SetBookmark(tx, store.Bookmark{Name: "main", CommitID: commitID})
+	if err != nil {
+		r.Store.Rollback(tx)
+		t.Fatalf("SetBookmark: %v", err)
+	}
+	if err := r.Store.Commit(tx); err != nil {
+		t.Fatalf("Commit: %v", err)
+	}
+	bm, err := r.Store.GetBookmark("main")
+	if err != nil {
+		t.Fatalf("GetBookmark: %v", err)
+	}
+	if bm == nil {
+		t.Fatal("bookmark not found")
+	}
+	if bm.CommitID != commitID {
+		t.Errorf("CommitID: got %x, want %x", bm.CommitID[:6], commitID[:6])
+	}
+	bms, err := r.Store.ListBookmarks()
+	if err != nil {
+		t.Fatalf("ListBookmarks: %v", err)
+	}
+	if len(bms) != 1 || bms[0].Name != "main" {
+		t.Errorf("unexpected bookmarks: %v", bms)
+	}
+	// Deletion path: errors intentionally unchecked here — a failed delete
+	// is caught by the bm2 != nil assertion below.
+	tx2, _ := r.Store.Begin()
+	r.Store.DeleteBookmark(tx2, "main")
+	r.Store.Commit(tx2)
+	bm2, _ := r.Store.GetBookmark("main")
+	if bm2 != nil {
+		t.Error("bookmark should be deleted")
+	}
+}
+
+// TestPhaseDefaultDraft verifies the store reports the bootstrap commit's
+// phase as draft.
+func TestPhaseDefaultDraft(t *testing.T) {
+	r := initTestRepo(t)
+	_, commitID, err := r.HeadCommit()
+	if err != nil {
+		t.Fatalf("HeadCommit: %v", err)
+	}
+	phase, err := r.Store.GetPhase(commitID)
+	if err != nil {
+		t.Fatalf("GetPhase: %v", err)
+	}
+	if phase != object.PhaseDraft {
+		t.Errorf("default phase: got %v, want draft", phase)
+	}
+}
+
+// TestSetPhase verifies that a commit's phase can be moved to public and
+// read back.
+//
+// Fix: the original discarded the errors from Begin and Commit; a silently
+// failed transaction would make the later GetPhase assertion misleading.
+func TestSetPhase(t *testing.T) {
+	r := initTestRepo(t)
+	_, commitID, err := r.HeadCommit()
+	if err != nil {
+		t.Fatalf("HeadCommit: %v", err)
+	}
+	tx, err := r.Store.Begin()
+	if err != nil {
+		t.Fatalf("Begin: %v", err)
+	}
+	if err := r.Store.SetPhase(tx, commitID, object.PhasePublic); err != nil {
+		r.Store.Rollback(tx)
+		t.Fatalf("SetPhase: %v", err)
+	}
+	if err := r.Store.Commit(tx); err != nil {
+		t.Fatalf("Commit: %v", err)
+	}
+	phase, err := r.Store.GetPhase(commitID)
+	if err != nil {
+		t.Fatalf("GetPhase: %v", err)
+	}
+	if phase != object.PhasePublic {
+		t.Errorf("phase: got %v, want public", phase)
+	}
+}
+
+// TestOperationLog verifies init records at least one operation and that a
+// snap appends another.
+func TestOperationLog(t *testing.T) {
+	r := initTestRepo(t)
+	ops, err := r.Store.ListOperations(10)
+	if err != nil {
+		t.Fatalf("ListOperations: %v", err)
+	}
+	if len(ops) == 0 {
+		t.Error("no operations after init")
+	}
+	writeFile(t, r, "a.txt", "a")
+	w := wc.New(r)
+	if _, _, err := w.Snap("snap"); err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+	ops2, err := r.Store.ListOperations(10)
+	if err != nil {
+		t.Fatalf("ListOperations: %v", err)
+	}
+	if len(ops2) <= len(ops) {
+		t.Errorf("snap should add operation; before=%d after=%d", len(ops), len(ops2))
+	}
+}
+
+// TestTreeDiff_NoChanges verifies diffing a tree against itself yields no
+// entries.
+func TestTreeDiff_NoChanges(t *testing.T) {
+	r := initTestRepo(t)
+	writeFile(t, r, "file.txt", "content\n")
+	w := wc.New(r)
+	snapped, _, err := w.Snap("snap")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+	diffs, err := diff.TreeDiff(r, snapped.TreeID, snapped.TreeID)
+	if err != nil {
+		t.Fatalf("TreeDiff: %v", err)
+	}
+	if len(diffs) != 0 {
+		t.Errorf("same tree should produce no diffs, got %v", diffs)
+	}
+}
+
+// TestTreeDiff_AddedFile verifies a file created between two snaps shows up
+// in the tree diff with 'A' status.
+func TestTreeDiff_AddedFile(t *testing.T) {
+	r := initTestRepo(t)
+	w := wc.New(r)
+	snap1, _, err := w.Snap("empty")
+	if err != nil {
+		t.Fatalf("first Snap: %v", err)
+	}
+	writeFile(t, r, "new.go", "package main\n")
+	snap2, _, err := w.Snap("add file")
+	if err != nil {
+		t.Fatalf("second Snap: %v", err)
+	}
+	diffs, err := diff.TreeDiff(r, snap1.TreeID, snap2.TreeID)
+	if err != nil {
+		t.Fatalf("TreeDiff: %v", err)
+	}
+	var found bool
+	for _, d := range diffs {
+		if d.Path == "new.go" && d.Status == 'A' {
+			found = true
+		}
+	}
+	if !found {
+		t.Errorf("expected new.go as Added, got: %v", diffs)
+	}
+}
+
+// TestCommitDiff_RootCommit verifies that diffing a parentless commit shows
+// every file as added.
+func TestCommitDiff_RootCommit(t *testing.T) {
+	r := initTestRepo(t)
+	writeFile(t, r, "main.go", "package main\n")
+	w := wc.New(r)
+	_, snapID, err := w.Snap("first commit")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+	diffs, err := diff.CommitDiff(r, snapID)
+	if err != nil {
+		t.Fatalf("CommitDiff: %v", err)
+	}
+	for _, d := range diffs {
+		if d.Status != 'A' {
+			t.Errorf("root commit diff should only have Added files, got status=%c path=%s", d.Status, d.Path)
+		}
+	}
+}

internal/repo/worktree.go [A]
--- /dev/null
+++ b/internal/repo/worktree.go
@@ -1,0 +1,229 @@
+package repo
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"arche/internal/object"
+)
+
+// worktreesDir is the per-worktree metadata directory under .arche.
+const worktreesDir = "worktrees"
+
+// WorktreeInfo describes a linked worktree: its metadata name and checkout
+// path.
+type WorktreeInfo struct {
+	Name string
+	Path string
+}
+
+// AddWorktree creates a linked worktree at path, checked out at the given
+// bookmark's commit (or HEAD's commit when bookmark is empty). It creates
+// per-worktree metadata under .arche/worktrees/<name>, drops a sentinel
+// file in the new directory pointing back at the main repo, and
+// materializes the commit's tree. Partial failures roll back the metadata
+// dir (and sentinel) but leave any created checkout directory in place.
+func (r *Repo) AddWorktree(path, bookmark string) error {
+	absPath, err := filepath.Abs(path)
+	if err != nil {
+		return fmt.Errorf("worktree add: resolve path: %w", err)
+	}
+
+	// Resolve the commit and its change ID to seed the worktree's HEAD.
+	var commitID [32]byte
+	var changeID string
+	if bookmark != "" {
+		bms, err := r.Store.ListBookmarks()
+		if err != nil {
+			return err
+		}
+		found := false
+		for _, bm := range bms {
+			if bm.Name == bookmark {
+				commitID = bm.CommitID
+				found = true
+				break
+			}
+		}
+		if !found {
+			return fmt.Errorf("worktree add: bookmark %q not found", bookmark)
+		}
+		c, err := r.ReadCommit(commitID)
+		if err != nil {
+			return fmt.Errorf("worktree add: read commit: %w", err)
+		}
+		changeID = object.FormatChangeID(c.ChangeID)
+	} else {
+		c, id, err := r.HeadCommit()
+		if err != nil {
+			return fmt.Errorf("worktree add: read HEAD commit: %w", err)
+		}
+		commitID = id
+		changeID = object.FormatChangeID(c.ChangeID)
+	}
+
+	// Derive a unique metadata name from the checkout dir's basename,
+	// appending a numeric suffix (name2, name3, ...) on collision.
+	name := filepath.Base(absPath)
+	if name == "" || name == "." {
+		name = "wt"
+	}
+	wtBaseDir := filepath.Join(r.archeDir, worktreesDir)
+	wtDir := filepath.Join(wtBaseDir, name)
+	if _, err := os.Stat(wtDir); err == nil {
+		for i := 2; ; i++ {
+			cand := fmt.Sprintf("%s%d", name, i)
+			if _, err := os.Stat(filepath.Join(wtBaseDir, cand)); os.IsNotExist(err) {
+				name = cand
+				wtDir = filepath.Join(wtBaseDir, cand)
+				break
+			}
+		}
+	}
+
+	if err := os.MkdirAll(wtDir, 0o755); err != nil {
+		return fmt.Errorf("worktree add: create meta dir: %w", err)
+	}
+
+	// Per-worktree HEAD — this is what headPath() reads for this worktree.
+	headFilePath := filepath.Join(wtDir, headFile)
+	if err := os.WriteFile(headFilePath, []byte(changeID+"\n"), 0o644); err != nil {
+		os.RemoveAll(wtDir) //nolint:errcheck
+		return fmt.Errorf("worktree add: write HEAD: %w", err)
+	}
+
+	// Record the checkout path so ListWorktrees/RemoveWorktree can find it.
+	if err := os.WriteFile(filepath.Join(wtDir, "path"), []byte(absPath+"\n"), 0o644); err != nil {
+		os.RemoveAll(wtDir) //nolint:errcheck
+		return fmt.Errorf("worktree add: write path: %w", err)
+	}
+
+	if err := os.MkdirAll(absPath, 0o755); err != nil {
+		os.RemoveAll(wtDir) //nolint:errcheck
+		return fmt.Errorf("worktree add: create directory: %w", err)
+	}
+
+	// Sentinel lets findLocation route opens of absPath back to mainRoot.
+	mainRoot := filepath.Dir(r.archeDir)
+	sentinel := mainRoot + "\n" + name + "\n"
+	sentinelPath := filepath.Join(absPath, worktreeFile)
+	if err := os.WriteFile(sentinelPath, []byte(sentinel), 0o644); err != nil {
+		os.RemoveAll(wtDir) //nolint:errcheck
+		return fmt.Errorf("worktree add: write sentinel: %w", err)
+	}
+
+	if err := r.materializeInto(absPath, commitID); err != nil {
+		os.RemoveAll(wtDir)     //nolint:errcheck
+		os.Remove(sentinelPath) //nolint:errcheck
+		return fmt.Errorf("worktree add: materialize: %w", err)
+	}
+
+	return nil
+}
+
+// materializeInto checks out commitID's tree into targetDir.
+func (r *Repo) materializeInto(targetDir string, commitID [32]byte) error {
+	c, err := r.ReadCommit(commitID)
+	if err != nil {
+		return err
+	}
+	return r.writeTree(targetDir, c.TreeID)
+}
+
+// writeTree recursively writes the tree identified by treeID into dir,
+// handling subdirectories, executable files, symlinks, and regular files.
+// A zero tree ID (empty tree) is a no-op.
+func (r *Repo) writeTree(dir string, treeID [32]byte) error {
+	if treeID == object.ZeroID {
+		return nil
+	}
+	_, raw, err := r.Store.ReadObject(treeID)
+	if err != nil {
+		return err
+	}
+	tree, err := object.DecodeTree(raw)
+	if err != nil {
+		return err
+	}
+	for _, entry := range tree.Entries {
+		targetPath := filepath.Join(dir, entry.Name)
+		switch entry.Mode {
+		case object.ModeDir:
+			if err := os.MkdirAll(targetPath, 0o755); err != nil {
+				return err
+			}
+			if err := r.writeTree(targetPath, entry.ObjectID); err != nil {
+				return err
+			}
+		case object.ModeExec:
+			if err := r.writeBlob(targetPath, entry.ObjectID, 0o755); err != nil {
+				return err
+			}
+		case object.ModeSymlink:
+			// Symlink target is stored as the blob's content.
+			content, err := r.readBlobContent(entry.ObjectID)
+			if err != nil {
+				return err
+			}
+			// Remove any existing entry first: os.Symlink fails if the
+			// path already exists.
+			os.Remove(targetPath) //nolint:errcheck
+			if err := os.Symlink(string(content), targetPath); err != nil {
+				return err
+			}
+		default:
+			// Regular (non-executable) file.
+			if err := r.writeBlob(targetPath, entry.ObjectID, 0o644); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// writeBlob writes the blob's content to path with the given permissions,
+// using write-to-temp-then-rename so readers never observe a partially
+// written file.
+//
+// Fix: the original leaked the temporary file when the rename failed; it is
+// now removed on that path.
+func (r *Repo) writeBlob(path string, blobID [32]byte, perm os.FileMode) error {
+	content, err := r.readBlobContent(blobID)
+	if err != nil {
+		return err
+	}
+	tmp := path + ".arche-tmp"
+	if err := os.WriteFile(tmp, content, perm); err != nil {
+		return err
+	}
+	if err := os.Rename(tmp, path); err != nil {
+		os.Remove(tmp) //nolint:errcheck
+		return err
+	}
+	return nil
+}
+
+// readBlobContent reads blobID from the store, verifying the stored object
+// kind is actually a blob before decoding, and returns the raw content.
+func (r *Repo) readBlobContent(blobID [32]byte) ([]byte, error) {
+	kind, raw, err := r.Store.ReadObject(blobID)
+	if err != nil {
+		return nil, err
+	}
+	if kind != string(object.KindBlob) {
+		return nil, fmt.Errorf("expected blob, got %s", kind)
+	}
+	b, err := object.DecodeBlob(raw)
+	if err != nil {
+		return nil, err
+	}
+	return b.Content, nil
+}
+
+// ListWorktrees enumerates linked worktrees from .arche/worktrees. A
+// missing base directory means no worktrees (nil, nil); entries whose
+// "path" file cannot be read are skipped silently.
+func (r *Repo) ListWorktrees() ([]WorktreeInfo, error) {
+	wtBaseDir := filepath.Join(r.archeDir, worktreesDir)
+	entries, err := os.ReadDir(wtBaseDir)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	var out []WorktreeInfo
+	for _, e := range entries {
+		if !e.IsDir() {
+			continue
+		}
+		pathBytes, err := os.ReadFile(filepath.Join(wtBaseDir, e.Name(), "path"))
+		if err != nil {
+			continue // incomplete/corrupt worktree metadata — skip
+		}
+		out = append(out, WorktreeInfo{
+			Name: e.Name(),
+			Path: strings.TrimSpace(string(pathBytes)),
+		})
+	}
+	return out, nil
+}
+
+// RemoveWorktree deletes the named worktree's metadata and, best-effort,
+// the sentinel file in its checkout directory. The checkout's working
+// files themselves are left in place.
+func (r *Repo) RemoveWorktree(name string) error {
+	wtDir := filepath.Join(r.archeDir, worktreesDir, name)
+	if _, err := os.Stat(wtDir); os.IsNotExist(err) {
+		return fmt.Errorf("worktree %q not found", name)
+	}
+
+	// Best-effort sentinel removal so the old checkout no longer resolves
+	// back to this repo.
+	pathBytes, err := os.ReadFile(filepath.Join(wtDir, "path"))
+	if err == nil {
+		wtPath := strings.TrimSpace(string(pathBytes))
+		os.Remove(filepath.Join(wtPath, worktreeFile)) //nolint:errcheck
+	}
+
+	return os.RemoveAll(wtDir)
+}

internal/revset/revset.go [A]
--- /dev/null
+++ b/internal/revset/revset.go
@@ -1,0 +1,361 @@
+package revset
+
+import (
+	"fmt"
+	"strings"
+	"time"
+	"unicode"
+
+	"arche/internal/object"
+)
+
+// Func is a compiled revset predicate: it reports whether a commit (with
+// its ID and phase) matches the expression.
+type Func func(id [32]byte, c *object.Commit, phase object.Phase) bool
+
+// Parse compiles a revset expression (predicates combined with and/or/not
+// and parentheses) into a matcher. Trailing tokens after a complete
+// expression are an error.
+func Parse(expr string) (Func, error) {
+	p := &parser{tokens: tokenize(expr)}
+	fn, err := p.parseOr()
+	if err != nil {
+		return nil, err
+	}
+	if !p.eof() {
+		return nil, fmt.Errorf("revset: unexpected token %q", p.peek())
+	}
+	return fn, nil
+}
+
+// tokKind classifies lexer tokens for the revset grammar.
+type tokKind int
+
+const (
+	tokWord tokKind = iota // bare word (predicate name, operator, or arg)
+	tokStr // quoted string argument
+	tokLParen
+	tokRParen
+	tokComma
+	tokDotDot // ".." range separator (only when it starts a token)
+	tokEOF
+)
+
+// token is a single lexed unit with its literal value.
+type token struct {
+	kind tokKind
+	val  string
+}
+
+// tokenize lexes a revset expression into tokens, always ending with
+// tokEOF. Quoted strings (single or double) have no escape mechanism; an
+// unterminated quote consumes to end of input. Note that '.' is not a word
+// terminator, so ".." embedded in a word (e.g. "a..b") stays inside that
+// word — tokDotDot is only emitted when ".." begins a token.
+func tokenize(s string) []token {
+	var out []token
+	i := 0
+	for i < len(s) {
+		ch := rune(s[i])
+		if unicode.IsSpace(ch) {
+			i++
+			continue
+		}
+		switch {
+		case ch == '(' || ch == ')' || ch == ',':
+			k := tokLParen
+			switch ch {
+			case ')':
+				k = tokRParen
+			case ',':
+				k = tokComma
+			}
+			out = append(out, token{k, string(ch)})
+			i++
+		case i+1 < len(s) && s[i] == '.' && s[i+1] == '.':
+			out = append(out, token{tokDotDot, ".."})
+			i += 2
+		case ch == '"' || ch == '\'':
+			quote := s[i]
+			i++
+			start := i
+			for i < len(s) && s[i] != quote {
+				i++
+			}
+			out = append(out, token{tokStr, s[start:i]})
+			// Skip the closing quote if present (unterminated is tolerated).
+			if i < len(s) {
+				i++
+			}
+		default:
+			// Word: runs until whitespace or a structural character.
+			start := i
+			for i < len(s) && !unicode.IsSpace(rune(s[i])) &&
+				s[i] != '(' && s[i] != ')' && s[i] != ',' {
+				i++
+			}
+			out = append(out, token{tokWord, s[start:i]})
+		}
+	}
+	out = append(out, token{tokEOF, ""})
+	return out
+}
+
+// parser is a recursive-descent parser over a token slice.
+type parser struct {
+	tokens []token
+	pos    int
+}
+
+// peek returns the current token's literal value without consuming it.
+func (p *parser) peek() string {
+	return p.tokens[p.pos].val
+}
+
+// peekKind returns the current token's kind without consuming it.
+func (p *parser) peekKind() tokKind {
+	return p.tokens[p.pos].kind
+}
+
+// consume returns the current token and advances, except at EOF, which is
+// sticky (the terminal tokEOF is never passed).
+func (p *parser) consume() token {
+	t := p.tokens[p.pos]
+	if t.kind != tokEOF {
+		p.pos++
+	}
+	return t
+}
+
+// eof reports whether all input has been consumed.
+func (p *parser) eof() bool {
+	return p.tokens[p.pos].kind == tokEOF
+}
+
+// expect consumes one token and errors unless it has the given kind.
+func (p *parser) expect(kind tokKind) (string, error) {
+	t := p.consume()
+	if t.kind != kind {
+		return "", fmt.Errorf("revset: expected %v, got %q", kind, t.val)
+	}
+	return t.val, nil
+}
+
+// parseOr parses a left-associative chain of "or"-combined terms; "or" is
+// matched case-insensitively. Precedence: or < and < not.
+func (p *parser) parseOr() (Func, error) {
+	left, err := p.parseAnd()
+	if err != nil {
+		return nil, err
+	}
+	for p.peekKind() == tokWord && strings.EqualFold(p.peek(), "or") {
+		p.consume()
+		right, err := p.parseAnd()
+		if err != nil {
+			return nil, err
+		}
+		// Capture both operands in fresh variables so the closure does not
+		// alias the loop variable being reassigned.
+		l, r := left, right
+		left = func(id [32]byte, c *object.Commit, ph object.Phase) bool {
+			return l(id, c, ph) || r(id, c, ph)
+		}
+	}
+	return left, nil
+}
+
+// parseAnd parses a left-associative chain of "and"-combined terms.
+func (p *parser) parseAnd() (Func, error) {
+	left, err := p.parseNot()
+	if err != nil {
+		return nil, err
+	}
+	for p.peekKind() == tokWord && strings.EqualFold(p.peek(), "and") {
+		p.consume()
+		right, err := p.parseNot()
+		if err != nil {
+			return nil, err
+		}
+		l, r := left, right
+		left = func(id [32]byte, c *object.Commit, ph object.Phase) bool {
+			return l(id, c, ph) && r(id, c, ph)
+		}
+	}
+	return left, nil
+}
+
+// parseNot handles the unary, stackable "not" prefix (case-insensitive),
+// which binds tighter than "and" and "or".
+func (p *parser) parseNot() (Func, error) {
+	if p.peekKind() != tokWord || !strings.EqualFold(p.peek(), "not") {
+		return p.parsePrimary()
+	}
+	p.consume()
+	inner, err := p.parseNot()
+	if err != nil {
+		return nil, err
+	}
+	negated := func(id [32]byte, c *object.Commit, ph object.Phase) bool {
+		return !inner(id, c, ph)
+	}
+	return negated, nil
+}
+
+// parsePrimary parses either a parenthesized sub-expression or a predicate
+// call of the form name(args...). Bare words without a call are an error —
+// every predicate requires parentheses, even zero-argument ones.
+func (p *parser) parsePrimary() (Func, error) {
+	if p.peekKind() == tokLParen {
+		p.consume()
+		fn, err := p.parseOr()
+		if err != nil {
+			return nil, err
+		}
+		if _, err := p.expect(tokRParen); err != nil {
+			return nil, err
+		}
+		return fn, nil
+	}
+
+	name, err := p.expect(tokWord)
+	if err != nil {
+		return nil, err
+	}
+
+	if p.peekKind() != tokLParen {
+		return nil, fmt.Errorf("revset: expected '(' after %q", name)
+	}
+	p.consume()
+
+	args, err := p.parseArgs()
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := p.expect(tokRParen); err != nil {
+		return nil, err
+	}
+
+	// Predicate names are case-insensitive.
+	return buildPredicate(strings.ToLower(name), args)
+}
+
+// parseArgs reads a comma-separated argument list up to (but not consuming)
+// the closing parenthesis. Words, quoted strings, and ".." tokens are all
+// accepted as literal argument text.
+func (p *parser) parseArgs() ([]string, error) {
+	var args []string
+	if p.peekKind() == tokRParen {
+		return args, nil
+	}
+	for {
+		switch p.peekKind() {
+		case tokWord, tokStr, tokDotDot:
+			args = append(args, p.consume().val)
+		default:
+			return nil, fmt.Errorf("revset: unexpected token in arg list: %q", p.peek())
+		}
+		if p.peekKind() != tokComma {
+			break
+		}
+		p.consume()
+	}
+	return args, nil
+}
+
+// buildPredicate maps a (lower-cased) predicate name plus its argument
+// strings to a matcher. Supported: all(), none(), draft(), public(),
+// secret(), phase(p), author(substr), message(substr), date(range).
+// Substring matches are case-insensitive.
+func buildPredicate(name string, args []string) (Func, error) {
+	switch name {
+	case "all":
+		if len(args) != 0 {
+			return nil, fmt.Errorf("revset: all() takes no arguments")
+		}
+		return func(_ [32]byte, _ *object.Commit, _ object.Phase) bool { return true }, nil
+
+	case "none":
+		if len(args) != 0 {
+			return nil, fmt.Errorf("revset: none() takes no arguments")
+		}
+		return func(_ [32]byte, _ *object.Commit, _ object.Phase) bool { return false }, nil
+
+	case "draft":
+		if len(args) != 0 {
+			return nil, fmt.Errorf("revset: draft() takes no arguments")
+		}
+		return func(_ [32]byte, _ *object.Commit, ph object.Phase) bool {
+			return ph == object.PhaseDraft
+		}, nil
+
+	case "public":
+		if len(args) != 0 {
+			return nil, fmt.Errorf("revset: public() takes no arguments")
+		}
+		return func(_ [32]byte, _ *object.Commit, ph object.Phase) bool {
+			return ph == object.PhasePublic
+		}, nil
+
+	case "secret":
+		if len(args) != 0 {
+			return nil, fmt.Errorf("revset: secret() takes no arguments")
+		}
+		return func(_ [32]byte, _ *object.Commit, ph object.Phase) bool {
+			return ph == object.PhaseSecret
+		}, nil
+
+	case "phase":
+		// phase(x) is the parameterized form of draft()/public()/secret().
+		if len(args) != 1 {
+			return nil, fmt.Errorf("revset: phase() requires exactly one argument")
+		}
+		var want object.Phase
+		switch strings.ToLower(args[0]) {
+		case "draft":
+			want = object.PhaseDraft
+		case "public":
+			want = object.PhasePublic
+		case "secret":
+			want = object.PhaseSecret
+		default:
+			return nil, fmt.Errorf("revset: unknown phase %q (want draft|public|secret)", args[0])
+		}
+		return func(_ [32]byte, _ *object.Commit, ph object.Phase) bool {
+			return ph == want
+		}, nil
+
+	case "author":
+		// Matches a substring of either the author's name or email.
+		if len(args) != 1 {
+			return nil, fmt.Errorf("revset: author() requires exactly one argument")
+		}
+		pat := strings.ToLower(args[0])
+		return func(_ [32]byte, c *object.Commit, _ object.Phase) bool {
+			return strings.Contains(strings.ToLower(c.Author.Name), pat) ||
+				strings.Contains(strings.ToLower(c.Author.Email), pat)
+		}, nil
+
+	case "message":
+		if len(args) != 1 {
+			return nil, fmt.Errorf("revset: message() requires exactly one argument")
+		}
+		pat := strings.ToLower(args[0])
+		return func(_ [32]byte, c *object.Commit, _ object.Phase) bool {
+			return strings.Contains(strings.ToLower(c.Message), pat)
+		}, nil
+
+	case "date":
+		return buildDatePredicate(args)
+
+	default:
+		return nil, fmt.Errorf("revset: unknown predicate %q", name)
+	}
+}
+
+// buildDatePredicate parses date() arguments into a half-open time window
+// over the commit author timestamp. Accepted forms (local time,
+// YYYY-MM-DD): a single day; "from..to" inclusive of both days; "from.."
+// open-ended forward; "..to" open-ended backward. Args are joined with ""
+// because the lexer may have split the expression around a leading "..".
+func buildDatePredicate(args []string) (Func, error) {
+	raw := strings.Join(args, "")
+	if raw == "" {
+		return nil, fmt.Errorf("revset: date() requires an argument")
+	}
+
+	const layout = "2006-01-02"
+	var from, to time.Time
+
+	if idx := strings.Index(raw, ".."); idx >= 0 {
+		fromStr := raw[:idx]
+		toStr := raw[idx+2:]
+		if fromStr != "" {
+			t, err := time.ParseInLocation(layout, fromStr, time.Local)
+			if err != nil {
+				return nil, fmt.Errorf("revset: date: invalid from date %q", fromStr)
+			}
+			from = t
+		}
+		if toStr != "" {
+			t, err := time.ParseInLocation(layout, toStr, time.Local)
+			if err != nil {
+				return nil, fmt.Errorf("revset: date: invalid to date %q", toStr)
+			}
+			// +24h makes the end day inclusive (window end is exclusive).
+			to = t.Add(24 * time.Hour)
+		}
+	} else {
+		// Single day: [midnight, midnight+24h).
+		t, err := time.ParseInLocation(layout, raw, time.Local)
+		if err != nil {
+			return nil, fmt.Errorf("revset: date: invalid date %q", raw)
+		}
+		from = t
+		to = t.Add(24 * time.Hour)
+	}
+
+	return func(_ [32]byte, c *object.Commit, _ object.Phase) bool {
+		ts := c.Author.Timestamp
+		// Zero bounds mean "unbounded" on that side.
+		if !from.IsZero() && ts.Before(from) {
+			return false
+		}
+		if !to.IsZero() && !ts.Before(to) {
+			return false
+		}
+		return true
+	}, nil
+}

internal/store/codec.go [A]
--- /dev/null
+++ b/internal/store/codec.go
@@ -1,0 +1,106 @@
+package store
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+	lz4 "github.com/pierrec/lz4/v4"
+)
+
// codec is the pluggable compression strategy for stored object payloads.
// Compress returns a buffer independent of src; Decompress is its inverse.
// Close releases any backend resources (e.g. zstd worker goroutines).
type codec interface {
	Compress(src []byte) []byte
	Decompress(src []byte) ([]byte, error)
	Close()
}
+
+func newCodec(name string, dict []byte) (codec, error) {
+	switch name {
+	case "", "zstd":
+		return newZstdCodec(dict)
+	case "lz4":
+		return &lz4Codec{}, nil
+	case "none":
+		return &noneCodec{}, nil
+	default:
+		return nil, fmt.Errorf("unknown compression %q: must be zstd, lz4, or none", name)
+	}
+}
+
// zstdCodec holds one long-lived zstd encoder/decoder pair, created once in
// newZstdCodec and shared for the lifetime of the store.
type zstdCodec struct {
	enc *zstd.Encoder
	dec *zstd.Decoder
}
+
+func newZstdCodec(dict []byte) (*zstdCodec, error) {
+	encOpts := []zstd.EOption{zstd.WithEncoderLevel(zstd.SpeedDefault)}
+	if len(dict) > 0 {
+		encOpts = append(encOpts, zstd.WithEncoderDict(dict))
+	}
+	enc, err := zstd.NewWriter(nil, encOpts...)
+	if err != nil {
+		return nil, fmt.Errorf("zstd encoder: %w", err)
+	}
+
+	decOpts := []zstd.DOption{}
+	if len(dict) > 0 {
+		decOpts = append(decOpts, zstd.WithDecoderDicts(dict))
+	}
+	dec, err := zstd.NewReader(nil, decOpts...)
+	if err != nil {
+		enc.Close()
+		return nil, fmt.Errorf("zstd decoder: %w", err)
+	}
+	return &zstdCodec{enc: enc, dec: dec}, nil
+}
+
// Compress zstd-encodes src into a newly allocated buffer.
func (c *zstdCodec) Compress(src []byte) []byte {
	return c.enc.EncodeAll(src, nil)
}

// Decompress reverses Compress, returning the original bytes.
func (c *zstdCodec) Decompress(src []byte) ([]byte, error) {
	return c.dec.DecodeAll(src, nil)
}

// Close shuts down the encoder and decoder, releasing their resources.
func (c *zstdCodec) Close() {
	c.enc.Close()
	c.dec.Close()
}
+
+type lz4Codec struct{}
+
+func (c *lz4Codec) Compress(src []byte) []byte {
+	var buf bytes.Buffer
+	w := lz4.NewWriter(&buf)
+	_, _ = w.Write(src)
+	_ = w.Close()
+	return buf.Bytes()
+}
+
+func (c *lz4Codec) Decompress(src []byte) ([]byte, error) {
+	r := lz4.NewReader(bytes.NewReader(src))
+	out, err := io.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("lz4 decompress: %w", err)
+	}
+	return out, nil
+}
+
+func (c *lz4Codec) Close() {}
+
// noneCodec is the identity codec: payloads are stored uncompressed.
// Both directions still return an independent copy so callers never alias
// the store's internal buffers.
type noneCodec struct{}

// Compress returns a copy of src unchanged.
func (c *noneCodec) Compress(src []byte) []byte {
	dup := make([]byte, len(src))
	copy(dup, src)
	return dup
}

// Decompress returns a copy of src unchanged; it never fails.
func (c *noneCodec) Decompress(src []byte) ([]byte, error) {
	dup := make([]byte, len(src))
	copy(dup, src)
	return dup, nil
}

// Close is a no-op: noneCodec keeps no state.
func (c *noneCodec) Close() {}

internal/store/delta.go [A]
--- /dev/null
+++ b/internal/store/delta.go
@@ -1,0 +1,136 @@
+package store
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+const (
+	deltaBlockSize = 16
+	deltaMaxDepth  = 5
+
+	deltaOpCopy   = byte(0x01)
+	deltaOpInsert = byte(0x02)
+	deltaMagic    = "ADP1"
+)
+
+func ComputeDelta(base, target []byte) []byte {
+	type blockKey [deltaBlockSize]byte
+
+	table := make(map[blockKey]int32, len(base)/deltaBlockSize+1)
+	for i := 0; i+deltaBlockSize <= len(base); i += deltaBlockSize {
+		var k blockKey
+		copy(k[:], base[i:])
+		if _, exists := table[k]; !exists {
+			table[k] = int32(i)
+		}
+	}
+
+	buf := make([]byte, 0, len(target)/2+8)
+	buf = append(buf, deltaMagic...)
+	buf = binary.BigEndian.AppendUint32(buf, uint32(len(target)))
+
+	pending := make([]byte, 0, 256)
+
+	flushInsert := func() {
+		for len(pending) > 0 {
+			n := len(pending)
+			if n > 0xFFFF {
+				n = 0xFFFF
+			}
+			buf = append(buf, deltaOpInsert)
+			buf = binary.BigEndian.AppendUint32(buf, uint32(n))
+			buf = append(buf, pending[:n]...)
+			pending = pending[n:]
+		}
+	}
+
+	i := 0
+	for i < len(target) {
+		bestSrc, bestLen := 0, 0
+		if i+deltaBlockSize <= len(target) {
+			var k blockKey
+			copy(k[:], target[i:])
+			if srcOff, ok := table[k]; ok {
+				s := int(srcOff)
+				l := deltaBlockSize
+				for i+l < len(target) && s+l < len(base) && target[i+l] == base[s+l] {
+					l++
+				}
+				bestLen = l
+				bestSrc = s
+			}
+		}
+		if bestLen >= deltaBlockSize {
+			flushInsert()
+			totalLen := bestLen
+			for totalLen > 0 {
+				chunk := totalLen
+				if chunk > 0xFFFF {
+					chunk = 0xFFFF
+				}
+				buf = append(buf, deltaOpCopy)
+				buf = binary.BigEndian.AppendUint32(buf, uint32(bestSrc))
+				buf = binary.BigEndian.AppendUint32(buf, uint32(chunk))
+				bestSrc += chunk
+				totalLen -= chunk
+			}
+			i += bestLen
+		} else {
+			pending = append(pending, target[i])
+			i++
+		}
+	}
+	flushInsert()
+	return buf
+}
+
+func ApplyDelta(base, delta []byte) ([]byte, error) {
+	if len(delta) < 8 {
+		return nil, fmt.Errorf("delta: stream too short (%d bytes)", len(delta))
+	}
+	if string(delta[:4]) != deltaMagic {
+		return nil, fmt.Errorf("delta: invalid magic %q", delta[:4])
+	}
+	targetSize := int(binary.BigEndian.Uint32(delta[4:8]))
+	out := make([]byte, 0, targetSize)
+
+	pos := 8
+	for pos < len(delta) {
+		op := delta[pos]
+		pos++
+		switch op {
+		case deltaOpCopy:
+			if pos+8 > len(delta) {
+				return nil, fmt.Errorf("delta: COPY instruction truncated at pos %d", pos)
+			}
+			srcOff := int(binary.BigEndian.Uint32(delta[pos : pos+4]))
+			length := int(binary.BigEndian.Uint32(delta[pos+4 : pos+8]))
+			pos += 8
+			if srcOff < 0 || length < 0 || srcOff+length > len(base) {
+				return nil, fmt.Errorf("delta: COPY out of bounds (srcOff=%d len=%d baseLen=%d)",
+					srcOff, length, len(base))
+			}
+			out = append(out, base[srcOff:srcOff+length]...)
+
+		case deltaOpInsert:
+			if pos+4 > len(delta) {
+				return nil, fmt.Errorf("delta: INSERT length truncated at pos %d", pos)
+			}
+			length := int(binary.BigEndian.Uint32(delta[pos : pos+4]))
+			pos += 4
+			if pos+length > len(delta) {
+				return nil, fmt.Errorf("delta: INSERT data truncated (need %d, have %d)", length, len(delta)-pos)
+			}
+			out = append(out, delta[pos:pos+length]...)
+			pos += length
+
+		default:
+			return nil, fmt.Errorf("delta: unknown opcode 0x%02x at pos %d", op, pos-1)
+		}
+	}
+	if len(out) != targetSize {
+		return nil, fmt.Errorf("delta: size mismatch (expected %d, got %d)", targetSize, len(out))
+	}
+	return out, nil
+}

internal/store/delta_test.go [A]
--- /dev/null
+++ b/internal/store/delta_test.go
@@ -1,0 +1,189 @@
+package store_test
+
+import (
+	"bytes"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/store"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+func TestDelta_RoundTrip(t *testing.T) {
+	base := bytes.Repeat([]byte("The quick brown fox jumps over the lazy dog\n"), 50)
+	target := make([]byte, len(base))
+	copy(target, base)
+	copy(target[500:], []byte("The QUICK brown fox jumps over the LAZY dog\n"))
+	copy(target[1000:], []byte("A completely different line right here      \n"))
+
+	delta := store.ComputeDelta(base, target)
+	got, err := store.ApplyDelta(base, delta)
+	if err != nil {
+		t.Fatalf("ApplyDelta: %v", err)
+	}
+	if !bytes.Equal(got, target) {
+		t.Error("roundtrip mismatch")
+	}
+}
+
+func TestDelta_IdenticalContent(t *testing.T) {
+	data := bytes.Repeat([]byte("identical repeated data\n"), 30)
+	delta := store.ComputeDelta(data, data)
+	got, err := store.ApplyDelta(data, delta)
+	if err != nil {
+		t.Fatalf("ApplyDelta: %v", err)
+	}
+	if !bytes.Equal(got, data) {
+		t.Error("roundtrip mismatch for identical content")
+	}
+}
+
+func TestDelta_EmptyBase(t *testing.T) {
+	target := []byte("brand new content with no base to compare against")
+	delta := store.ComputeDelta(nil, target)
+	got, err := store.ApplyDelta(nil, delta)
+	if err != nil {
+		t.Fatalf("ApplyDelta: %v", err)
+	}
+	if !bytes.Equal(got, target) {
+		t.Error("roundtrip mismatch for empty base")
+	}
+}
+
+func TestDelta_EmptyTarget(t *testing.T) {
+	base := []byte("content that will be replaced by nothing")
+	delta := store.ComputeDelta(base, nil)
+	got, err := store.ApplyDelta(base, delta)
+	if err != nil {
+		t.Fatalf("ApplyDelta: %v", err)
+	}
+	if len(got) != 0 {
+		t.Errorf("expected empty target, got %d bytes", len(got))
+	}
+}
+
+func TestDelta_SizeSavings(t *testing.T) {
+	base := bytes.Repeat([]byte("line of content that repeats many times\n"), 256)
+	tail := bytes.Repeat([]byte("line of content that repeats many times\n"), 250)
+	tail = append(tail, []byte("changed ending section\n")...)
+	target := tail
+
+	delta := store.ComputeDelta(base, target)
+	if len(delta) >= len(target)/2 {
+		t.Errorf("delta (%d B) is not significantly smaller than target (%d B)", len(delta), len(target))
+	}
+}
+
+func openPackStore(t *testing.T) *store.SQLiteStore {
+	t.Helper()
+	dir := t.TempDir()
+	s, err := store.OpenSQLiteStore(
+		filepath.Join(dir, "store.db"),
+		filepath.Join(dir, "packs"),
+		1,
+		0,
+		"zstd",
+	)
+	if err != nil {
+		t.Fatalf("OpenSQLiteStore: %v", err)
+	}
+	t.Cleanup(func() { s.Close() })
+	return s
+}
+
+func TestGCRepackWithDelta(t *testing.T) {
+	s := openPackStore(t)
+
+	base := bytes.Repeat([]byte("pack file content line repeating many times over\n"), 20)
+	variant := make([]byte, len(base))
+	copy(variant, base)
+	copy(variant[len(variant)-50:], bytes.Repeat([]byte("X"), 50))
+
+	encodeBlob := func(data []byte) ([32]byte, []byte) {
+		id := object.HashBlob(&object.Blob{Content: data})
+		var buf bytes.Buffer
+		object.EncodeBlob(&buf, &object.Blob{Content: data})
+		return id, buf.Bytes()
+	}
+
+	blobID1, rawBlob1 := encodeBlob(base)
+	blobID2, rawBlob2 := encodeBlob(variant)
+
+	tree := &object.Tree{Entries: []object.TreeEntry{
+		{Name: "file1.txt", Mode: object.ModeFile, ObjectID: blobID1},
+		{Name: "file2.txt", Mode: object.ModeFile, ObjectID: blobID2},
+	}}
+	treeID := object.HashTree(tree)
+	var treeBuf bytes.Buffer
+	object.EncodeTree(&treeBuf, tree)
+
+	sig := object.Signature{Name: "Test", Email: "t@x.com", Timestamp: time.Now()}
+	commit := &object.Commit{
+		TreeID:    treeID,
+		Author:    sig,
+		Committer: sig,
+		Message:   "gc repack delta test",
+	}
+	commitID := object.HashCommit(commit)
+	var commitBuf bytes.Buffer
+	object.EncodeCommit(&commitBuf, commit)
+
+	tx, err := s.Begin()
+	if err != nil {
+		t.Fatalf("Begin: %v", err)
+	}
+	for _, pair := range []struct {
+		id   [32]byte
+		kind string
+		raw  []byte
+	}{
+		{blobID1, "blob", rawBlob1},
+		{blobID2, "blob", rawBlob2},
+		{treeID, "tree", treeBuf.Bytes()},
+		{commitID, "commit", commitBuf.Bytes()},
+	} {
+		if err := s.WriteObject(tx, pair.id, pair.kind, pair.raw); err != nil {
+			t.Fatalf("WriteObject %s: %v", pair.kind, err)
+		}
+	}
+	if err := s.Commit(tx); err != nil {
+		t.Fatalf("Commit: %v", err)
+	}
+
+	btx, _ := s.Begin()
+	if err := s.SetBookmark(btx, store.Bookmark{Name: "main", CommitID: commitID}); err != nil {
+		t.Fatalf("SetBookmark: %v", err)
+	}
+	s.Commit(btx) //nolint:errcheck
+
+	if _, err := s.GC(90, func(string, int, int) {}); err != nil {
+		t.Fatalf("GC: %v", err)
+	}
+
+	_, got1, err := s.ReadObject(blobID1)
+	if err != nil {
+		t.Fatalf("ReadObject blob1 after GC: %v", err)
+	}
+	_, got2, err := s.ReadObject(blobID2)
+	if err != nil {
+		t.Fatalf("ReadObject blob2 after GC: %v", err)
+	}
+
+	b1, err := object.DecodeBlob(got1)
+	if err != nil {
+		t.Fatalf("DecodeBlob1: %v", err)
+	}
+	b2, err := object.DecodeBlob(got2)
+	if err != nil {
+		t.Fatalf("DecodeBlob2: %v", err)
+	}
+	if !bytes.Equal(b1.Content, base) {
+		t.Error("blob1 content mismatch after GC repack")
+	}
+	if !bytes.Equal(b2.Content, variant) {
+		t.Error("blob2 content mismatch after GC repack")
+	}
+}

internal/store/gc.go [A]
--- /dev/null
+++ b/internal/store/gc.go
@@ -1,0 +1,522 @@
+package store
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"time"
+
+	"arche/internal/object"
+)
+
// GCStats summarizes one garbage-collection run.
type GCStats struct {
	ObjectsDeleted     int   // unreachable rows removed from the objects table
	PackEntriesDeleted int   // dead entries dropped from the pack index
	PackFilesRebuilt   int   // pack files written by the repack phase
	BytesFreed         int64 // on-disk bytes reclaimed from removed pack files
}

// GCProgress receives coarse progress callbacks: a phase name plus a
// done/total pair ((0, 0) when a phase has no meaningful count).
type GCProgress func(phase string, done, total int)

// GCer is implemented by stores that support garbage collection.
type GCer interface {
	GC(retentionDays int, progress GCProgress) (*GCStats, error)
}
+
// GC runs mark-and-sweep over the object store: roots are collected from
// bookmarks, changes and the operation log; everything reachable is marked;
// unreachable objects are deleted and pack files are rewritten without dead
// blobs. retentionDays bounds how long unreferenced obsolete markers are
// kept (values <= 0 fall back to 90 days). progress may be nil.
func (s *SQLiteStore) GC(retentionDays int, progress GCProgress) (*GCStats, error) {
	if progress == nil {
		progress = func(string, int, int) {}
	}
	if retentionDays <= 0 {
		retentionDays = 90
	}
	retentionCutoff := time.Now().AddDate(0, 0, -retentionDays).Unix()

	// live accumulates every reachable object ID across all mark phases.
	live := make(map[[32]byte]struct{})

	progress("roots", 0, 0)
	roots, err := s.gcCollectRoots()
	if err != nil {
		return nil, fmt.Errorf("gc: collect roots: %w", err)
	}
	for i, root := range roots {
		progress("mark", i+1, len(roots))
		if err := s.gcMark(root, live); err != nil {
			return nil, fmt.Errorf("gc: mark: %w", err)
		}
	}
	// Obsolete markers are not part of the commit DAG; they are retained by
	// their own rules (live predecessor, or within the retention window).
	if err := s.gcMarkObsolete(live, retentionCutoff); err != nil {
		return nil, fmt.Errorf("gc: mark obsolete: %w", err)
	}

	progress("sweep", 0, 0)
	stats := &GCStats{}

	deleted, err := s.gcSweepObjects(live)
	if err != nil {
		return nil, fmt.Errorf("gc: sweep objects: %w", err)
	}
	stats.ObjectsDeleted = deleted

	packStats, err := s.gcRepackPacks(live, progress)
	if err != nil {
		return nil, fmt.Errorf("gc: repack: %w", err)
	}
	stats.PackEntriesDeleted = packStats.PackEntriesDeleted
	stats.PackFilesRebuilt = packStats.PackFilesRebuilt
	stats.BytesFreed = packStats.BytesFreed

	return stats, nil
}
+
+func (s *SQLiteStore) gcCollectRoots() ([][32]byte, error) {
+	seen := make(map[[32]byte]struct{})
+	var roots [][32]byte
+
+	add := func(raw []byte) {
+		if len(raw) != 32 {
+			return
+		}
+		var id [32]byte
+		copy(id[:], raw)
+		if _, ok := seen[id]; !ok {
+			seen[id] = struct{}{}
+			roots = append(roots, id)
+		}
+	}
+	addHex := func(h string) {
+		b, err := hex.DecodeString(h)
+		if err == nil && len(b) == 32 {
+			add(b)
+		}
+	}
+
+	rows, err := s.db.Query("SELECT commit_id FROM bookmarks")
+	if err != nil {
+		return nil, err
+	}
+	for rows.Next() {
+		var raw []byte
+		if scanErr := rows.Scan(&raw); scanErr != nil {
+			rows.Close()
+			return nil, scanErr
+		}
+		add(raw)
+	}
+	rows.Close()
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	rows, err = s.db.Query("SELECT commit_id FROM changes WHERE commit_id IS NOT NULL")
+	if err != nil {
+		return nil, err
+	}
+	for rows.Next() {
+		var raw []byte
+		if scanErr := rows.Scan(&raw); scanErr != nil {
+			rows.Close()
+			return nil, scanErr
+		}
+		add(raw)
+	}
+	rows.Close()
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	rows, err = s.db.Query("SELECT before, after FROM operations")
+	if err != nil {
+		return nil, err
+	}
+	for rows.Next() {
+		var before, after string
+		if scanErr := rows.Scan(&before, &after); scanErr != nil {
+			rows.Close()
+			return nil, scanErr
+		}
+		for _, snap := range []string{before, after} {
+			var rs struct {
+				Tip       string            `json:"tip"`
+				Bookmarks map[string]string `json:"bookmarks"`
+			}
+			if json.Unmarshal([]byte(snap), &rs) == nil {
+				addHex(rs.Tip)
+				for _, v := range rs.Bookmarks {
+					addHex(v)
+				}
+			}
+		}
+	}
+	rows.Close()
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return roots, nil
+}
+
// gcMark adds startID and everything reachable from it to live, walking
// the object DAG with an explicit stack (no recursion). Objects that
// cannot be read are skipped: GC is best-effort and must not fail on an
// already-missing object.
func (s *SQLiteStore) gcMark(startID [32]byte, live map[[32]byte]struct{}) error {
	queue := [][32]byte{startID}
	for len(queue) > 0 {
		// Pop from the tail (LIFO); visit order is irrelevant for marking.
		id := queue[len(queue)-1]
		queue = queue[:len(queue)-1]

		if _, ok := live[id]; ok {
			continue
		}

		kind, raw, err := s.ReadObject(id)
		if err != nil {
			// Unreadable or missing object: skip rather than abort the run.
			continue
		}
		live[id] = struct{}{}

		switch object.Kind(kind) {
		case object.KindCommit:
			c, err := object.DecodeCommit(raw)
			if err != nil {
				return err
			}
			queue = append(queue, c.TreeID)
			queue = append(queue, c.Parents...)

		case object.KindTree:
			t, err := object.DecodeTree(raw)
			if err != nil {
				return err
			}
			for _, e := range t.Entries {
				queue = append(queue, e.ObjectID)
			}

		case object.KindConflict:
			c, err := object.DecodeConflict(raw)
			if err != nil {
				return err
			}
			// Conflict sides reference blobs directly; blobs have no
			// children, so marking without enqueueing is equivalent.
			if c.Base != nil && c.Base.BlobID != object.ZeroID {
				live[c.Base.BlobID] = struct{}{}
			}
			if c.Ours.BlobID != object.ZeroID {
				live[c.Ours.BlobID] = struct{}{}
			}
			if c.Theirs.BlobID != object.ZeroID {
				live[c.Theirs.BlobID] = struct{}{}
			}

		case object.KindBlob:
			// Already marked above; no children to follow.

		case object.KindObsolete:
			// Handled separately by gcMarkObsolete; the normal DAG traversal
			// from roots never reaches obsolete markers.
		}
	}
	return nil
}
+
// gcMarkObsolete decides which obsolete markers survive this GC run: a
// marker stays live while the commit it supersedes (its predecessor) is
// live, or while the marker itself is newer than retentionCutoff (Unix
// seconds). Rows whose payload fails to decompress or decode are skipped
// so a corrupt marker cannot wedge GC.
func (s *SQLiteStore) gcMarkObsolete(live map[[32]byte]struct{}, retentionCutoff int64) error {
	rows, err := s.db.Query("SELECT id, data FROM objects WHERE kind = ?", string(object.KindObsolete))
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var idRaw, compressed []byte
		if err := rows.Scan(&idRaw, &compressed); err != nil {
			return err
		}
		raw, err := s.codec.Decompress(compressed)
		if err != nil {
			// Corrupt payload: drop the marker rather than abort GC.
			continue
		}
		o, err := object.DecodeObsolete(raw)
		if err != nil {
			continue
		}
		var id [32]byte
		copy(id[:], idRaw)

		// Keep markers whose predecessor is still reachable.
		if _, ok := live[o.Predecessor]; ok {
			live[id] = struct{}{}
			continue
		}
		// Otherwise keep them only within the retention window.
		if o.Timestamp > retentionCutoff {
			live[id] = struct{}{}
		}
	}
	return rows.Err()
}
+
+func (s *SQLiteStore) gcSweepObjects(live map[[32]byte]struct{}) (int, error) {
+	rows, err := s.db.Query("SELECT id FROM objects")
+	if err != nil {
+		return 0, err
+	}
+	var dead [][]byte
+	for rows.Next() {
+		var id []byte
+		if err := rows.Scan(&id); err != nil {
+			rows.Close()
+			return 0, err
+		}
+		var key [32]byte
+		copy(key[:], id)
+		if _, ok := live[key]; !ok {
+			cp := make([]byte, len(id))
+			copy(cp, id)
+			dead = append(dead, cp)
+		}
+	}
+	rows.Close()
+	if err := rows.Err(); err != nil {
+		return 0, err
+	}
+	if len(dead) == 0 {
+		return 0, nil
+	}
+
+	tx, err := s.db.Begin()
+	if err != nil {
+		return 0, err
+	}
+	stmt, err := tx.Prepare("DELETE FROM objects WHERE id = ?")
+	if err != nil {
+		tx.Rollback() //nolint:errcheck
+		return 0, err
+	}
+	for _, id := range dead {
+		if _, err := stmt.Exec(id); err != nil {
+			stmt.Close()
+			tx.Rollback() //nolint:errcheck
+			return 0, err
+		}
+	}
+	stmt.Close()
+	if err := tx.Commit(); err != nil {
+		return 0, err
+	}
+	return len(dead), nil
+}
+
+func (s *SQLiteStore) gcRepackPacks(live map[[32]byte]struct{}, progress GCProgress) (*GCStats, error) {
+	type packRec struct {
+		blobID   [32]byte
+		packFile string
+		offset   int64
+		rawSize  int64
+	}
+
+	rows, err := s.db.Query("SELECT blob_id, pack_file, offset, raw_size FROM pack_index")
+	if err != nil {
+		return nil, err
+	}
+	var liveEntries []packRec
+	var deadCount int
+	for rows.Next() {
+		var raw []byte
+		var e packRec
+		if err := rows.Scan(&raw, &e.packFile, &e.offset, &e.rawSize); err != nil {
+			rows.Close()
+			return nil, err
+		}
+		copy(e.blobID[:], raw)
+		if _, ok := live[e.blobID]; ok {
+			liveEntries = append(liveEntries, e)
+		} else {
+			deadCount++
+		}
+	}
+	rows.Close()
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	stats := &GCStats{PackEntriesDeleted: deadCount}
+	if deadCount == 0 {
+		return stats, nil
+	}
+
+	oldPackFiles := make(map[string]int64)
+	pRows, err := s.db.Query("SELECT DISTINCT pack_file FROM pack_index")
+	if err != nil {
+		return nil, err
+	}
+	for pRows.Next() {
+		var pf string
+		if err := pRows.Scan(&pf); err != nil {
+			pRows.Close()
+			return nil, err
+		}
+		info, statErr := os.Stat(filepath.Join(s.packDir, pf))
+		if statErr == nil {
+			oldPackFiles[pf] = info.Size()
+		} else {
+			oldPackFiles[pf] = 0
+		}
+	}
+	pRows.Close()
+
+	s.pack.mu.Lock()
+	if s.pack.cur != nil {
+		s.pack.cur.Sync() //nolint:errcheck
+		s.pack.cur.Close()
+		s.pack.cur = nil
+	}
+	s.pack.mu.Unlock()
+
+	if len(liveEntries) == 0 {
+		if _, err := s.db.Exec("DELETE FROM pack_index"); err != nil {
+			return nil, err
+		}
+		for pf, size := range oldPackFiles {
+			os.Remove(filepath.Join(s.packDir, pf)) //nolint:errcheck
+			stats.BytesFreed += size
+		}
+		return stats, nil
+	}
+
+	newPM, err := newPackManager(s.packDir, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	type newEntry struct {
+		blobID      [32]byte
+		packFile    string
+		offset      int64
+		rawSize     int64
+		deltaBaseID [32]byte
+		deltaDepth  int
+	}
+
+	sort.Slice(liveEntries, func(i, j int) bool {
+		return liveEntries[i].rawSize > liveEntries[j].rawSize
+	})
+
+	type rawBlob struct {
+		blobID  [32]byte
+		rawSize int64
+		content []byte
+	}
+	rawBlobs := make([]rawBlob, 0, len(liveEntries))
+	for _, e := range liveEntries {
+		_, content, err := s.ReadObject(e.blobID)
+		if err != nil {
+			newPM.close()
+			return nil, fmt.Errorf("gc: read blob %s for repack: %w",
+				hex.EncodeToString(e.blobID[:])[:8], err)
+		}
+		rawBlobs = append(rawBlobs, rawBlob{blobID: e.blobID, rawSize: e.rawSize, content: content})
+	}
+
+	var newEntries []newEntry
+	depthOf := make(map[[32]byte]int, len(rawBlobs))
+
+	for i, rb := range rawBlobs {
+		progress("repack", i+1, len(rawBlobs))
+
+		var bestBase *rawBlob
+		if i > 0 {
+			prev := &rawBlobs[i-1]
+			if prev.rawSize > 0 && rb.rawSize > 0 {
+				lo, hi := prev.rawSize, rb.rawSize
+				if lo > hi {
+					lo, hi = hi, lo
+				}
+				if hi <= lo*6/5 && depthOf[prev.blobID] < deltaMaxDepth {
+					bestBase = prev
+				}
+			}
+		}
+
+		if bestBase != nil {
+			deltaBytes := ComputeDelta(bestBase.content, rb.content)
+			compDelta := s.codec.Compress(deltaBytes)
+			compFull := s.codec.Compress(rb.content)
+			if len(compDelta) < len(compFull)*4/5 {
+				pe, writeErr := newPM.write(compDelta, rb.rawSize)
+				if writeErr != nil {
+					newPM.close()
+					return nil, fmt.Errorf("gc: write delta blob: %w", writeErr)
+				}
+				d := depthOf[bestBase.blobID] + 1
+				depthOf[rb.blobID] = d
+				newEntries = append(newEntries, newEntry{
+					blobID:      rb.blobID,
+					packFile:    pe.packFile,
+					offset:      pe.offset,
+					rawSize:     rb.rawSize,
+					deltaBaseID: bestBase.blobID,
+					deltaDepth:  d,
+				})
+				continue
+			}
+		}
+
+		compFull := s.codec.Compress(rb.content)
+		pe, err := newPM.write(compFull, rb.rawSize)
+		if err != nil {
+			newPM.close()
+			return nil, fmt.Errorf("gc: write compacted blob: %w", err)
+		}
+		depthOf[rb.blobID] = 0
+		newEntries = append(newEntries, newEntry{
+			blobID:   rb.blobID,
+			packFile: pe.packFile,
+			offset:   pe.offset,
+			rawSize:  rb.rawSize,
+		})
+	}
+	newPM.close()
+
+	tx, err := s.db.Begin()
+	if err != nil {
+		return nil, err
+	}
+	if _, err := tx.Exec("DELETE FROM pack_index"); err != nil {
+		tx.Rollback() //nolint:errcheck
+		return nil, err
+	}
+	stmt, err := tx.Prepare(
+		"INSERT INTO pack_index (blob_id, pack_file, offset, raw_size, delta_base_id, delta_depth) VALUES (?, ?, ?, ?, ?, ?)",
+	)
+	if err != nil {
+		tx.Rollback() //nolint:errcheck
+		return nil, err
+	}
+	for _, ne := range newEntries {
+		var deltaBaseID interface{}
+		if ne.deltaBaseID != ([32]byte{}) {
+			deltaBaseID = ne.deltaBaseID[:]
+		}
+		if _, err := stmt.Exec(ne.blobID[:], ne.packFile, ne.offset, ne.rawSize, deltaBaseID, ne.deltaDepth); err != nil {
+			stmt.Close()
+			tx.Rollback() //nolint:errcheck
+			return nil, err
+		}
+	}
+	stmt.Close()
+	if err := tx.Commit(); err != nil {
+		return nil, err
+	}
+
+	newPackFiles := make(map[string]struct{})
+	for _, ne := range newEntries {
+		newPackFiles[ne.packFile] = struct{}{}
+	}
+	stats.PackFilesRebuilt = len(newPackFiles)
+
+	for pf, size := range oldPackFiles {
+		if _, isNew := newPackFiles[pf]; !isNew {
+			os.Remove(filepath.Join(s.packDir, pf)) //nolint:errcheck
+			stats.BytesFreed += size
+		}
+	}
+
+	return stats, nil
+}

internal/store/migrate/migrate.go [A]
--- /dev/null
+++ b/internal/store/migrate/migrate.go
@@ -1,0 +1,113 @@
+package migrate
+
+import (
+	"database/sql"
+	_ "embed"
+	"fmt"
+	"time"
+)
+
// Each migration's DDL is embedded at build time so the binary is
// self-contained and never reads SQL files from disk at runtime.

//go:embed sql/001_initial.sql
var sql001 string

//go:embed sql/002_conflicts.sql
var sql002 string

//go:embed sql/003_wcache_mode.sql
var sql003 string

//go:embed sql/004_pack_delta.sql
var sql004 string

//go:embed sql/005_zstd_dict.sql
var sql005 string

//go:embed sql/006_file_locks.sql
var sql006 string

//go:embed sql/007_wcache_dirty.sql
var sql007 string

// migration pairs a schema version number with the SQL that produces it.
type migration struct {
	version int
	sql     string
}

// all lists every migration in ascending version order; Run applies any
// entry whose version is missing from schema_migrations.
var all = []migration{
	{1, sql001},
	{2, sql002},
	{3, sql003},
	{4, sql004},
	{5, sql005},
	{6, sql006},
	{7, sql007},
}
+
+func Run(db *sql.DB) error {
+	if err := ensureMigrationsTable(db); err != nil {
+		return err
+	}
+
+	applied, err := appliedVersions(db)
+	if err != nil {
+		return err
+	}
+
+	for _, m := range all {
+		if applied[m.version] {
+			continue
+		}
+		if err := applyMigration(db, m); err != nil {
+			return fmt.Errorf("migrate v%d: %w", m.version, err)
+		}
+	}
+	return nil
+}
+
// ensureMigrationsTable creates the version-tracking table on first use.
func ensureMigrationsTable(db *sql.DB) error {
	const ddl = `CREATE TABLE IF NOT EXISTS schema_migrations (
		version    INTEGER PRIMARY KEY,
		applied_at INTEGER NOT NULL
	)`
	_, err := db.Exec(ddl)
	return err
}
+
// appliedVersions returns the set of migration versions already recorded
// in schema_migrations.
func appliedVersions(db *sql.DB) (map[int]bool, error) {
	rows, err := db.Query("SELECT version FROM schema_migrations")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	seen := make(map[int]bool)
	for rows.Next() {
		var version int
		if err := rows.Scan(&version); err != nil {
			return nil, err
		}
		seen[version] = true
	}
	return seen, rows.Err()
}
+
+func applyMigration(db *sql.DB, m migration) error {
+	tx, err := db.Begin()
+	if err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(m.sql); err != nil {
+		tx.Rollback()
+		return fmt.Errorf("exec: %w", err)
+	}
+
+	if _, err := tx.Exec(
+		"INSERT INTO schema_migrations (version, applied_at) VALUES (?, ?)",
+		m.version, time.Now().Unix(),
+	); err != nil {
+		tx.Rollback()
+		return fmt.Errorf("record: %w", err)
+	}
+
+	return tx.Commit()
+}

internal/store/migrate/sql/001_initial.sql [A]
--- /dev/null
+++ b/internal/store/migrate/sql/001_initial.sql
@@ -1,0 +1,45 @@
-- Baseline schema for an Arche repository store.

-- Content-addressed object storage: id is the 32-byte object hash, kind
-- its type tag, data the encoded (and possibly compressed) payload.
CREATE TABLE IF NOT EXISTS objects (
    id   BLOB    PRIMARY KEY,
    kind TEXT    NOT NULL,
    data BLOB    NOT NULL
);

-- Named pointers to commits; remote records which remote the bookmark
-- tracks (NULL for purely local bookmarks).
CREATE TABLE IF NOT EXISTS bookmarks (
    name      TEXT PRIMARY KEY,
    commit_id BLOB NOT NULL REFERENCES objects(id),
    remote    TEXT
);

-- Per-commit phase stored as an integer enum (semantics defined in the
-- object package).
CREATE TABLE IF NOT EXISTS phases (
    commit_id BLOB    PRIMARY KEY REFERENCES objects(id),
    phase     INTEGER NOT NULL
);

-- Stable change identities mapped to their current commit; commit_id is
-- NULL while a change has no commit.
CREATE TABLE IF NOT EXISTS changes (
    change_id TEXT PRIMARY KEY,
    commit_id BLOB REFERENCES objects(id)
);

-- Location of blobs stored in pack files instead of objects.data.
CREATE TABLE IF NOT EXISTS pack_index (
    blob_id   BLOB    PRIMARY KEY,
    pack_file TEXT    NOT NULL,
    offset    INTEGER NOT NULL,
    raw_size  INTEGER NOT NULL
);

-- Working-copy cache: stat fingerprint (inode/mtime/size) -> blob hash,
-- used to avoid re-hashing files that have not changed.
CREATE TABLE IF NOT EXISTS wcache (
    path     TEXT    PRIMARY KEY,
    inode    INTEGER NOT NULL,
    mtime_ns INTEGER NOT NULL,
    size     INTEGER NOT NULL,
    blob_id  BLOB    NOT NULL
);

-- Append-only operation log; before/after hold JSON snapshots of repo
-- state (tip + bookmarks), which GC also mines for root commit IDs.
CREATE TABLE IF NOT EXISTS operations (
    seq       INTEGER PRIMARY KEY AUTOINCREMENT,
    kind      TEXT    NOT NULL,
    timestamp INTEGER NOT NULL,
    before    TEXT    NOT NULL DEFAULT '',
    after     TEXT    NOT NULL DEFAULT '',
    metadata  TEXT
);

internal/store/migrate/sql/002_conflicts.sql [A]
--- /dev/null
+++ b/internal/store/migrate/sql/002_conflicts.sql
@@ -1,0 +1,3 @@
-- Paths currently in a conflicted state in the working copy. Only presence
-- is tracked here; NOTE(review): conflict contents appear to live in
-- conflict objects — confirm against the working-copy code.
CREATE TABLE IF NOT EXISTS conflicts (
    path TEXT PRIMARY KEY
);

internal/store/migrate/sql/003_wcache_mode.sql [A]
--- /dev/null
+++ b/internal/store/migrate/sql/003_wcache_mode.sql
@@ -1,0 +1,2 @@
-- Add entry mode to wcache so fast-path snapshot can avoid re-statting unmodified files.
-- The DEFAULT of 0 marks pre-migration rows as "mode unknown".
ALTER TABLE wcache ADD COLUMN mode INTEGER NOT NULL DEFAULT 0;

internal/store/migrate/sql/004_pack_delta.sql [A]
--- /dev/null
+++ b/internal/store/migrate/sql/004_pack_delta.sql
@@ -1,0 +1,5 @@
-- Add delta-compression support to pack_index.
-- delta_base_id: the blob ID this entry is encoded relative to (NULL = full object).
-- delta_depth: length of the delta chain (0 = full object, 1 = one level of delta, …).
-- Existing rows keep delta_base_id NULL / delta_depth 0, i.e. full objects.
ALTER TABLE pack_index ADD COLUMN delta_base_id BLOB;
ALTER TABLE pack_index ADD COLUMN delta_depth INTEGER NOT NULL DEFAULT 0;

internal/store/migrate/sql/005_zstd_dict.sql [A]
--- /dev/null
+++ b/internal/store/migrate/sql/005_zstd_dict.sql
@@ -1,0 +1,9 @@
-- Persistent zstd dictionary storage.
-- Each row is a trained dictionary; the decoder loads all stored dicts so it
-- can decompress both old (no-dict) and new (with-dict) compressed frames.
-- The encoder always uses the latest dict (highest id).
-- NOTE(review): OpenSQLiteStore currently loads only the newest row (ORDER
-- BY id DESC LIMIT 1) for both encode and decode — reconcile with the
-- "loads all stored dicts" claim above.
CREATE TABLE IF NOT EXISTS zstd_dicts (
    id         INTEGER PRIMARY KEY AUTOINCREMENT,
    created_at INTEGER NOT NULL,
    dict       BLOB    NOT NULL
);

internal/store/migrate/sql/006_file_locks.sql [A]
--- /dev/null
+++ b/internal/store/migrate/sql/006_file_locks.sql
@@ -1,0 +1,8 @@
-- Exclusive file locks for large-binary / lockable files (Perforce-style).
-- owner format: "username@hostname"
-- acquired_at is a Unix timestamp.
CREATE TABLE IF NOT EXISTS file_locks (
    path        TEXT    PRIMARY KEY,
    owner       TEXT    NOT NULL,
    acquired_at INTEGER NOT NULL,
    comment     TEXT    NOT NULL DEFAULT ''
);

internal/store/migrate/sql/007_wcache_dirty.sql [A]
--- /dev/null
+++ b/internal/store/migrate/sql/007_wcache_dirty.sql
@@ -1,0 +1,3 @@
-- Add dirty flag to wcache so the fsnotify watcher can mark changed paths.
-- dirty = 0 (clean), dirty = 1 (watcher detected a change, needs re-hash).
-- Existing rows default to clean (0).
ALTER TABLE wcache ADD COLUMN dirty INTEGER NOT NULL DEFAULT 0;

internal/store/pack.go [A]
--- /dev/null
+++ b/internal/store/pack.go
@@ -1,0 +1,163 @@
+package store
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+)
+
+const (
+	packMagic           = "ARCHE-PACK-V1\x00\x00"
+	defaultPackSealSize = 256 * 1024 * 1024
+)
+
+type packManager struct {
+	dir      string
+	mu       sync.Mutex
+	cur      *os.File
+	size     int64
+	name     string
+	sealSize int64
+}
+
+func newPackManager(dir string, sealSize int) (*packManager, error) {
+	if err := os.MkdirAll(dir, 0o755); err != nil {
+		return nil, fmt.Errorf("pack: create dir %s: %w", dir, err)
+	}
+	ss := int64(sealSize)
+	if ss <= 0 {
+		ss = defaultPackSealSize
+	}
+	pm := &packManager{dir: dir, sealSize: ss}
+	return pm, nil
+}
+
+func (pm *packManager) close() {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	if pm.cur != nil {
+		_ = pm.cur.Close()
+		pm.cur = nil
+	}
+}
+
// packEntry locates one stored blob: the pack file it lives in, the byte
// offset of its 16-byte frame header, and the blob's uncompressed size.
type packEntry struct {
	packFile string
	offset   int64
	rawSize  int64
}
+
+func (pm *packManager) write(compressed []byte, rawSize int64) (packEntry, error) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	if pm.cur == nil || pm.size >= pm.sealSize {
+		if err := pm.openNewPack(); err != nil {
+			return packEntry{}, err
+		}
+	}
+
+	offset := pm.size
+
+	hdr := make([]byte, 16)
+	binary.BigEndian.PutUint64(hdr[0:8], uint64(rawSize))
+	binary.BigEndian.PutUint64(hdr[8:16], uint64(len(compressed)))
+	_, err := pm.cur.Write(hdr)
+	if err != nil {
+		return packEntry{}, fmt.Errorf("pack write header: %w", err)
+	}
+	_, err = pm.cur.Write(compressed)
+	if err != nil {
+		return packEntry{}, fmt.Errorf("pack write data: %w", err)
+	}
+
+	pm.size += int64(16 + len(compressed))
+	return packEntry{packFile: pm.name, offset: offset, rawSize: rawSize}, nil
+}
+
+func (pm *packManager) read(packFile string, offset int64) ([]byte, error) {
+	path := filepath.Join(pm.dir, packFile)
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("pack open %s: %w", packFile, err)
+	}
+	defer f.Close()
+
+	magic := make([]byte, len(packMagic))
+	if _, err := readFull(f, magic); err != nil {
+		return nil, fmt.Errorf("pack %s: cannot read magic header: %w", packFile, err)
+	}
+	if string(magic) != packMagic {
+		return nil, fmt.Errorf("pack %s: invalid magic header (file may be corrupt or not an Arche pack)", packFile)
+	}
+
+	if _, err := f.Seek(offset, 0); err != nil {
+		return nil, fmt.Errorf("pack seek: %w", err)
+	}
+
+	hdr := make([]byte, 16)
+	if _, err := readFull(f, hdr); err != nil {
+		return nil, fmt.Errorf("pack read header: %w", err)
+	}
+	compSize := binary.BigEndian.Uint64(hdr[8:16])
+
+	comp := make([]byte, compSize)
+	if _, err := readFull(f, comp); err != nil {
+		return nil, fmt.Errorf("pack read data: %w", err)
+	}
+	return comp, nil
+}
+
+func (pm *packManager) openNewPack() error {
+	if pm.cur != nil {
+		if err := pm.cur.Sync(); err != nil {
+			return fmt.Errorf("pack sync: %w", err)
+		}
+		if err := pm.cur.Close(); err != nil {
+			return fmt.Errorf("pack close: %w", err)
+		}
+	}
+
+	name := fmt.Sprintf("%016x.pack", uniquePackID())
+	path := filepath.Join(pm.dir, name)
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0o644)
+	if err != nil {
+		return fmt.Errorf("pack create %s: %w", name, err)
+	}
+
+	if _, err := f.WriteString(packMagic); err != nil {
+		f.Close()
+		return fmt.Errorf("pack write magic: %w", err)
+	}
+
+	pm.cur = f
+	pm.name = name
+	pm.size = int64(len(packMagic))
+	return nil
+}
+
// Pack-file name allocation: a process-wide counter combined with the PID.
var (
	packCounter uint64
	packMu      sync.Mutex
)

// uniquePackID returns an identifier unique within this process (and, via
// the PID in the high bits, very likely across concurrent processes too).
func uniquePackID() uint64 {
	packMu.Lock()
	defer packMu.Unlock()
	packCounter++
	return uint64(os.Getpid())<<32 | packCounter
}
+
+func readFull(f *os.File, buf []byte) (int, error) {
+	total := 0
+	for total < len(buf) {
+		n, err := f.Read(buf[total:])
+		total += n
+		if err != nil {
+			return total, err
+		}
+	}
+	return total, nil
+}

internal/store/sqlite.go [A]
--- /dev/null
+++ b/internal/store/sqlite.go
@@ -1,0 +1,733 @@
+package store
+
+import (
+	"database/sql"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/store/migrate"
+
+	"github.com/klauspost/compress/zstd"
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// defaultPackThreshold is the blob size in bytes above which blob data is
+// written to append-only pack files instead of the SQLite objects table.
+const defaultPackThreshold = 128 * 1024
+
+// SQLiteStore persists repository state in a single SQLite database plus
+// pack files for large blobs. It implements Store, LockStore and
+// DictTrainer.
+type SQLiteStore struct {
+	db              *sql.DB // metadata and small objects; limited to one connection
+	pack            *packManager // append-only pack files for large blobs
+	codec           codec // compression codec, possibly dictionary-backed
+	packDir         string // directory holding pack files
+	packThreshold   int // blobs larger than this (bytes) go to packs
+	compressionName string // codec name, kept so TrainAndSaveDict can rebuild the codec
+}
+
+// OpenSQLiteStore opens (or creates) the store database at dbPath with pack
+// files under packDir. packThreshold is the pack cut-over size in bytes
+// (<= 0 selects defaultPackThreshold); packSealSize is forwarded to the
+// pack manager; compression names the codec for object data. On failure,
+// any partially opened resources are closed before returning.
+func OpenSQLiteStore(dbPath, packDir string, packThreshold, packSealSize int, compression string) (*SQLiteStore, error) {
+	if packThreshold <= 0 {
+		packThreshold = defaultPackThreshold
+	}
+	// _busy_timeout lets concurrent openers wait up to 5s rather than
+	// failing immediately with SQLITE_BUSY.
+	db, err := sql.Open("sqlite3", dbPath+"?_busy_timeout=5000")
+	if err != nil {
+		return nil, fmt.Errorf("store open %s: %w", dbPath, err)
+	}
+	// One connection serializes all access through this handle.
+	db.SetMaxOpenConns(1)
+
+	if _, err := db.Exec("PRAGMA journal_mode = WAL; PRAGMA foreign_keys = ON;"); err != nil {
+		db.Close()
+		return nil, fmt.Errorf("store pragma: %w", err)
+	}
+
+	if err := migrate.Run(db); err != nil {
+		db.Close()
+		return nil, fmt.Errorf("store migrate: %w", err)
+	}
+
+	pm, err := newPackManager(packDir, packSealSize)
+	if err != nil {
+		db.Close()
+		return nil, err
+	}
+
+	// Best-effort load of the newest trained zstd dictionary; a scan error
+	// (e.g. no dictionary trained yet) simply leaves dictData nil.
+	var dictData []byte
+	_ = db.QueryRow("SELECT dict FROM zstd_dicts ORDER BY id DESC LIMIT 1").Scan(&dictData)
+
+	cd, err := newCodec(compression, dictData)
+	if err != nil {
+		db.Close()
+		pm.close()
+		return nil, err
+	}
+
+	return &SQLiteStore{
+		db:              db,
+		pack:            pm,
+		codec:           cd,
+		packDir:         packDir,
+		packThreshold:   packThreshold,
+		compressionName: compression,
+	}, nil
+}
+
+// Begin starts a new transaction on the underlying database.
+func (s *SQLiteStore) Begin() (*Tx, error) {
+	sqlTx, err := s.db.Begin()
+	if err != nil {
+		return nil, fmt.Errorf("begin tx: %w", err)
+	}
+	return &Tx{sqlTx: sqlTx}, nil
+}
+
+// Commit commits tx.
+func (s *SQLiteStore) Commit(tx *Tx) error {
+	return tx.sqlTx.Commit()
+}
+
+// Rollback aborts tx.
+func (s *SQLiteStore) Rollback(tx *Tx) error {
+	return tx.sqlTx.Rollback()
+}
+
+// Close releases the pack manager and codec, then closes the database.
+// Only the database close error is reported to the caller.
+func (s *SQLiteStore) Close() error {
+	s.pack.close()
+	s.codec.Close()
+	return s.db.Close()
+}
+
+// AddConflict records path as conflicted; re-adding is a no-op.
+func (s *SQLiteStore) AddConflict(tx *Tx, path string) error {
+	_, err := tx.sqlTx.Exec("INSERT OR IGNORE INTO conflicts (path) VALUES (?)", path)
+	return err
+}
+
+// ClearConflict removes the conflict marker for path, if any.
+func (s *SQLiteStore) ClearConflict(tx *Tx, path string) error {
+	_, err := tx.sqlTx.Exec("DELETE FROM conflicts WHERE path = ?", path)
+	return err
+}
+
+// ClearAllConflicts removes every conflict marker.
+func (s *SQLiteStore) ClearAllConflicts(tx *Tx) error {
+	_, err := tx.sqlTx.Exec("DELETE FROM conflicts")
+	return err
+}
+
+// ListConflicts returns all conflicted paths in lexical order.
+func (s *SQLiteStore) ListConflicts() ([]string, error) {
+	rows, err := s.db.Query("SELECT path FROM conflicts ORDER BY path")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []string
+	for rows.Next() {
+		var p string
+		if err := rows.Scan(&p); err != nil {
+			return nil, err
+		}
+		out = append(out, p)
+	}
+	return out, rows.Err()
+}
+
+// HasObject reports whether the object exists, either inline in the
+// objects table or as a packed entry referenced by pack_index.
+func (s *SQLiteStore) HasObject(id [32]byte) (bool, error) {
+	var n int
+	if err := s.db.QueryRow("SELECT COUNT(*) FROM objects WHERE id = ?", id[:]).Scan(&n); err != nil {
+		return false, err
+	}
+	if n > 0 {
+		return true, nil
+	}
+	if err := s.db.QueryRow("SELECT COUNT(*) FROM pack_index WHERE blob_id = ?", id[:]).Scan(&n); err != nil {
+		return false, err
+	}
+	return n > 0, nil
+}
+
+// ReadObject loads an object by content ID. The inline objects table is
+// checked first; otherwise the pack index is consulted. Packed entries may
+// be stored as deltas against a base object, which are resolved by reading
+// the base (recursively) and applying the delta. All packed entries are
+// blobs, so the pack paths return object.KindBlob.
+func (s *SQLiteStore) ReadObject(id [32]byte) (kind string, raw []byte, err error) {
+	var compressed []byte
+	rowErr := s.db.QueryRow("SELECT kind, data FROM objects WHERE id = ?", id[:]).Scan(&kind, &compressed)
+	if rowErr == nil {
+		raw, err = s.codec.Decompress(compressed)
+		if err != nil {
+			return "", nil, fmt.Errorf("decompress object %s: %w", hex.EncodeToString(id[:])[:12], err)
+		}
+		return kind, raw, nil
+	}
+	if !errors.Is(rowErr, sql.ErrNoRows) {
+		return "", nil, rowErr
+	}
+
+	// Not inline: look the blob up in the pack index.
+	var packFile string
+	var offset, rawSize int64
+	var deltaBaseIDRaw []byte
+	var deltaDepth int
+	rowErr = s.db.QueryRow(
+		"SELECT pack_file, offset, raw_size, delta_base_id, delta_depth FROM pack_index WHERE blob_id = ?", id[:],
+	).Scan(&packFile, &offset, &rawSize, &deltaBaseIDRaw, &deltaDepth)
+	if errors.Is(rowErr, sql.ErrNoRows) {
+		return "", nil, fmt.Errorf("object %s not found", hex.EncodeToString(id[:])[:12])
+	}
+	if rowErr != nil {
+		return "", nil, rowErr
+	}
+
+	compressed, err = s.pack.read(packFile, offset)
+	if err != nil {
+		return "", nil, err
+	}
+
+	if len(deltaBaseIDRaw) > 0 {
+		// Delta-encoded entry: decompress the delta, materialize the base,
+		// then apply. NOTE(review): the guard below trusts the stored
+		// delta_depth values; a corrupt index forming a base cycle with
+		// small depths would recurse without bound — confirm the writer
+		// maintains the depth invariant.
+		if deltaDepth > deltaMaxDepth {
+			return "", nil, fmt.Errorf("pack object %s: delta chain depth %d exceeds limit %d",
+				hex.EncodeToString(id[:])[:12], deltaDepth, deltaMaxDepth)
+		}
+		deltaBytes, decErr := s.codec.Decompress(compressed)
+		if decErr != nil {
+			return "", nil, fmt.Errorf("decompress delta %s: %w", hex.EncodeToString(id[:])[:12], decErr)
+		}
+		var baseID [32]byte
+		copy(baseID[:], deltaBaseIDRaw)
+		_, baseRaw, baseErr := s.ReadObject(baseID)
+		if baseErr != nil {
+			return "", nil, fmt.Errorf("read delta base for %s: %w", hex.EncodeToString(id[:])[:12], baseErr)
+		}
+		raw, err = ApplyDelta(baseRaw, deltaBytes)
+		if err != nil {
+			return "", nil, fmt.Errorf("apply delta %s: %w", hex.EncodeToString(id[:])[:12], err)
+		}
+		return string(object.KindBlob), raw, nil
+	}
+
+	raw, err = s.codec.Decompress(compressed)
+	if err != nil {
+		return "", nil, fmt.Errorf("decompress pack object %s: %w", hex.EncodeToString(id[:])[:12], err)
+	}
+	return string(object.KindBlob), raw, nil
+}
+
+// WriteObject stores an object under its content ID. Blobs larger than
+// packThreshold go to a pack file with an index row in pack_index; all
+// other objects are stored inline in the objects table. Both inserts use
+// INSERT OR IGNORE, so writing an already-stored object is a no-op.
+func (s *SQLiteStore) WriteObject(tx *Tx, id [32]byte, kind string, raw []byte) error {
+	compressed := s.codec.Compress(raw)
+
+	if len(raw) > s.packThreshold && kind == string(object.KindBlob) {
+		// NOTE(review): the pack write happens outside the SQL transaction;
+		// if tx rolls back, the pack bytes remain unreferenced (wasted
+		// space, not corruption) — confirm repack/GC reclaims them.
+		entry, err := s.pack.write(compressed, int64(len(raw)))
+		if err != nil {
+			return err
+		}
+		_, err = tx.sqlTx.Exec(
+			"INSERT OR IGNORE INTO pack_index (blob_id, pack_file, offset, raw_size) VALUES (?, ?, ?, ?)",
+			id[:], entry.packFile, entry.offset, entry.rawSize,
+		)
+		return err
+	}
+
+	_, err := tx.sqlTx.Exec(
+		"INSERT OR IGNORE INTO objects (id, kind, data) VALUES (?, ?, ?)",
+		id[:], kind, compressed,
+	)
+	return err
+}
+
+// ListObjectsByKind returns the IDs of all inline objects with the given
+// kind. Only the objects table is queried, so blobs that live exclusively
+// in packs are not included.
+func (s *SQLiteStore) ListObjectsByKind(kind string) ([][32]byte, error) {
+	rows, err := s.db.Query("SELECT id FROM objects WHERE kind = ?", kind)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var out [][32]byte
+	for rows.Next() {
+		var buf []byte
+		if err := rows.Scan(&buf); err != nil {
+			return nil, err
+		}
+		var oid [32]byte
+		copy(oid[:], buf)
+		out = append(out, oid)
+	}
+	return out, rows.Err()
+}
+
+// GetBookmark returns the bookmark with the given name, or (nil, nil) if
+// it does not exist.
+func (s *SQLiteStore) GetBookmark(name string) (*Bookmark, error) {
+	var cid []byte
+	var remote sql.NullString
+	err := s.db.QueryRow("SELECT commit_id, remote FROM bookmarks WHERE name = ?", name).Scan(&cid, &remote)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	b := &Bookmark{Name: name, Remote: remote.String}
+	copy(b.CommitID[:], cid)
+	return b, nil
+}
+
+// SetBookmark creates or replaces a bookmark. An empty Remote is stored
+// as SQL NULL.
+func (s *SQLiteStore) SetBookmark(tx *Tx, b Bookmark) error {
+	var remote interface{}
+	if b.Remote != "" {
+		remote = b.Remote
+	}
+	_, err := tx.sqlTx.Exec(
+		"INSERT OR REPLACE INTO bookmarks (name, commit_id, remote) VALUES (?, ?, ?)",
+		b.Name, b.CommitID[:], remote,
+	)
+	return err
+}
+
+// DeleteBookmark removes the bookmark with the given name, if present.
+func (s *SQLiteStore) DeleteBookmark(tx *Tx, name string) error {
+	_, err := tx.sqlTx.Exec("DELETE FROM bookmarks WHERE name = ?", name)
+	return err
+}
+
+// ListBookmarks returns all bookmarks ordered by name.
+func (s *SQLiteStore) ListBookmarks() ([]Bookmark, error) {
+	rows, err := s.db.Query("SELECT name, commit_id, remote FROM bookmarks ORDER BY name")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var out []Bookmark
+	for rows.Next() {
+		var b Bookmark
+		var cid []byte
+		var remote sql.NullString
+		if err := rows.Scan(&b.Name, &cid, &remote); err != nil {
+			return nil, err
+		}
+		copy(b.CommitID[:], cid)
+		b.Remote = remote.String
+		out = append(out, b)
+	}
+	return out, rows.Err()
+}
+
+// GetPhase returns the phase of the commit. Commits with no stored phase
+// row default to PhaseDraft.
+func (s *SQLiteStore) GetPhase(commitID [32]byte) (object.Phase, error) {
+	var phase int
+	err := s.db.QueryRow("SELECT phase FROM phases WHERE commit_id = ?", commitID[:]).Scan(&phase)
+	if errors.Is(err, sql.ErrNoRows) {
+		return object.PhaseDraft, nil
+	}
+
+	if err != nil {
+		return 0, err
+	}
+
+	return object.Phase(phase), nil
+}
+
+// SetPhase creates or replaces the phase row for commitID.
+func (s *SQLiteStore) SetPhase(tx *Tx, commitID [32]byte, phase object.Phase) error {
+	_, err := tx.sqlTx.Exec(
+		"INSERT OR REPLACE INTO phases (commit_id, phase) VALUES (?, ?)",
+		commitID[:], int(phase),
+	)
+	return err
+}
+
+// ListSecretCommitIDs returns the IDs of all commits marked PhaseSecret.
+func (s *SQLiteStore) ListSecretCommitIDs() ([][32]byte, error) {
+	rows, err := s.db.Query("SELECT commit_id FROM phases WHERE phase = ?", int(object.PhaseSecret))
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out [][32]byte
+	for rows.Next() {
+		var raw []byte
+		if err := rows.Scan(&raw); err != nil {
+			return nil, err
+		}
+		var id [32]byte
+		copy(id[:], raw)
+		out = append(out, id)
+	}
+	return out, rows.Err()
+}
+
+// AllocChangeID reserves a fresh random change ID (initially with a NULL
+// commit) in the changes table. It starts at 8 characters and, on a
+// uniqueness collision, retries with a new random ID two characters
+// longer, up to 32; any non-constraint database error aborts immediately.
+func (s *SQLiteStore) AllocChangeID(tx *Tx) (string, error) {
+	for length := 8; length <= 32; length += 2 {
+		id := object.NewChangeID(length)
+		_, err := tx.sqlTx.Exec("INSERT INTO changes (change_id, commit_id) VALUES (?, NULL)", id)
+		if err == nil {
+			return id, nil
+		}
+		if !isSQLiteConstraintError(err) {
+			return "", fmt.Errorf("alloc change ID: %w", err)
+		}
+	}
+	return "", errors.New("change ID allocation failed after max retries")
+}
+
+// GetChangeCommit resolves a change ID, or a unique prefix of one, to its
+// commit ID. Allocated change IDs are 8-32 characters long, so even a
+// query of 8+ characters may be a prefix of a longer ID; the lookup is
+// therefore always a prefix search. If the prefix matches several changes
+// but equals one of them exactly, that exact match wins; otherwise an
+// ambiguity error is returned. A miss returns sql.ErrNoRows.
+// Change IDs contain no SQL LIKE metacharacters, so the prefix is used
+// unescaped.
+func (s *SQLiteStore) GetChangeCommit(changeID string) ([32]byte, error) {
+	rows, err := s.db.Query(
+		"SELECT change_id, commit_id FROM changes WHERE change_id LIKE ? AND commit_id IS NOT NULL",
+		changeID+"%",
+	)
+	if err != nil {
+		return object.ZeroID, err
+	}
+	defer rows.Close()
+
+	var found, exact [32]byte
+	var count int
+	var haveExact bool
+	for rows.Next() {
+		var cid []byte
+		var chid string
+		if err := rows.Scan(&chid, &cid); err != nil {
+			return object.ZeroID, err
+		}
+		if chid == changeID {
+			copy(exact[:], cid)
+			haveExact = true
+		}
+		copy(found[:], cid)
+		count++
+	}
+
+	if err := rows.Err(); err != nil {
+		return object.ZeroID, err
+	}
+
+	if count == 0 {
+		return object.ZeroID, sql.ErrNoRows
+	}
+	if count == 1 {
+		return found, nil
+	}
+	if haveExact {
+		// The query is itself a full change ID that happens to prefix
+		// longer IDs; the exact match is unambiguous.
+		return exact, nil
+	}
+	return object.ZeroID, fmt.Errorf("ambiguous change ID prefix %q matches %d changes", changeID, count)
+}
+
+// SetChangeCommit points an allocated change ID at a commit.
+// NOTE(review): an unknown changeID makes this a silent no-op (the UPDATE
+// affects zero rows) — confirm callers always allocate first.
+func (s *SQLiteStore) SetChangeCommit(tx *Tx, changeID string, commitID [32]byte) error {
+	_, err := tx.sqlTx.Exec(
+		"UPDATE changes SET commit_id = ? WHERE change_id = ?",
+		commitID[:], changeID,
+	)
+	return err
+}
+
+// ListChanges returns every change that has been bound to a commit, as
+// Bookmark values (Name = change ID).
+func (s *SQLiteStore) ListChanges() ([]Bookmark, error) {
+	rows, err := s.db.Query("SELECT change_id, commit_id FROM changes WHERE commit_id IS NOT NULL")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []Bookmark
+	for rows.Next() {
+		var name string
+		var commitID []byte
+		if err := rows.Scan(&name, &commitID); err != nil {
+			return nil, err
+		}
+		var id [32]byte
+		copy(id[:], commitID)
+		out = append(out, Bookmark{Name: name, CommitID: id})
+	}
+	return out, rows.Err()
+}
+
+// GetWCacheEntry returns the working-copy cache row for path, or
+// (nil, nil) if none exists.
+func (s *SQLiteStore) GetWCacheEntry(path string) (*WCacheEntry, error) {
+	var e WCacheEntry
+	var blobID []byte
+	var dirty int
+	err := s.db.QueryRow(
+		"SELECT path, inode, mtime_ns, size, blob_id, mode, dirty FROM wcache WHERE path = ?", path,
+	).Scan(&e.Path, &e.Inode, &e.MtimeNs, &e.Size, &blobID, &e.Mode, &dirty)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	copy(e.BlobID[:], blobID)
+	e.Dirty = dirty != 0
+	return &e, nil
+}
+
+// SetWCacheEntry creates or replaces the cache row for e.Path.
+func (s *SQLiteStore) SetWCacheEntry(tx *Tx, e WCacheEntry) error {
+	// The Dirty bool is stored as an integer column.
+	dirty := 0
+	if e.Dirty {
+		dirty = 1
+	}
+	_, err := tx.sqlTx.Exec(
+		"INSERT OR REPLACE INTO wcache (path, inode, mtime_ns, size, blob_id, mode, dirty) VALUES (?, ?, ?, ?, ?, ?, ?)",
+		e.Path, e.Inode, e.MtimeNs, e.Size, e.BlobID[:], e.Mode, dirty,
+	)
+	return err
+}
+
+// DeleteWCacheEntry removes the cache row for path, if present.
+func (s *SQLiteStore) DeleteWCacheEntry(tx *Tx, path string) error {
+	_, err := tx.sqlTx.Exec("DELETE FROM wcache WHERE path = ?", path)
+	return err
+}
+
+// ListWCacheEntries returns all cache rows ordered by path.
+func (s *SQLiteStore) ListWCacheEntries() ([]WCacheEntry, error) {
+	rows, err := s.db.Query("SELECT path, inode, mtime_ns, size, blob_id, mode, dirty FROM wcache ORDER BY path")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var out []WCacheEntry
+	for rows.Next() {
+		var e WCacheEntry
+		var blobID []byte
+		var dirty int
+		if err := rows.Scan(&e.Path, &e.Inode, &e.MtimeNs, &e.Size, &blobID, &e.Mode, &dirty); err != nil {
+			return nil, err
+		}
+		copy(e.BlobID[:], blobID)
+		e.Dirty = dirty != 0
+		out = append(out, e)
+	}
+	return out, rows.Err()
+}
+
+// ClearWCache removes every cache row.
+func (s *SQLiteStore) ClearWCache(tx *Tx) error {
+	_, err := tx.sqlTx.Exec("DELETE FROM wcache")
+	return err
+}
+
+// MarkWCacheDirty flags path as dirty, creating a placeholder row (zeroed
+// stat fields, zero blob ID) when none exists. It runs directly on the
+// database — not inside a Tx — so a file watcher can record changes
+// without holding a transaction open.
+func (s *SQLiteStore) MarkWCacheDirty(path string) error {
+	_, err := s.db.Exec(
+		`INSERT INTO wcache (path, inode, mtime_ns, size, blob_id, mode, dirty)
+		 VALUES (?, 0, 0, 0, zeroblob(32), 0, 1)
+		 ON CONFLICT(path) DO UPDATE SET dirty = 1`,
+		path,
+	)
+	return err
+}
+
+// ListDirtyWCacheEntries returns all rows currently flagged dirty,
+// ordered by path.
+func (s *SQLiteStore) ListDirtyWCacheEntries() ([]WCacheEntry, error) {
+	rows, err := s.db.Query("SELECT path, inode, mtime_ns, size, blob_id, mode, dirty FROM wcache WHERE dirty = 1 ORDER BY path")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var out []WCacheEntry
+	for rows.Next() {
+		var e WCacheEntry
+		var blobID []byte
+		var dirty int
+		if err := rows.Scan(&e.Path, &e.Inode, &e.MtimeNs, &e.Size, &blobID, &e.Mode, &dirty); err != nil {
+			return nil, err
+		}
+		copy(e.BlobID[:], blobID)
+		e.Dirty = dirty != 0
+		out = append(out, e)
+	}
+	return out, rows.Err()
+}
+
+// ClearWCacheDirtyFlags resets the dirty flag on every flagged row.
+func (s *SQLiteStore) ClearWCacheDirtyFlags(tx *Tx) error {
+	_, err := tx.sqlTx.Exec("UPDATE wcache SET dirty = 0 WHERE dirty = 1")
+	return err
+}
+
+// InsertOperation appends an entry to the operation log and returns its
+// sequence number. A zero Timestamp is filled with the current time.
+func (s *SQLiteStore) InsertOperation(tx *Tx, op Operation) (int64, error) {
+	if op.Timestamp == 0 {
+		op.Timestamp = time.Now().Unix()
+	}
+	result, err := tx.sqlTx.Exec(
+		"INSERT INTO operations (kind, timestamp, before, after, metadata) VALUES (?, ?, ?, ?, ?)",
+		op.Kind, op.Timestamp, op.Before, op.After, nullableString(op.Metadata),
+	)
+	if err != nil {
+		return 0, err
+	}
+	return result.LastInsertId()
+}
+
+// ListOperations returns log entries newest-first; n > 0 limits the count,
+// any other value returns all entries.
+func (s *SQLiteStore) ListOperations(n int) ([]Operation, error) {
+	query := "SELECT seq, kind, timestamp, before, after, metadata FROM operations ORDER BY seq DESC"
+	if n > 0 {
+		// Safe to splice: n is an int, so no injection is possible.
+		query += fmt.Sprintf(" LIMIT %d", n)
+	}
+	rows, err := s.db.Query(query)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var out []Operation
+	for rows.Next() {
+		var op Operation
+		var meta sql.NullString
+		if err := rows.Scan(&op.Seq, &op.Kind, &op.Timestamp, &op.Before, &op.After, &meta); err != nil {
+			return nil, err
+		}
+		op.Metadata = meta.String
+		out = append(out, op)
+	}
+	return out, rows.Err()
+}
+
+// GetOperation returns the log entry with the given sequence number, or
+// (nil, nil) if it does not exist.
+func (s *SQLiteStore) GetOperation(seq int64) (*Operation, error) {
+	var op Operation
+	var meta sql.NullString
+	err := s.db.QueryRow(
+		"SELECT seq, kind, timestamp, before, after, metadata FROM operations WHERE seq = ?", seq,
+	).Scan(&op.Seq, &op.Kind, &op.Timestamp, &op.Before, &op.After, &meta)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	op.Metadata = meta.String
+	return &op, nil
+}
+
+// GetLastOperation returns the most recent log entry, or (nil, nil) when
+// the log is empty.
+func (s *SQLiteStore) GetLastOperation() (*Operation, error) {
+	var op Operation
+	var meta sql.NullString
+	err := s.db.QueryRow(
+		"SELECT seq, kind, timestamp, before, after, metadata FROM operations ORDER BY seq DESC LIMIT 1",
+	).Scan(&op.Seq, &op.Kind, &op.Timestamp, &op.Before, &op.After, &meta)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	op.Metadata = meta.String
+	return &op, nil
+}
+
+// isSQLiteConstraintError reports whether err is a SQLite constraint
+// violation (e.g. a UNIQUE collision from AllocChangeID). Matching is
+// textual because the driver's error type is not imported here. Every
+// SQLite constraint message contains the phrase "constraint failed"
+// (including "UNIQUE constraint failed: ..."), so a single substring test
+// covers all cases; the previous extra check for the UNIQUE variant was
+// redundant.
+func isSQLiteConstraintError(err error) bool {
+	if err == nil {
+		return false
+	}
+	return strings.Contains(err.Error(), "constraint failed")
+}
+
+// nullableString maps the empty string to nil so optional text columns
+// are stored as SQL NULL rather than "".
+func nullableString(s string) interface{} {
+	if s != "" {
+		return s
+	}
+	return nil
+}
+
+// AcquireLock takes (or refreshes) an advisory file lock on path for
+// owner. Acquisition fails if a different owner holds the lock;
+// re-acquiring one's own lock refreshes its timestamp and comment.
+func (s *SQLiteStore) AcquireLock(tx *Tx, path, owner, comment string) error {
+	var existingOwner string
+	err := tx.sqlTx.QueryRow("SELECT owner FROM file_locks WHERE path = ?", path).Scan(&existingOwner)
+	switch {
+	case err == nil:
+		if existingOwner != owner {
+			return fmt.Errorf("file %q is locked by %q", path, existingOwner)
+		}
+	case errors.Is(err, sql.ErrNoRows):
+		// No existing lock; fall through and insert one.
+	default:
+		// A genuine query failure was previously swallowed here, which
+		// could upsert over a lock we never managed to inspect.
+		return err
+	}
+	_, err = tx.sqlTx.Exec(`
+		INSERT INTO file_locks (path, owner, acquired_at, comment) VALUES (?, ?, ?, ?)
+		ON CONFLICT(path) DO UPDATE SET acquired_at = excluded.acquired_at, comment = excluded.comment`,
+		path, owner, time.Now().Unix(), comment,
+	)
+	return err
+}
+
+// ReleaseLock drops the lock on path if (and only if) it is held by
+// owner; otherwise it reports an error.
+func (s *SQLiteStore) ReleaseLock(tx *Tx, path, owner string) error {
+	res, err := tx.sqlTx.Exec("DELETE FROM file_locks WHERE path = ? AND owner = ?", path, owner)
+	if err != nil {
+		return err
+	}
+	n, _ := res.RowsAffected()
+	if n == 0 {
+		return fmt.Errorf("lock on %q is not held by %q", path, owner)
+	}
+	return nil
+}
+
+// ReleaseLockAdmin force-drops the lock on path regardless of owner;
+// releasing a non-existent lock is a no-op.
+func (s *SQLiteStore) ReleaseLockAdmin(tx *Tx, path string) error {
+	_, err := tx.sqlTx.Exec("DELETE FROM file_locks WHERE path = ?", path)
+	return err
+}
+
+// GetLock returns the lock record for path, or (nil, nil) when the path
+// is unlocked.
+func (s *SQLiteStore) GetLock(path string) (*FileLock, error) {
+	var l FileLock
+	var comment sql.NullString
+	err := s.db.QueryRow(
+		"SELECT path, owner, acquired_at, comment FROM file_locks WHERE path = ?", path,
+	).Scan(&l.Path, &l.Owner, &l.AcquiredAt, &comment)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	l.Comment = comment.String
+	return &l, nil
+}
+
+// ListLocks returns all file locks, most recently acquired first.
+func (s *SQLiteStore) ListLocks() ([]FileLock, error) {
+	rows, err := s.db.Query(
+		"SELECT path, owner, acquired_at, comment FROM file_locks ORDER BY acquired_at DESC",
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var out []FileLock
+	for rows.Next() {
+		var l FileLock
+		var comment sql.NullString
+		if err := rows.Scan(&l.Path, &l.Owner, &l.AcquiredAt, &comment); err != nil {
+			return nil, err
+		}
+		l.Comment = comment.String
+		out = append(out, l)
+	}
+	return out, rows.Err()
+}
+
+// TrainAndSaveDict samples up to 200 random inline blobs, trains a zstd
+// dictionary on their decompressed contents, persists it to zstd_dicts,
+// and swaps the live codec for one that uses the new dictionary. Blobs
+// that fail to decompress are skipped; at least 5 usable samples are
+// required.
+// NOTE(review): s.codec is closed and replaced without synchronization —
+// confirm no other goroutine can be reading/writing objects concurrently.
+func (s *SQLiteStore) TrainAndSaveDict() error {
+	rows, err := s.db.Query(
+		"SELECT data FROM objects WHERE kind = 'blob' ORDER BY RANDOM() LIMIT 200",
+	)
+	if err != nil {
+		return fmt.Errorf("sample blobs: %w", err)
+	}
+	defer rows.Close()
+
+	var samples [][]byte
+	for rows.Next() {
+		var compressed []byte
+		if scanErr := rows.Scan(&compressed); scanErr != nil {
+			return scanErr
+		}
+		// Best-effort: skip undecompressable or empty blobs.
+		raw, decErr := s.codec.Decompress(compressed)
+		if decErr == nil && len(raw) > 0 {
+			samples = append(samples, raw)
+		}
+	}
+	if err := rows.Err(); err != nil {
+		return err
+	}
+	if len(samples) < 5 {
+		return fmt.Errorf("not enough blobs to train dictionary (need at least 5, found %d)", len(samples))
+	}
+
+	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
+		Contents: samples,
+	})
+	if err != nil {
+		return fmt.Errorf("build dict: %w", err)
+	}
+
+	if _, err := s.db.Exec(
+		"INSERT INTO zstd_dicts (created_at, dict) VALUES (?, ?)",
+		time.Now().Unix(), dict,
+	); err != nil {
+		return fmt.Errorf("save dict: %w", err)
+	}
+
+	// Rebuild the codec so future writes use the freshly trained dict.
+	s.codec.Close()
+	newCD, err := newCodec(s.compressionName, dict)
+	if err != nil {
+		return fmt.Errorf("reload codec: %w", err)
+	}
+	s.codec = newCD
+	return nil
+}

internal/store/store.go [A]
--- /dev/null
+++ b/internal/store/store.go
@@ -1,0 +1,102 @@
+package store
+
+import (
+	"database/sql"
+
+	"arche/internal/object"
+)
+
+// Bookmark is a named pointer to a commit, optionally associated with a
+// remote. ListChanges also reuses this type for change-ID bindings.
+type Bookmark struct {
+	Name     string
+	CommitID [32]byte
+	Remote   string
+}
+
+// WCacheEntry is one row of the working-copy stat cache, used to detect
+// modified files without re-hashing them.
+type WCacheEntry struct {
+	Path    string
+	Inode   uint64
+	MtimeNs int64
+	Size    int64
+	BlobID  [32]byte
+	Mode    uint8 // object.EntryMode value (0=file,1=exec,2=symlink,3=dir)
+	Dirty   bool  // set by watcher; cleared by snap/status after processing
+}
+
+// Operation is one entry of the append-only operation log; Before/After
+// capture repository state around the operation.
+type Operation struct {
+	Seq       int64
+	Kind      string
+	Timestamp int64
+	Before    string
+	After     string
+	Metadata  string
+}
+
+// Tx wraps a database transaction so callers outside this package don't
+// depend on database/sql directly.
+type Tx struct {
+	sqlTx *sql.Tx
+}
+
+// SQLTx exposes the underlying *sql.Tx for callers that need raw access.
+func (t *Tx) SQLTx() *sql.Tx { return t.sqlTx }
+
+// Store is the repository persistence interface implemented by
+// SQLiteStore: content-addressed objects, refs (bookmarks/phases/changes),
+// the working-copy cache, the operation log, and conflict markers.
+type Store interface {
+	// Object storage (content-addressed by 32-byte ID).
+	HasObject(id [32]byte) (bool, error)
+	ReadObject(id [32]byte) (kind string, raw []byte, err error)
+	WriteObject(tx *Tx, id [32]byte, kind string, raw []byte) error
+	ListObjectsByKind(kind string) ([][32]byte, error)
+
+	// Bookmarks.
+	GetBookmark(name string) (*Bookmark, error)
+	SetBookmark(tx *Tx, b Bookmark) error
+	DeleteBookmark(tx *Tx, name string) error
+	ListBookmarks() ([]Bookmark, error)
+
+	// Commit phases.
+	GetPhase(commitID [32]byte) (object.Phase, error)
+	SetPhase(tx *Tx, commitID [32]byte, phase object.Phase) error
+
+	// Change IDs.
+	AllocChangeID(tx *Tx) (string, error)
+	GetChangeCommit(changeID string) ([32]byte, error)
+	SetChangeCommit(tx *Tx, changeID string, commitID [32]byte) error
+	ListChanges() ([]Bookmark, error)
+
+	// Working-copy stat cache.
+	GetWCacheEntry(path string) (*WCacheEntry, error)
+	SetWCacheEntry(tx *Tx, e WCacheEntry) error
+	DeleteWCacheEntry(tx *Tx, path string) error
+	ListWCacheEntries() ([]WCacheEntry, error)
+	ClearWCache(tx *Tx) error
+	MarkWCacheDirty(path string) error
+	ListDirtyWCacheEntries() ([]WCacheEntry, error)
+	ClearWCacheDirtyFlags(tx *Tx) error
+
+	// Operation log.
+	InsertOperation(tx *Tx, op Operation) (int64, error)
+	ListOperations(n int) ([]Operation, error)
+	GetOperation(seq int64) (*Operation, error)
+	GetLastOperation() (*Operation, error)
+
+	// Conflicts and secret commits.
+	AddConflict(tx *Tx, path string) error
+	ClearConflict(tx *Tx, path string) error
+	ClearAllConflicts(tx *Tx) error
+	ListConflicts() ([]string, error)
+	ListSecretCommitIDs() ([][32]byte, error)
+
+	// Transactions and lifecycle.
+	Begin() (*Tx, error)
+	Commit(tx *Tx) error
+	Rollback(tx *Tx) error
+	Close() error
+}
+
+// FileLock records an advisory lock on a path, held by Owner since
+// AcquiredAt (Unix seconds).
+type FileLock struct {
+	Path       string
+	Owner      string
+	AcquiredAt int64
+	Comment    string
+}
+
+// LockStore is the optional advisory file-locking interface.
+type LockStore interface {
+	AcquireLock(tx *Tx, path, owner, comment string) error
+	ReleaseLock(tx *Tx, path, owner string) error
+	ReleaseLockAdmin(tx *Tx, path string) error
+	GetLock(path string) (*FileLock, error)
+	ListLocks() ([]FileLock, error)
+}
+
+// DictTrainer is implemented by stores that can train a compression
+// dictionary from their contents.
+type DictTrainer interface {
+	TrainAndSaveDict() error
+}

internal/store/store_test.go [A]
--- /dev/null
+++ b/internal/store/store_test.go
@@ -1,0 +1,271 @@
+package store_test
+
+import (
+	"bytes"
+	"path/filepath"
+	"testing"
+
+	"arche/internal/object"
+	"arche/internal/store"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// openTestStore opens a fresh SQLiteStore in a temp directory with default
+// pack sizing and zstd compression; it is closed automatically on cleanup.
+func openTestStore(t *testing.T) *store.SQLiteStore {
+	t.Helper()
+	dir := t.TempDir()
+	packDir := filepath.Join(dir, "packs")
+	s, err := store.OpenSQLiteStore(filepath.Join(dir, "store.db"), packDir, 0, 0, "zstd")
+	if err != nil {
+		t.Fatalf("OpenSQLiteStore: %v", err)
+	}
+	t.Cleanup(func() { s.Close() })
+	return s
+}
+
+// TestStore_WriteReadObject round-trips a blob through WriteObject and
+// ReadObject, checking kind and bytes.
+func TestStore_WriteReadObject(t *testing.T) {
+	s := openTestStore(t)
+
+	data := []byte("hello world")
+	id := object.HashBlob(&object.Blob{Content: data})
+	var buf bytes.Buffer
+	object.EncodeBlob(&buf, &object.Blob{Content: data})
+	raw := buf.Bytes()
+
+	tx, err := s.Begin()
+	if err != nil {
+		t.Fatalf("Begin: %v", err)
+	}
+	if err := s.WriteObject(tx, id, "blob", raw); err != nil {
+		t.Fatalf("WriteObject: %v", err)
+	}
+	if err := s.Commit(tx); err != nil {
+		t.Fatalf("Commit: %v", err)
+	}
+
+	kind, got, err := s.ReadObject(id)
+	if err != nil {
+		t.Fatalf("ReadObject: %v", err)
+	}
+	if kind != "blob" {
+		t.Errorf("kind: want blob got %q", kind)
+	}
+	if string(got) != string(raw) {
+		t.Errorf("data mismatch")
+	}
+}
+
+// TestStore_HasObject checks existence before and after a write.
+func TestStore_HasObject(t *testing.T) {
+	s := openTestStore(t)
+
+	data := []byte("content")
+	id := object.HashBlob(&object.Blob{Content: data})
+	var rawBuf bytes.Buffer
+	object.EncodeBlob(&rawBuf, &object.Blob{Content: data})
+	raw := rawBuf.Bytes()
+
+	ok, err := s.HasObject(id)
+	if err != nil || ok {
+		t.Errorf("should not exist before write; has=%v err=%v", ok, err)
+	}
+
+	tx, _ := s.Begin()
+	s.WriteObject(tx, id, "blob", raw) //nolint:errcheck
+	s.Commit(tx)                       //nolint:errcheck
+
+	ok, err = s.HasObject(id)
+	if err != nil || !ok {
+		t.Errorf("should exist after write; has=%v err=%v", ok, err)
+	}
+}
+
+// TestStore_WriteObject_Idempotent verifies duplicate writes (INSERT OR
+// IGNORE) neither fail nor corrupt the stored data.
+func TestStore_WriteObject_Idempotent(t *testing.T) {
+	s := openTestStore(t)
+	var rawBuf2 bytes.Buffer
+	object.EncodeBlob(&rawBuf2, &object.Blob{Content: []byte("dup")})
+	raw := rawBuf2.Bytes()
+	id := object.HashBlob(&object.Blob{Content: []byte("dup")})
+
+	for range 3 {
+		tx, _ := s.Begin()
+		if err := s.WriteObject(tx, id, "blob", raw); err != nil {
+			t.Fatalf("WriteObject iteration: %v", err)
+		}
+		s.Commit(tx) //nolint:errcheck
+	}
+
+	_, got, err := s.ReadObject(id)
+	if err != nil {
+		t.Fatalf("ReadObject: %v", err)
+	}
+	if !bytes.Equal(got, raw) {
+		t.Error("data corrupted after duplicate writes")
+	}
+}
+
+// TestStore_Bookmarks exercises set/get/list/delete of bookmarks.
+func TestStore_Bookmarks(t *testing.T) {
+	s := openTestStore(t)
+
+	var bmBuf bytes.Buffer
+	object.EncodeBlob(&bmBuf, &object.Blob{Content: []byte("bookmark-obj")})
+	id := object.HashBlob(&object.Blob{Content: []byte("bookmark-obj")})
+	txObj, _ := s.Begin()
+	s.WriteObject(txObj, id, "blob", bmBuf.Bytes()) //nolint:errcheck
+	s.Commit(txObj)                                 //nolint:errcheck
+
+	tx, _ := s.Begin()
+	err := s.SetBookmark(tx, store.Bookmark{Name: "main", CommitID: id})
+	if err != nil {
+		t.Fatalf("SetBookmark: %v", err)
+	}
+	s.Commit(tx) //nolint:errcheck
+
+	got, err := s.GetBookmark("main")
+	if err != nil || got == nil {
+		t.Fatalf("GetBookmark: %v", err)
+	}
+	if got.CommitID != id {
+		t.Errorf("commit ID mismatch")
+	}
+
+	bms, err := s.ListBookmarks()
+	if err != nil || len(bms) != 1 {
+		t.Errorf("ListBookmarks: want 1, got %d err=%v", len(bms), err)
+	}
+
+	tx2, _ := s.Begin()
+	s.DeleteBookmark(tx2, "main") //nolint:errcheck
+	s.Commit(tx2)                 //nolint:errcheck
+
+	bms, _ = s.ListBookmarks()
+	if len(bms) != 0 {
+		t.Errorf("after delete want 0 bookmarks, got %d", len(bms))
+	}
+}
+
+// TestStore_Phase verifies SetPhase/GetPhase round-trip.
+func TestStore_Phase(t *testing.T) {
+	s := openTestStore(t)
+
+	var id [32]byte
+	id[0] = 0x01
+
+	var phaseBuf bytes.Buffer
+	object.EncodeBlob(&phaseBuf, &object.Blob{Content: []byte("x")})
+	tx, _ := s.Begin()
+	s.WriteObject(tx, id, "blob", phaseBuf.Bytes()) //nolint:errcheck
+	s.Commit(tx)                                    //nolint:errcheck
+
+	phase, _ := s.GetPhase(id)
+	_ = phase
+
+	tx2, _ := s.Begin()
+	if err := s.SetPhase(tx2, id, object.PhasePublic); err != nil {
+		t.Fatalf("SetPhase: %v", err)
+	}
+	s.Commit(tx2) //nolint:errcheck
+
+	phase, err := s.GetPhase(id)
+	if err != nil {
+		t.Fatalf("GetPhase: %v", err)
+	}
+	if phase != object.PhasePublic {
+		t.Errorf("phase: want Public got %v", phase)
+	}
+}
+
+// TestStore_OperationLog inserts one op and reads it back via list and
+// GetLastOperation.
+func TestStore_OperationLog(t *testing.T) {
+	s := openTestStore(t)
+
+	tx, _ := s.Begin()
+	_, err := s.InsertOperation(tx, store.Operation{
+		Kind:      "snap",
+		Timestamp: 1000,
+		Before:    `{"head":"old"}`,
+		After:     `{"head":"new"}`,
+	})
+	if err != nil {
+		t.Fatalf("InsertOperation: %v", err)
+	}
+	s.Commit(tx) //nolint:errcheck
+
+	ops, err := s.ListOperations(10)
+	if err != nil || len(ops) == 0 {
+		t.Errorf("ListOperations: want >=1, got %d err=%v", len(ops), err)
+	}
+
+	last, err := s.GetLastOperation()
+	if err != nil || last == nil {
+		t.Fatalf("GetLastOperation: %v", err)
+	}
+	if last.Kind != "snap" {
+		t.Errorf("last op kind: want snap got %q", last.Kind)
+	}
+}
+
+// TestStore_WCache exercises set/get/list/delete of stat-cache entries.
+func TestStore_WCache(t *testing.T) {
+	s := openTestStore(t)
+
+	e := store.WCacheEntry{
+		Path:    "src/main.go",
+		Inode:   42,
+		MtimeNs: 1234567890,
+		Size:    512,
+	}
+
+	tx, _ := s.Begin()
+	if err := s.SetWCacheEntry(tx, e); err != nil {
+		t.Fatalf("SetWCacheEntry: %v", err)
+	}
+	s.Commit(tx) //nolint:errcheck
+
+	got, err := s.GetWCacheEntry("src/main.go")
+	if err != nil || got == nil {
+		t.Fatalf("GetWCacheEntry: %v", err)
+	}
+	if got.Inode != 42 || got.Size != 512 || got.MtimeNs != 1234567890 {
+		t.Errorf("entry mismatch: %+v", got)
+	}
+
+	entries, err := s.ListWCacheEntries()
+	if err != nil || len(entries) != 1 {
+		t.Errorf("ListWCacheEntries: want 1, got %d err=%v", len(entries), err)
+	}
+
+	tx2, _ := s.Begin()
+	s.DeleteWCacheEntry(tx2, "src/main.go") //nolint:errcheck
+	s.Commit(tx2)                           //nolint:errcheck
+
+	entries, _ = s.ListWCacheEntries()
+	if len(entries) != 0 {
+		t.Errorf("after delete want 0 entries, got %d", len(entries))
+	}
+}
+
+// TestStore_ChangeCommit allocates a change ID, binds it to a commit, and
+// resolves it back.
+func TestStore_ChangeCommit(t *testing.T) {
+	s := openTestStore(t)
+
+	var ccBuf bytes.Buffer
+	object.EncodeBlob(&ccBuf, &object.Blob{Content: []byte("change-obj")})
+	commitID := object.HashBlob(&object.Blob{Content: []byte("change-obj")})
+	txObj, _ := s.Begin()
+	s.WriteObject(txObj, commitID, "blob", ccBuf.Bytes()) //nolint:errcheck
+	s.Commit(txObj)                                       //nolint:errcheck
+
+	tx, _ := s.Begin()
+	changeID, err := s.AllocChangeID(tx)
+	if err != nil {
+		t.Fatalf("AllocChangeID: %v", err)
+	}
+	if err := s.SetChangeCommit(tx, changeID, commitID); err != nil {
+		t.Fatalf("SetChangeCommit: %v", err)
+	}
+	s.Commit(tx) //nolint:errcheck
+
+	got, err := s.GetChangeCommit(changeID)
+	if err != nil {
+		t.Fatalf("GetChangeCommit: %v", err)
+	}
+	if got != commitID {
+		t.Errorf("commit ID mismatch")
+	}
+}

internal/syncpkg/bloom.go [A]
--- /dev/null
+++ b/internal/syncpkg/bloom.go
@@ -1,0 +1,91 @@
+package syncpkg
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+)
+
+// BloomFilter is a fixed-size Bloom filter over 32-byte object IDs, used
+// to summarize which objects one side of a sync already has.
+type BloomFilter struct {
+	k     int      // hash probes per ID
+	m     uint64   // number of bits; always a multiple of 64
+	n     uint64   // number of IDs added
+	words []uint64 // bit array of m/64 words
+}
+
+// NewBloom sizes a filter for n expected entries at ~1% false positives.
+func NewBloom(n int) *BloomFilter {
+	if n <= 0 {
+		n = 1
+	}
+	// Standard sizing: m = -n*ln(p) / ln(2)^2 with p = 0.01.
+	m := uint64(math.Ceil(-float64(n) * math.Log(0.01) / (math.Log(2) * math.Log(2))))
+	if m == 0 {
+		m = 64
+	}
+	m = ((m + 63) / 64) * 64 // round up to whole 64-bit words
+	return &BloomFilter{
+		k:     7, // near-optimal probe count for p = 0.01
+		m:     m,
+		words: make([]uint64, m/64),
+	}
+}
+
+// Add inserts id using double hashing: probe i sets bit (h1 + i*h2) mod m.
+func (f *BloomFilter) Add(id [32]byte) {
+	h1, h2 := splitID(id)
+	for i := uint64(0); i < uint64(f.k); i++ {
+		bit := (h1 + i*h2) % f.m
+		f.words[bit/64] |= 1 << (bit % 64)
+	}
+	f.n++
+}
+
+// Test reports whether id may be present. False positives are possible;
+// false negatives are not.
+func (f *BloomFilter) Test(id [32]byte) bool {
+	h1, h2 := splitID(id)
+	for i := uint64(0); i < uint64(f.k); i++ {
+		bit := (h1 + i*h2) % f.m
+		if f.words[bit/64]&(1<<(bit%64)) == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Len returns the number of IDs added so far.
+func (f *BloomFilter) Len() int { return int(f.n) }
+
+// Bytes serializes the filter: k (4 bytes), m (8), n (8), then the bit
+// words, all big-endian.
+func (f *BloomFilter) Bytes() []byte {
+	buf := make([]byte, 4+8+8+len(f.words)*8)
+	binary.BigEndian.PutUint32(buf[0:4], uint32(f.k))
+	binary.BigEndian.PutUint64(buf[4:12], f.m)
+	binary.BigEndian.PutUint64(buf[12:20], f.n)
+	for i, w := range f.words {
+		binary.BigEndian.PutUint64(buf[20+i*8:], w)
+	}
+	return buf
+}
+
+// BloomFilterFrom deserializes a filter produced by Bytes. The header
+// comes off the wire, so it is validated before any allocation is sized
+// from it: k must be 1..64 (k == 0 would make Test match everything), m a
+// non-zero multiple of 64, and the payload must actually contain all m/64
+// words. This keeps a malformed or hostile header from causing integer
+// overflow in the expected-length computation or an oversized allocation.
+func BloomFilterFrom(data []byte) (*BloomFilter, error) {
+	if len(data) < 20 {
+		return nil, fmt.Errorf("bloom: data too short (%d bytes)", len(data))
+	}
+	k := int(binary.BigEndian.Uint32(data[0:4]))
+	m := binary.BigEndian.Uint64(data[4:12])
+	n := binary.BigEndian.Uint64(data[12:20])
+	if k < 1 || k > 64 {
+		return nil, fmt.Errorf("bloom: invalid probe count %d", k)
+	}
+	if m == 0 || m%64 != 0 {
+		return nil, fmt.Errorf("bloom: invalid bit count %d", m)
+	}
+	nWords := m / 64
+	// Overflow-safe payload check: divide instead of multiplying.
+	if uint64(len(data)-20)/8 < nWords {
+		return nil, fmt.Errorf("bloom: need %d words of payload, got %d bytes", nWords, len(data)-20)
+	}
+	words := make([]uint64, nWords)
+	for i := range words {
+		words[i] = binary.BigEndian.Uint64(data[20+i*8:])
+	}
+	return &BloomFilter{k: k, m: m, n: n, words: words}, nil
+}
+
+// splitID derives the two 64-bit double-hashing seeds from the first 16
+// bytes of the (already uniformly distributed) object ID. h2 is forced
+// non-zero so successive probes actually move.
+func splitID(id [32]byte) (h1, h2 uint64) {
+	h1 = binary.BigEndian.Uint64(id[0:8])
+	h2 = binary.BigEndian.Uint64(id[8:16])
+	if h2 == 0 {
+		h2 = 1
+	}
+	return
+}

internal/syncpkg/client.go [A]
--- /dev/null
+++ b/internal/syncpkg/client.go
@@ -1,0 +1,685 @@
+package syncpkg
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+
+	"arche/internal/issuedb"
+	"arche/internal/issues"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+)
+
+// Client talks to a remote arche server over HTTP, optionally tunnelled
+// through SSH or authenticated with mTLS (see resolveClientTransport).
+type Client struct {
+	url    string       // base URL, trailing slashes stripped
+	token  string       // bearer token; empty means no Authorization header
+	repo   *repo.Repo   // local repository to sync against
+	client *http.Client // transport selected from the remote URL scheme
+}
+
+func NewClient(r *repo.Repo, rawURL, token string) *Client {
+	httpURL, httpClient := resolveClientTransport(rawURL, token)
+	return &Client{
+		url:    strings.TrimRight(httpURL, "/"),
+		token:  token,
+		repo:   r,
+		client: httpClient,
+	}
+}
+
+// resolveClientTransport maps a remote URL onto a (base HTTP URL, client)
+// pair based on its scheme:
+//
+//   - arche+ssh://host[:port]/repo — requests tunnelled over SSH; the
+//     returned "http://localhost" base is a placeholder the SSH transport
+//     presumably rewrites — TODO confirm against NewSSHClient.
+//   - arche+mtls://host[:port]?cert=...&key=... — HTTPS with a client
+//     certificate; empty cert/key fall back to defaults in NewMTLSClient.
+//   - any other scheme — the URL as given, with a default http.Client.
+//
+// NOTE(review): every failure path (unparsable URL, SSH or mTLS setup
+// error) silently degrades to a plain http.Client on the raw URL, so the
+// eventual request error will not mention the real cause.
+func resolveClientTransport(rawURL, token string) (string, *http.Client) {
+	u, err := url.Parse(rawURL)
+	if err != nil {
+		return rawURL, &http.Client{}
+	}
+
+	switch u.Scheme {
+	case "arche+ssh":
+		host := u.Hostname()
+		port := u.Port()
+		if port == "" {
+			port = "22" // standard SSH port
+		}
+		sshHost := host + ":" + port
+		repoName := strings.TrimPrefix(u.Path, "/")
+		sshClient, err := NewSSHClient(sshHost, repoName, "")
+		if err != nil {
+			return rawURL, &http.Client{}
+		}
+		_ = token // bearer token unused over SSH; the SSH identity is the auth
+		return "http://localhost", sshClient
+
+	case "arche+mtls":
+		host := u.Hostname()
+		port := u.Port()
+		if port == "" {
+			port = "8443" // default mTLS port
+		}
+		certFile := u.Query().Get("cert")
+		keyFile := u.Query().Get("key")
+		mtlsClient, err := NewMTLSClient(certFile, keyFile)
+		if err != nil {
+			return rawURL, &http.Client{}
+		}
+		_ = token // bearer token unused here; the client certificate is the auth
+		baseURL := "https://" + host + ":" + port
+		return baseURL, mtlsClient
+
+	default:
+		return rawURL, &http.Client{}
+	}
+}
+
+// Pull synchronizes the local repository from the remote:
+//
+//  1. GET /info for the remote's bookmark map.
+//  2. POST a Bloom filter of every local object and store the pack of
+//     objects the server believes we lack.
+//  3. Up to 3 convergence rounds: walk the remote bookmark tips locally and
+//     exact-fetch anything still missing (covers Bloom false positives).
+//  4. Adopt the remote bookmark positions locally (one tx per bookmark).
+//  5. Merge the remote issue-event log.
+func (c *Client) Pull() error {
+	info, err := c.getInfo()
+	if err != nil {
+		return fmt.Errorf("pull: info: %w", err)
+	}
+
+	allLocalIDs, err := listAllObjectIDs(c.repo)
+	if err != nil {
+		return fmt.Errorf("pull: enumerate local: %w", err)
+	}
+	filter := NewBloom(max(len(allLocalIDs), 1))
+	for _, id := range allLocalIDs {
+		filter.Add(id)
+	}
+
+	pack, err := c.fetchBloom(filter.Bytes())
+	if err != nil {
+		return fmt.Errorf("pull: bloom/fetch: %w", err)
+	}
+
+	if err := storePackEntries(c.repo, pack); err != nil {
+		return fmt.Errorf("pull: store pack: %w", err)
+	}
+
+	if err := ensureChangeInStore(c.repo, pack); err != nil {
+		return fmt.Errorf("pull: register change IDs: %w", err)
+	}
+
+	// Convergence: Bloom false positives can leave gaps; re-walk and fetch
+	// exactly what is missing. Errors from checkMissingObjects are treated
+	// the same as "nothing missing" (best-effort).
+	for round := 0; round < 3; round++ {
+		missing, err := c.checkMissingObjects(info.Bookmarks)
+		if err != nil || len(missing) == 0 {
+			break
+		}
+		pack2, err := c.fetchExact(missing)
+		if err != nil {
+			return fmt.Errorf("pull: convergence round %d: %w", round+1, err)
+		}
+		if err := storePackEntries(c.repo, pack2); err != nil {
+			return fmt.Errorf("pull: store convergence pack: %w", err)
+		}
+		if err := ensureChangeInStore(c.repo, pack2); err != nil {
+			return fmt.Errorf("pull: register change IDs (round %d): %w", round+1, err)
+		}
+	}
+
+	// Mirror the remote bookmark positions; malformed entries are skipped.
+	for name, hexID := range info.Bookmarks {
+		idBytes, err := hex.DecodeString(hexID)
+		if err != nil || len(idBytes) != 32 {
+			continue
+		}
+		var id [32]byte
+		copy(id[:], idBytes)
+
+		tx, err := c.repo.Store.Begin()
+		if err != nil {
+			return fmt.Errorf("pull: begin tx: %w", err)
+		}
+		if err := c.repo.Store.SetBookmark(tx, store.Bookmark{Name: name, CommitID: id, Remote: c.url}); err != nil {
+			c.repo.Store.Rollback(tx)
+			return fmt.Errorf("pull: set bookmark %q: %w", name, err)
+		}
+		if err := c.repo.Store.Commit(tx); err != nil {
+			return fmt.Errorf("pull: commit bookmark: %w", err)
+		}
+	}
+
+	if err := c.pullIssues(); err != nil {
+		return fmt.Errorf("pull: issues: %w", err)
+	}
+
+	return nil
+}
+
+// PushOptions controls bookmark-update behaviour on push.
+type PushOptions struct {
+	Force       bool // overwrite a diverged remote bookmark
+	ForcePublic bool // additionally allow overwriting a public-phase tip
+}
+
+func (c *Client) Push() error { return c.PushWith(PushOptions{}) }
+
+// PushWith uploads missing local objects, advances each remote bookmark via
+// compare-and-swap, then promotes the pushed commits to the public phase
+// and records a "push" operation in the oplog. Bookmarks whose tip is
+// secret-phase are skipped entirely.
+//
+// NOTE(review): CaptureRefState's error is discarded, and the oplog entry
+// records the same state for Before and After — confirm that is intended.
+func (c *Client) PushWith(opts PushOptions) error {
+	info, err := c.getInfo()
+	if err != nil {
+		return fmt.Errorf("push: info: %w", err)
+	}
+
+	allLocalIDs, err := listPushableObjectIDs(c.repo)
+	if err != nil {
+		return fmt.Errorf("push: enumerate local: %w", err)
+	}
+
+	// Empty remote: send everything pushable; otherwise diff against the
+	// remote's bookmark-reachable set.
+	var toSend [][32]byte
+	if info.CommitCount == 0 {
+		toSend = allLocalIDs
+	} else {
+		toSend, err = c.objectsToSend(info)
+		if err != nil {
+			return fmt.Errorf("push: diff objects: %w", err)
+		}
+	}
+
+	if len(toSend) > 0 {
+		if err := c.pushObjects(toSend); err != nil {
+			return fmt.Errorf("push: upload pack: %w", err)
+		}
+	}
+
+	before, _ := c.repo.CaptureRefState()
+
+	localBMs, err := c.repo.Store.ListBookmarks()
+	if err != nil {
+		return fmt.Errorf("push: list bookmarks: %w", err)
+	}
+
+	var pushedNames []string
+	var allToPromote [][32]byte
+	for _, bm := range localBMs {
+		// Never push a bookmark whose tip is secret.
+		if phase, _ := c.repo.Store.GetPhase(bm.CommitID); phase == object.PhaseSecret {
+			continue
+		}
+
+		// CAS against the tip the remote reported in /info ("" if absent).
+		remoteBM := info.Bookmarks[bm.Name]
+		newHex := hex.EncodeToString(bm.CommitID[:])
+		resp, err := c.updateRemoteBookmark(bm.Name, remoteBM, newHex, opts)
+		if err != nil {
+			return fmt.Errorf("push: update bookmark %q: %w", bm.Name, err)
+		}
+		if !resp.OK {
+			return fmt.Errorf("push rejected for bookmark %q: %s", bm.Name, resp.Err)
+		}
+
+		promotable, err := collectPromotable(c.repo, bm.CommitID)
+		if err != nil {
+			return fmt.Errorf("push: collect promotable for %q: %w", bm.Name, err)
+		}
+		allToPromote = append(allToPromote, promotable...)
+		pushedNames = append(pushedNames, bm.Name)
+	}
+
+	if len(allToPromote) > 0 || len(pushedNames) > 0 {
+		// De-duplicate in place: deduped shares allToPromote's backing
+		// array but never overtakes the read index, so this is safe.
+		seen := make(map[[32]byte]bool)
+		deduped := allToPromote[:0]
+		for _, id := range allToPromote {
+			if !seen[id] {
+				seen[id] = true
+				deduped = append(deduped, id)
+			}
+		}
+		meta := fmt.Sprintf("pushed [%s] to %s", strings.Join(pushedNames, ", "), c.url)
+		tx, err := c.repo.Store.Begin()
+		if err != nil {
+			return fmt.Errorf("push: begin tx: %w", err)
+		}
+		// Phase promotion and the oplog entry commit atomically.
+		for _, id := range deduped {
+			if err := c.repo.Store.SetPhase(tx, id, object.PhasePublic); err != nil {
+				c.repo.Store.Rollback(tx)
+				return fmt.Errorf("push: promote phase: %w", err)
+			}
+		}
+		op := store.Operation{
+			Kind: "push", Timestamp: time.Now().Unix(),
+			Before: before, After: before,
+			Metadata: meta,
+		}
+		if _, err := c.repo.Store.InsertOperation(tx, op); err != nil {
+			c.repo.Store.Rollback(tx)
+			return fmt.Errorf("push: insert oplog: %w", err)
+		}
+		if err := c.repo.Store.Commit(tx); err != nil {
+			return fmt.Errorf("push: commit tx: %w", err)
+		}
+	}
+
+	if err := c.pushIssues(); err != nil {
+		return fmt.Errorf("push: issues: %w", err)
+	}
+	return nil
+}
+
+func (c *Client) getInfo() (*InfoResponse, error) {
+	req, _ := http.NewRequest(http.MethodGet, c.url+"/arche/v1/info", nil)
+	c.setAuth(req)
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("info: HTTP %d", resp.StatusCode)
+	}
+	var info InfoResponse
+	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
+		return nil, fmt.Errorf("info decode: %w", err)
+	}
+	return &info, nil
+}
+
+func (c *Client) fetchBloom(filterBytes []byte) ([]PackEntry, error) {
+	req, _ := http.NewRequest(http.MethodPost, c.url+"/arche/v1/bloom",
+		bytes.NewReader(filterBytes))
+	req.Header.Set("Content-Type", "application/octet-stream")
+	c.setAuth(req)
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		body, _ := io.ReadAll(resp.Body)
+		return nil, fmt.Errorf("bloom: HTTP %d: %s", resp.StatusCode, body)
+	}
+	return ReadPack(resp.Body)
+}
+
+func (c *Client) pushObjects(ids [][32]byte) error {
+	pr, pw := io.Pipe()
+	errCh := make(chan error, 1)
+	go func() {
+		entries := make([]PackEntry, 0, len(ids))
+		for _, id := range ids {
+			kind, data, err := c.repo.Store.ReadObject(id)
+			if err != nil {
+				continue
+			}
+			entries = append(entries, PackEntry{ID: id, Kind: kind, Data: data})
+		}
+		errCh <- WritePack(pw, entries)
+		pw.Close()
+	}()
+
+	req, _ := http.NewRequest(http.MethodPost, c.url+"/arche/v1/push", pr)
+	req.Header.Set("Content-Type", "application/octet-stream")
+	c.setAuth(req)
+	resp, err := c.client.Do(req)
+	if packErr := <-errCh; packErr != nil && err == nil {
+		err = packErr
+	}
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		body, _ := io.ReadAll(resp.Body)
+		return fmt.Errorf("push objects: HTTP %d: %s", resp.StatusCode, body)
+	}
+	return nil
+}
+
+func (c *Client) updateRemoteBookmark(name, expectedHex, newHex string, opts PushOptions) (*RefUpdateResponse, error) {
+	body, _ := json.Marshal(RefUpdateRequest{
+		Name:        name,
+		ExpectedID:  expectedHex,
+		NewID:       newHex,
+		Force:       opts.Force,
+		ForcePublic: opts.ForcePublic,
+	})
+
+	req, _ := http.NewRequest(http.MethodPost, c.url+"/arche/v1/update-bookmark",
+		bytes.NewReader(body))
+	req.Header.Set("Content-Type", "application/json")
+	c.setAuth(req)
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+	var result RefUpdateResponse
+	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+		return nil, fmt.Errorf("decode update-bookmark response: %w", err)
+	}
+
+	return &result, nil
+}
+
+func (c *Client) setAuth(req *http.Request) {
+	if c.token != "" {
+		req.Header.Set("Authorization", "Bearer "+c.token)
+	}
+}
+
+func (c *Client) objectsToSend(info *InfoResponse) ([][32]byte, error) {
+	allLocal, err := listPushableObjectIDs(c.repo)
+	if err != nil {
+		return nil, err
+	}
+
+	remoteKnown := make(map[[32]byte]bool)
+	for _, hexID := range info.Bookmarks {
+		b, err := hex.DecodeString(hexID)
+		if err != nil || len(b) != 32 {
+			continue
+		}
+		var id [32]byte
+		copy(id[:], b)
+		c.walkAncestors(id, remoteKnown)
+	}
+
+	var toSend [][32]byte
+	for _, id := range allLocal {
+		if !remoteKnown[id] {
+			toSend = append(toSend, id)
+		}
+	}
+	return toSend, nil
+}
+
+// walkAncestors marks id and everything reachable from it through the local
+// store (a commit's tree and parents; a tree's entries) in known.
+// Unreadable or undecodable objects silently terminate that branch, so the
+// walk stops at the local history boundary.
+// NOTE(review): recursion depth grows with history length; a very long
+// linear history could exhaust the stack — consider an iterative walk.
+func (c *Client) walkAncestors(id [32]byte, known map[[32]byte]bool) {
+	if known[id] {
+		return
+	}
+	known[id] = true
+
+	_, raw, err := c.repo.Store.ReadObject(id)
+	if err != nil {
+		return
+	}
+	// Object kind is sniffed from the NUL-terminated magic in the encoding.
+	if bytes.HasPrefix(raw, []byte("arche-commit\x00")) {
+		commit, err := object.DecodeCommit(raw)
+		if err != nil {
+			return
+		}
+		c.walkAncestors(commit.TreeID, known)
+		for _, p := range commit.Parents {
+			c.walkAncestors(p, known)
+		}
+	} else if bytes.HasPrefix(raw, []byte("arche-tree\x00")) {
+		t, err := object.DecodeTree(raw)
+		if err != nil {
+			return
+		}
+		for _, e := range t.Entries {
+			c.walkAncestors(e.ObjectID, known)
+		}
+	}
+}
+
+func ensureChangeInStore(r *repo.Repo, entries []PackEntry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, e := range entries {
+		if !strings.HasPrefix(e.Kind, "commit") {
+			continue
+		}
+		c, err := object.DecodeCommit(e.Data)
+		if err != nil {
+			continue
+		}
+		if c.ChangeID == "" {
+			continue
+		}
+		r.Store.SetChangeCommit(tx, c.ChangeID, e.ID)
+	}
+	return r.Store.Commit(tx)
+}
+
+// pullIssues downloads the remote issue-event log and merges it into the
+// local issue database. An empty remote log is a no-op — the local issue DB
+// is not even opened in that case.
+func (c *Client) pullIssues() error {
+	req, err := http.NewRequest(http.MethodGet, c.url+"/arche/v1/issues", nil)
+	if err != nil {
+		return err
+	}
+	c.setAuth(req)
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("server returned %d", resp.StatusCode)
+	}
+	var evs []issues.IssueEvent
+	if err := json.NewDecoder(resp.Body).Decode(&evs); err != nil {
+		return fmt.Errorf("decode: %w", err)
+	}
+	if len(evs) == 0 {
+		return nil
+	}
+	idb, err := issuedb.Open(c.repo.ArcheDir())
+	if err != nil {
+		return fmt.Errorf("open issuedb: %w", err)
+	}
+	defer idb.Close()
+	// MergeEvents is expected to de-duplicate against local events.
+	return idb.Issues.MergeEvents(evs)
+}
+
+// pushIssues uploads the local issue-event log, first scrubbing references
+// to secret commits: "ref" events pointing at a secret commit are dropped
+// entirely, and hex hashes of secret commits inside "comment" payloads are
+// redacted before the events leave the machine.
+func (c *Client) pushIssues() error {
+	idb, err := issuedb.Open(c.repo.ArcheDir())
+	if err != nil {
+		return fmt.Errorf("open issuedb: %w", err)
+	}
+	defer idb.Close()
+
+	all, err := idb.Issues.AllEvents()
+	if err != nil {
+		return fmt.Errorf("list events: %w", err)
+	}
+	if len(all) == 0 {
+		return nil
+	}
+
+	secretIDs, err := c.repo.Store.ListSecretCommitIDs()
+	if err != nil {
+		return fmt.Errorf("list secret commits: %w", err)
+	}
+	secretHexSet := make(map[string]bool, len(secretIDs))
+	for _, id := range secretIDs {
+		secretHexSet[fmt.Sprintf("%x", id)] = true
+	}
+
+	// Filter in place: filtered shares all's backing array but never
+	// overtakes the read index, so this is safe.
+	filtered := all[:0]
+	for _, ev := range all {
+		if ev.Kind == "ref" {
+			// A "ref" payload is assumed to be a JSON-encoded hex commit
+			// ref; a failed unmarshal leaves ref empty, which is not secret.
+			var ref string
+			json.Unmarshal(ev.Payload, &ref) //nolint:errcheck
+			if c.isSecretCommitRef(ref) {
+				continue
+			}
+		}
+		if ev.Kind == "comment" && len(secretHexSet) > 0 {
+			ev.Payload = redactSecretHashesInPayload(ev.Payload, secretHexSet)
+		}
+		filtered = append(filtered, ev)
+	}
+
+	body, err := json.Marshal(filtered)
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequest(http.MethodPost, c.url+"/arche/v1/issues", bytes.NewReader(body))
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	c.setAuth(req)
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("server returned %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func (c *Client) isSecretCommitRef(ref string) bool {
+	if len(ref) < 8 {
+		return false
+	}
+	b, err := hex.DecodeString(ref)
+	if err != nil || len(b) != 32 {
+		return false
+	}
+	var id [32]byte
+	copy(id[:], b)
+	phase, err := c.repo.Store.GetPhase(id)
+	if err != nil {
+		return false
+	}
+	return phase == object.PhaseSecret
+}
+
+var hexHashRe = regexp.MustCompile(`\b[0-9a-fA-F]{8,64}\b`)
+
+func redactSecretHashesInPayload(payload []byte, secretHexSet map[string]bool) []byte {
+	var text string
+	if err := json.Unmarshal(payload, &text); err != nil {
+		return payload
+	}
+	redacted := hexHashRe.ReplaceAllStringFunc(text, func(match string) string {
+		lower := strings.ToLower(match)
+		for h := range secretHexSet {
+			if strings.HasPrefix(h, lower) {
+				return "[redacted]"
+			}
+		}
+		return match
+	})
+	if redacted == text {
+		return payload
+	}
+	out, err := json.Marshal(redacted)
+	if err != nil {
+		return payload
+	}
+	return out
+}
+
+// checkMissingObjects walks the server's bookmark tips through the LOCAL
+// store and returns the IDs of objects referenced but not present locally;
+// Pull's convergence rounds then fetch exactly these. The error result is
+// currently always nil and is kept for interface stability.
+func (c *Client) checkMissingObjects(serverBookmarks map[string]string) ([][32]byte, error) {
+	seen := make(map[[32]byte]bool)
+	var missing [][32]byte
+
+	var walk func(id [32]byte)
+	walk = func(id [32]byte) {
+		if seen[id] {
+			return
+		}
+		seen[id] = true
+		kind, data, err := c.repo.Store.ReadObject(id)
+		if err != nil {
+			// Referenced but unreadable locally: this is what we must fetch.
+			missing = append(missing, id)
+			return
+		}
+		if strings.HasPrefix(kind, "commit") {
+			commit, err := object.DecodeCommit(data)
+			if err != nil {
+				return
+			}
+			walk(commit.TreeID)
+			for _, p := range commit.Parents {
+				walk(p)
+			}
+		} else if kind == "tree" {
+			tree, err := object.DecodeTree(data)
+			if err != nil {
+				return
+			}
+			for _, e := range tree.Entries {
+				walk(e.ObjectID)
+			}
+		}
+	}
+
+	// Malformed bookmark values are skipped rather than treated as errors.
+	for _, hexID := range serverBookmarks {
+		b, err := hex.DecodeString(hexID)
+		if err != nil || len(b) != 32 {
+			continue
+		}
+		var id [32]byte
+		copy(id[:], b)
+		walk(id)
+	}
+	return missing, nil
+}
+
+func (c *Client) fetchExact(ids [][32]byte) ([]PackEntry, error) {
+	hexIDs := make([]string, len(ids))
+	for i, id := range ids {
+		hexIDs[i] = hex.EncodeToString(id[:])
+	}
+	body, err := json.Marshal(hexIDs)
+	if err != nil {
+		return nil, err
+	}
+	req, err := http.NewRequest(http.MethodPost, c.url+"/arche/v1/fetch", bytes.NewReader(body))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	c.setAuth(req)
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		b, _ := io.ReadAll(resp.Body)
+		return nil, fmt.Errorf("fetch: HTTP %d: %s", resp.StatusCode, b)
+	}
+	return ReadPack(resp.Body)
+}
+
+// collectPromotable walks commit ancestry breadth-first from tipID and
+// returns every commit that is neither public (already promoted) nor
+// secret. The walk does NOT continue past a public or secret commit — its
+// parents are assumed already handled (public) or deliberately withheld
+// (secret). Only commit objects contribute parents; trees are not visited.
+func collectPromotable(r *repo.Repo, tipID [32]byte) ([][32]byte, error) {
+	seen := make(map[[32]byte]bool)
+	queue := [][32]byte{tipID}
+	var toPromote [][32]byte
+
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		if seen[id] {
+			continue
+		}
+		seen[id] = true
+
+		// GetPhase errors are treated as "draft" (zero phase) — promotable.
+		phase, _ := r.Store.GetPhase(id)
+		switch phase {
+		case object.PhasePublic, object.PhaseSecret:
+			continue
+		}
+		toPromote = append(toPromote, id)
+
+		_, raw, err := r.Store.ReadObject(id)
+		if err != nil || !bytes.HasPrefix(raw, []byte("arche-commit\x00")) {
+			continue
+		}
+		cm, err := object.DecodeCommit(raw)
+		if err != nil {
+			continue
+		}
+		for _, p := range cm.Parents {
+			if !seen[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+	return toPromote, nil
+}

internal/syncpkg/mtls_transport.go [A]
--- /dev/null
+++ b/internal/syncpkg/mtls_transport.go
@@ -1,0 +1,50 @@
+package syncpkg
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"os"
+	"path/filepath"
+)
+
+// NewMTLSClient builds an HTTPS client that authenticates with the given
+// client certificate/key pair; empty paths default to
+// ~/.config/arche/mtls/client.{crt,key}.
+//
+// NOTE(review): server certificate verification is disabled
+// (InsecureSkipVerify), so authentication is one-way — the server verifies
+// us, but we accept any server cert, leaving the connection open to MITM
+// unless pinned elsewhere. Consider verifying against a configured CA.
+func NewMTLSClient(certFile, keyFile string) (*http.Client, error) {
+	if certFile == "" || keyFile == "" {
+		d, err := mtlsDefaultDir()
+		if err != nil {
+			return nil, err
+		}
+		if certFile == "" {
+			certFile = filepath.Join(d, "client.crt")
+		}
+		if keyFile == "" {
+			keyFile = filepath.Join(d, "client.key")
+		}
+	}
+
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, fmt.Errorf("arche+mtls: load client cert %s / %s: %w", certFile, keyFile, err)
+	}
+
+	tlsCfg := &tls.Config{
+		Certificates:       []tls.Certificate{cert},
+		InsecureSkipVerify: true, //nolint:gosec
+		MinVersion:         tls.VersionTLS12,
+	}
+
+	transport := &http.Transport{TLSClientConfig: tlsCfg}
+	return &http.Client{Transport: transport}, nil
+}
+
// mtlsDefaultDir returns ~/.config/arche/mtls, creating it (mode 0700) if
// it does not exist yet.
func mtlsDefaultDir() (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", fmt.Errorf("arche+mtls: resolve home dir: %w", err)
	}
	dir := filepath.Join(home, ".config", "arche", "mtls")
	if mkErr := os.MkdirAll(dir, 0o700); mkErr != nil {
		return "", fmt.Errorf("arche+mtls: create cert dir %s: %w", dir, mkErr)
	}
	return dir, nil
}

internal/syncpkg/pack.go [A]
--- /dev/null
+++ b/internal/syncpkg/pack.go
@@ -1,0 +1,115 @@
+package syncpkg
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+// Pack stream entry tags: each wire entry begins with one tag byte.
+const (
+	packEntryEnd    byte = 0 // terminates the pack stream
+	packEntryObject byte = 1 // an object entry follows
+)
+
+// PackEntry is one object in a pack stream: its content ID, kind string
+// (e.g. "commit", "tree"), and raw encoded bytes.
+type PackEntry struct {
+	ID   [32]byte
+	Kind string
+	Data []byte
+}
+
+func WritePack(w io.Writer, entries []PackEntry) error {
+	for _, e := range entries {
+		if err := writePackEntry(w, e); err != nil {
+			return err
+		}
+	}
+	return writePackEnd(w)
+}
+
+func writePackEntry(w io.Writer, e PackEntry) error {
+	if _, err := w.Write([]byte{packEntryObject}); err != nil {
+		return err
+	}
+	if _, err := w.Write(e.ID[:]); err != nil {
+		return err
+	}
+	kindBytes := []byte(e.Kind)
+	if len(kindBytes) > 255 {
+		return fmt.Errorf("kind string too long: %d", len(kindBytes))
+	}
+	if _, err := w.Write([]byte{byte(len(kindBytes))}); err != nil {
+		return err
+	}
+	if _, err := w.Write(kindBytes); err != nil {
+		return err
+	}
+	var lenBuf [4]byte
+	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(e.Data)))
+	if _, err := w.Write(lenBuf[:]); err != nil {
+		return err
+	}
+	_, err := w.Write(e.Data)
+	return err
+}
+
+func writePackEnd(w io.Writer) error {
+	_, err := w.Write([]byte{packEntryEnd})
+	return err
+}
+
+func ReadPack(r io.Reader) ([]PackEntry, error) {
+	var entries []PackEntry
+	for {
+		var typeBuf [1]byte
+		if _, err := io.ReadFull(r, typeBuf[:]); err != nil {
+			if err == io.EOF {
+				return nil, fmt.Errorf("pack stream ended without end-of-pack marker")
+			}
+			return nil, fmt.Errorf("read pack entry type: %w", err)
+		}
+		switch typeBuf[0] {
+		case packEntryEnd:
+			return entries, nil
+		case packEntryObject:
+			e, err := readPackEntry(r)
+			if err != nil {
+				return nil, err
+			}
+			entries = append(entries, e)
+		default:
+			return nil, fmt.Errorf("unknown pack entry type %d", typeBuf[0])
+		}
+	}
+}
+
+func readPackEntry(r io.Reader) (PackEntry, error) {
+	var e PackEntry
+	if _, err := io.ReadFull(r, e.ID[:]); err != nil {
+		return e, fmt.Errorf("read object ID: %w", err)
+	}
+
+	var kindLenBuf [1]byte
+	if _, err := io.ReadFull(r, kindLenBuf[:]); err != nil {
+		return e, fmt.Errorf("read kind length: %w", err)
+	}
+	kindBytes := make([]byte, kindLenBuf[0])
+	if len(kindBytes) > 0 {
+		if _, err := io.ReadFull(r, kindBytes); err != nil {
+			return e, fmt.Errorf("read kind: %w", err)
+		}
+	}
+	e.Kind = string(kindBytes)
+
+	var lenBuf [4]byte
+	if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
+		return e, fmt.Errorf("read data length: %w", err)
+	}
+	dataLen := binary.BigEndian.Uint32(lenBuf[:])
+	if dataLen > 0 {
+		e.Data = make([]byte, dataLen)
+		if _, err := io.ReadFull(r, e.Data); err != nil {
+			return e, fmt.Errorf("read data: %w", err)
+		}
+	}
+	return e, nil
+}

internal/syncpkg/protocol.go [A]
--- /dev/null
+++ b/internal/syncpkg/protocol.go
@@ -1,0 +1,21 @@
+package syncpkg
+
+// InfoResponse describes a repository to a sync peer: its head change ID,
+// total commit count, and bookmark map (name -> hex commit ID).
+type InfoResponse struct {
+	HeadChangeID string            `json:"head_change_id"`
+	CommitCount  int64             `json:"commit_count"`
+	Bookmarks    map[string]string `json:"bookmarks"`
+}
+
+// RefUpdateRequest asks the server to move a bookmark from ExpectedID to
+// NewID (compare-and-swap); Force and ForcePublic progressively relax the
+// server's checks.
+type RefUpdateRequest struct {
+	Name        string `json:"name"`
+	ExpectedID  string `json:"expected_id"`
+	NewID       string `json:"new_id"`
+	Force       bool   `json:"force,omitempty"`
+	ForcePublic bool   `json:"force_public,omitempty"`
+}
+
+// RefUpdateResponse reports the outcome of a bookmark update; on rejection
+// Current carries the server's actual tip and Err a human-readable reason.
+type RefUpdateResponse struct {
+	OK      bool   `json:"ok"`
+	Current string `json:"current,omitempty"`
+	Err     string `json:"error,omitempty"`
+}

internal/syncpkg/server.go [A]
--- /dev/null
+++ b/internal/syncpkg/server.go
@@ -1,0 +1,597 @@
+package syncpkg
+
+import (
+	"bytes"
+	"crypto/subtle"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log/slog"
+	"net/http"
+	"strings"
+
+	"arche/internal/issuedb"
+	"arche/internal/issues"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+)
+
+// Server implements the arche/v1 sync HTTP API over a local repository.
+type Server struct {
+	repo     *repo.Repo
+	token    string // bearer token; empty disables the auth middleware
+	canWrite bool   // false = read-only: push and update-bookmark rejected
+
+	// OnBookmarkUpdated, if set, is invoked in a new goroutine after a
+	// bookmark successfully moves.
+	OnBookmarkUpdated func(name, oldHex, newHex string)
+
+	// PreUpdateHook, if set, can veto a bookmark update by returning an
+	// error; the update is then rejected before any write happens.
+	PreUpdateHook func(name, oldHex, newHex string) error
+}
+
+// NewServer returns a writable server guarded by the given bearer token
+// (an empty token disables auth).
+func NewServer(r *repo.Repo, token string) *Server {
+	return &Server{repo: r, token: token, canWrite: true}
+}
+
+// NewServerAuth returns a server with no token check whose write access is
+// decided externally (e.g. by the SSH or mTLS identity that accepted the
+// connection).
+func NewServerAuth(r *repo.Repo, canWrite bool) *Server {
+	return &Server{repo: r, canWrite: canWrite}
+}
+
+func (s *Server) Handler() http.Handler {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/arche/v1/info", s.handleInfo)
+	mux.HandleFunc("/arche/v1/bloom", s.handleBloom)
+	mux.HandleFunc("/arche/v1/fetch", s.handleFetch)
+	mux.HandleFunc("/arche/v1/push", s.handlePush)
+	mux.HandleFunc("/arche/v1/update-bookmark", s.handleUpdateBookmark)
+	mux.HandleFunc("/arche/v1/issues", s.handleIssues)
+	return s.authMiddleware(mux)
+}
+
+func (s *Server) authMiddleware(next http.Handler) http.Handler {
+	if s.token == "" {
+		return next
+	}
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		auth := r.Header.Get("Authorization")
+		expected := "Bearer " + s.token
+		if auth != expected {
+			http.Error(w, "Unauthorized", http.StatusUnauthorized)
+			return
+		}
+		next.ServeHTTP(w, r)
+	})
+}
+
+func (s *Server) handleInfo(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodGet {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	headCID, _ := s.repo.Head()
+	bare := object.StripChangeIDPrefix(headCID)
+	commitID, _ := s.repo.Store.GetChangeCommit(bare)
+
+	count, _ := countCommits(s.repo)
+
+	bms, _ := s.repo.Store.ListBookmarks()
+	bmMap := make(map[string]string, len(bms))
+	for _, bm := range bms {
+		bmMap[bm.Name] = hex.EncodeToString(bm.CommitID[:])
+	}
+	_ = commitID
+
+	resp := InfoResponse{
+		HeadChangeID: headCID,
+		CommitCount:  count,
+		Bookmarks:    bmMap,
+	}
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(resp)
+}
+
+// handleBloom serves POST /arche/v1/bloom: the body is the client's Bloom
+// filter over its object IDs (an empty body means "client has nothing"),
+// and the reply is a pack of every reachable object the filter does not
+// contain. Bloom false positives may omit objects the client actually
+// lacks; the client's exact-fetch convergence rounds cover that.
+func (s *Server) handleBloom(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	// Cap the filter upload at 16 MiB to bound memory use.
+	body, err := io.ReadAll(io.LimitReader(r.Body, 16<<20))
+	if err != nil {
+		http.Error(w, "read body: "+err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	var filter *BloomFilter
+	if len(body) > 0 {
+		filter, err = BloomFilterFrom(body)
+		if err != nil {
+			http.Error(w, "parse bloom filter: "+err.Error(), http.StatusBadRequest)
+			return
+		}
+	}
+
+	missing, err := collectMissingObjects(s.repo, filter)
+	if err != nil {
+		http.Error(w, "collect missing: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/octet-stream")
+	// Headers are already sent; a mid-stream write failure cannot be
+	// reported to the client beyond the truncated stream itself.
+	if err := writeObjectPack(w, s.repo, missing); err != nil {
+		return
+	}
+}
+
+func (s *Server) handlePush(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+	if !s.canWrite {
+		http.Error(w, "Forbidden: read-only access", http.StatusForbidden)
+		return
+	}
+
+	entries, err := ReadPack(r.Body)
+	if err != nil {
+		http.Error(w, "read pack: "+err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	if err := storePackEntries(s.repo, entries); err != nil {
+		http.Error(w, "store objects: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(map[string]bool{"ok": true})
+}
+
+// handleUpdateBookmark serves POST /arche/v1/update-bookmark as a
+// compare-and-swap: the move succeeds only if the bookmark still points at
+// ExpectedID, unless Force/ForcePublic is set. Overwriting a tip whose
+// phase is public additionally requires ForcePublic, which also records an
+// obsolete marker for the old tip. Protocol-level rejections are returned
+// as 200 + {ok:false, error:...}; malformed requests and storage failures
+// use HTTP error codes.
+func (s *Server) handleUpdateBookmark(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+	if !s.canWrite {
+		http.Error(w, "Forbidden: read-only access", http.StatusForbidden)
+		return
+	}
+
+	var req RefUpdateRequest
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		http.Error(w, "decode body: "+err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+
+	// NewID must be exactly 32 bytes of hex (64 chars).
+	newIDBytes, err := hex.DecodeString(req.NewID)
+	if err != nil || len(newIDBytes) != 32 {
+		http.Error(w, "invalid new_id", http.StatusBadRequest)
+		return
+	}
+	var newID [32]byte
+	copy(newID[:], newIDBytes)
+
+	// A missing bookmark reads as currentHex == "", which matches a client
+	// whose ExpectedID is "" (creation case).
+	existing, _ := s.repo.Store.GetBookmark(req.Name)
+	currentHex := ""
+	var currentID [32]byte
+	if existing != nil {
+		currentHex = hex.EncodeToString(existing.CommitID[:])
+		currentID = existing.CommitID
+	}
+
+	// CAS check: only enforced when neither force flag is set.
+	if !req.Force && !req.ForcePublic && req.ExpectedID != currentHex {
+		json.NewEncoder(w).Encode(RefUpdateResponse{ //nolint:errcheck
+			OK:      false,
+			Current: currentHex,
+			Err: fmt.Sprintf(
+				"bookmark %q diverged: remote tip is %s, expected %s — pull and merge before pushing",
+				req.Name, currentHex, req.ExpectedID),
+		})
+		return
+	}
+
+	// Plain --force is not enough to rewrite published history.
+	if (req.Force || req.ForcePublic) && existing != nil {
+		phase, _ := s.repo.Store.GetPhase(currentID)
+		if phase == object.PhasePublic && !req.ForcePublic {
+			json.NewEncoder(w).Encode(RefUpdateResponse{ //nolint:errcheck
+				OK:  false,
+				Err: fmt.Sprintf("bookmark %q points to a public commit; use --force-public to overwrite", req.Name),
+			})
+			return
+		}
+	}
+
+	if s.PreUpdateHook != nil {
+		if hookErr := s.PreUpdateHook(req.Name, currentHex, req.NewID); hookErr != nil {
+			json.NewEncoder(w).Encode(RefUpdateResponse{ //nolint:errcheck
+				OK:  false,
+				Err: "pre-update hook rejected: " + hookErr.Error(),
+			})
+			return
+		}
+	}
+
+	tx, err := s.repo.Store.Begin()
+	if err != nil {
+		http.Error(w, "begin tx: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// Rewriting a public tip leaves an obsolete marker so clients can
+	// recover the relationship between old and new history.
+	// NOTE(review): Timestamp is left 0 here — confirm whether WriteObsoleteTx
+	// fills it in.
+	if req.ForcePublic && existing != nil {
+		marker := &object.ObsoleteMarker{
+			Predecessor: currentID,
+			Successors:  [][32]byte{newID},
+			Reason:      "force-push",
+			Timestamp:   0,
+		}
+		if _, err := repo.WriteObsoleteTx(s.repo.Store, tx, marker); err != nil {
+			s.repo.Store.Rollback(tx)
+			http.Error(w, "write obsolete marker: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		// Safe to slice: currentHex is 64 chars (existing != nil) and
+		// req.NewID was validated as 64 hex chars above.
+		slog.Warn("force-pushing public bookmark",
+			"bookmark", req.Name,
+			"old_tip", currentHex[:8],
+			"new_tip", req.NewID[:8],
+		)
+	}
+
+	if err := s.repo.Store.SetBookmark(tx, store.Bookmark{Name: req.Name, CommitID: newID}); err != nil {
+		s.repo.Store.Rollback(tx)
+		http.Error(w, "set bookmark: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	if err := s.repo.Store.Commit(tx); err != nil {
+		http.Error(w, "commit: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// Fire-and-forget notification; runs after the tx is committed.
+	if s.OnBookmarkUpdated != nil {
+		go s.OnBookmarkUpdated(req.Name, currentHex, req.NewID)
+	}
+
+	json.NewEncoder(w).Encode(RefUpdateResponse{OK: true}) //nolint:errcheck
+}
+
+func collectMissingObjects(r *repo.Repo, filter *BloomFilter) ([][32]byte, error) {
+	allIDs, err := listAllObjectIDs(r)
+	if err != nil {
+		return nil, err
+	}
+	if filter == nil {
+		return allIDs, nil
+	}
+	var missing [][32]byte
+	for _, id := range allIDs {
+		if !filter.Test(id) {
+			missing = append(missing, id)
+		}
+	}
+	return missing, nil
+}
+
+func listAllObjectIDs(r *repo.Repo) ([][32]byte, error) {
+	seen := make(map[[32]byte]bool)
+	var queue [][32]byte
+
+	if headCID, err := r.Head(); err == nil {
+		bare := object.StripChangeIDPrefix(headCID)
+		if id, err := r.Store.GetChangeCommit(bare); err == nil {
+			if !seen[id] {
+				seen[id] = true
+				queue = append(queue, id)
+			}
+		}
+	}
+	bms, _ := r.Store.ListBookmarks()
+	for _, bm := range bms {
+		if !seen[bm.CommitID] {
+			seen[bm.CommitID] = true
+			queue = append(queue, bm.CommitID)
+		}
+	}
+
+	var result [][32]byte
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		result = append(result, id)
+
+		_, raw, err := r.Store.ReadObject(id)
+		if err != nil {
+			continue
+		}
+		if strings.HasPrefix(string(raw), "arche-commit") {
+			c, err := object.DecodeCommit(raw)
+			if err != nil {
+				continue
+			}
+			if !seen[c.TreeID] {
+				seen[c.TreeID] = true
+				queue = append(queue, c.TreeID)
+			}
+			for _, p := range c.Parents {
+				if !seen[p] {
+					seen[p] = true
+					queue = append(queue, p)
+				}
+			}
+		} else if strings.HasPrefix(string(raw), "arche-tree") {
+			t, err := object.DecodeTree(raw)
+			if err != nil {
+				continue
+			}
+			for _, e := range t.Entries {
+				if !seen[e.ObjectID] {
+					seen[e.ObjectID] = true
+					queue = append(queue, e.ObjectID)
+				}
+			}
+		}
+	}
+
+	issueIDs, _ := r.Store.ListObjectsByKind(string(object.KindIssueEvent))
+	for _, id := range issueIDs {
+		if !seen[id] {
+			seen[id] = true
+			result = append(result, id)
+		}
+	}
+
+	return result, nil
+}
+
+// listPushableObjectIDs is listAllObjectIDs restricted to non-secret
+// history: secret-phase roots and secret-phase commit parents are pruned
+// from the walk, so nothing reachable only through a secret commit is ever
+// pushed. Trees and their entries are included once their commit is
+// (phases apply to commits). Issue-event objects are appended
+// unconditionally.
+// NOTE(review): issue events are not phase-filtered here; references to
+// secret commits inside them are scrubbed separately in pushIssues.
+func listPushableObjectIDs(r *repo.Repo) ([][32]byte, error) {
+	seen := make(map[[32]byte]bool)
+	var queue [][32]byte
+
+	// GetPhase errors read as the zero phase, which is treated as pushable.
+	addIfNonSecret := func(id [32]byte) {
+		phase, _ := r.Store.GetPhase(id)
+		if phase == object.PhaseSecret {
+			return
+		}
+		if !seen[id] {
+			seen[id] = true
+			queue = append(queue, id)
+		}
+	}
+
+	if headCID, err := r.Head(); err == nil {
+		bare := object.StripChangeIDPrefix(headCID)
+		if id, err := r.Store.GetChangeCommit(bare); err == nil {
+			addIfNonSecret(id)
+		}
+	}
+	bms, _ := r.Store.ListBookmarks()
+	for _, bm := range bms {
+		addIfNonSecret(bm.CommitID)
+	}
+
+	var result [][32]byte
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		result = append(result, id)
+
+		_, raw, err := r.Store.ReadObject(id)
+		if err != nil {
+			continue
+		}
+		if bytes.HasPrefix(raw, []byte("arche-commit\x00")) {
+			c, err := object.DecodeCommit(raw)
+			if err != nil {
+				continue
+			}
+			if !seen[c.TreeID] {
+				seen[c.TreeID] = true
+				queue = append(queue, c.TreeID)
+			}
+			// Secret parents prune their whole subtree from the push set.
+			for _, p := range c.Parents {
+				phase, _ := r.Store.GetPhase(p)
+				if phase == object.PhaseSecret {
+					continue
+				}
+				if !seen[p] {
+					seen[p] = true
+					queue = append(queue, p)
+				}
+			}
+		} else if bytes.HasPrefix(raw, []byte("arche-tree\x00")) {
+			t, err := object.DecodeTree(raw)
+			if err != nil {
+				continue
+			}
+			for _, e := range t.Entries {
+				if !seen[e.ObjectID] {
+					seen[e.ObjectID] = true
+					queue = append(queue, e.ObjectID)
+				}
+			}
+		}
+	}
+
+	issueIDs, _ := r.Store.ListObjectsByKind(string(object.KindIssueEvent))
+	for _, id := range issueIDs {
+		if !seen[id] {
+			seen[id] = true
+			result = append(result, id)
+		}
+	}
+
+	return result, nil
+}
+
+// handleFetch serves the object-fetch endpoint: a POST whose body is a JSON
+// array of hex-encoded 32-byte object IDs. The response streams the requested
+// objects back as a binary pack.
+func (s *Server) handleFetch(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodPost {
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	// Request body is capped at 4 MiB — ID lists, not object data.
+	body, err := io.ReadAll(io.LimitReader(r.Body, 4<<20))
+	if err != nil {
+		http.Error(w, "read body: "+err.Error(), http.StatusBadRequest)
+		return
+	}
+	var hexIDs []string
+	if err := json.Unmarshal(body, &hexIDs); err != nil {
+		http.Error(w, "decode: "+err.Error(), http.StatusBadRequest)
+		return
+	}
+	// Malformed or wrong-length IDs are dropped silently rather than
+	// failing the whole request.
+	ids := make([][32]byte, 0, len(hexIDs))
+	for _, h := range hexIDs {
+		b, err := hex.DecodeString(h)
+		if err != nil || len(b) != 32 {
+			continue
+		}
+		var id [32]byte
+		copy(id[:], b)
+		ids = append(ids, id)
+	}
+
+	w.Header().Set("Content-Type", "application/octet-stream")
+	// Write errors mid-stream cannot be reported to the client anyway.
+	writeObjectPack(w, s.repo, ids) //nolint:errcheck
+}
+
+// writeObjectPack streams the given objects to w as pack entries and
+// terminates the stream with an end marker. IDs that cannot be read from the
+// store are skipped silently; a write error aborts the stream immediately.
+func writeObjectPack(w io.Writer, r *repo.Repo, ids [][32]byte) error {
+	for _, id := range ids {
+		kind, data, err := r.Store.ReadObject(id)
+		if err != nil {
+			continue
+		}
+
+		entry := PackEntry{ID: id, Kind: kind, Data: data}
+		if err := writePackEntry(w, entry); err != nil {
+			return err
+		}
+	}
+
+	return writePackEnd(w)
+}
+
+// storePackEntries writes received pack entries to the store inside a single
+// transaction. Commits arriving from a remote are marked public. After a
+// successful commit of the transaction, any issue events in the pack are
+// merged into the issue database (best-effort; see applyReceivedIssueEvents).
+func storePackEntries(r *repo.Repo, entries []PackEntry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return err
+	}
+
+	for _, e := range entries {
+		// Any failure rolls back the whole batch — all-or-nothing.
+		if err := r.Store.WriteObject(tx, e.ID, e.Kind, e.Data); err != nil {
+			r.Store.Rollback(tx)
+			return fmt.Errorf("store object %x: %w", e.ID[:6], err)
+		}
+		if e.Kind == string(object.KindCommit) {
+			if err := r.Store.SetPhase(tx, e.ID, object.PhasePublic); err != nil {
+				r.Store.Rollback(tx)
+				return fmt.Errorf("set phase %x: %w", e.ID[:6], err)
+			}
+		}
+	}
+
+	if err := r.Store.Commit(tx); err != nil {
+		return err
+	}
+
+	// Issue-event merge happens outside the object transaction; its
+	// failures are deliberately not surfaced to the caller.
+	applyReceivedIssueEvents(r, entries)
+
+	return nil
+}
+
+// applyReceivedIssueEvents extracts issue-event objects from a received pack
+// and merges them into the repo's issue database. Entirely best-effort:
+// decode failures, an unopenable issuedb, and merge errors are all ignored.
+func applyReceivedIssueEvents(r *repo.Repo, entries []PackEntry) {
+	var issueEvs []issues.IssueEvent
+	for _, e := range entries {
+		if e.Kind != string(object.KindIssueEvent) {
+			continue
+		}
+		obj, err := object.DecodeIssueEvent(e.Data)
+		if err != nil {
+			continue
+		}
+		issueEvs = append(issueEvs, issues.IssueEvent{
+			// The event ID is the hex of the object's content hash.
+			EventID: fmt.Sprintf("%x", e.ID),
+			IssueID: obj.IssueID,
+			HLCMS:   obj.HLCMS,
+			HLCSeq:  obj.HLCSeq,
+			Kind:    obj.Kind,
+			Payload: obj.Payload,
+			Author:  obj.Author,
+			// Local receive time is unknown for remote events; left zero.
+			Created: 0,
+		})
+	}
+	if len(issueEvs) == 0 {
+		return
+	}
+	idb, err := issuedb.Open(r.ArcheDir())
+	if err != nil {
+		return
+	}
+	defer idb.Close()
+	_ = idb.Issues.MergeEvents(issueEvs)
+}
+
+// countCommits returns how many objects reachable via listAllObjectIDs are
+// commits, identified by the "arche-commit\x00" payload prefix. It reads the
+// raw bytes of every object, so cost is linear in repository size.
+func countCommits(r *repo.Repo) (int64, error) {
+	ids, err := listAllObjectIDs(r)
+	if err != nil {
+		return 0, err
+	}
+	var n int64
+	for _, id := range ids {
+		_, raw, err := r.Store.ReadObject(id)
+		if err != nil {
+			// Unreadable objects are skipped, not counted as errors.
+			continue
+		}
+		if bytes.HasPrefix(raw, []byte("arche-commit\x00")) {
+			n++
+		}
+	}
+	return n, nil
+}
+
+// handleIssues serves the issue-event sync endpoint.
+// GET returns every stored issue event as a JSON array; POST (write access
+// required) merges a JSON array of events into the database, with the request
+// body capped at 32 MiB.
+func (s *Server) handleIssues(w http.ResponseWriter, r *http.Request) {
+	// The issue DB is opened per-request and closed when the handler returns.
+	idb, err := issuedb.Open(s.repo.ArcheDir())
+	if err != nil {
+		http.Error(w, "open issuedb: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	defer idb.Close()
+
+	switch r.Method {
+	case http.MethodGet:
+		evs, err := idb.Issues.AllEvents()
+		if err != nil {
+			http.Error(w, "list events: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		w.Header().Set("Content-Type", "application/json")
+		// Encode errors are unreportable once the body has started.
+		json.NewEncoder(w).Encode(evs)
+
+	case http.MethodPost:
+		if !s.canWrite {
+			http.Error(w, "Forbidden: read-only access", http.StatusForbidden)
+			return
+		}
+		body, err := io.ReadAll(io.LimitReader(r.Body, 32<<20))
+		if err != nil {
+			http.Error(w, "read body: "+err.Error(), http.StatusBadRequest)
+			return
+		}
+		var evs []issues.IssueEvent
+		if err := json.Unmarshal(body, &evs); err != nil {
+			http.Error(w, "decode events: "+err.Error(), http.StatusBadRequest)
+			return
+		}
+		if err := idb.Issues.MergeEvents(evs); err != nil {
+			http.Error(w, "merge events: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		w.Header().Set("Content-Type", "application/json")
+		// Reports the number of events submitted, not how many were new.
+		json.NewEncoder(w).Encode(map[string]int{"merged": len(evs)})
+
+	default:
+		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+	}
+}

internal/syncpkg/ssh_transport.go [A]
--- /dev/null
+++ b/internal/syncpkg/ssh_transport.go
@@ -1,0 +1,124 @@
+package syncpkg
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"time"
+)
+
+// SSHTransport is an http.RoundTripper that tunnels HTTP requests through an
+// "ssh <host> arche-sync <repo>" subprocess instead of a TCP connection.
+type SSHTransport struct {
+	Host    string // ssh destination (user@host form accepted by ssh)
+	Repo    string // repository name passed to the remote arche-sync command
+	KeyFile string // optional identity file; empty uses ssh defaults
+}
+
+// NewSSHClient returns an *http.Client whose transport speaks HTTP over SSH.
+// Timeout is deliberately zero: sync transfers can be long-running.
+func NewSSHClient(host, repo, keyFile string) (*http.Client, error) {
+	t := &SSHTransport{Host: host, Repo: repo, KeyFile: keyFile}
+	return &http.Client{Transport: t, Timeout: 0}, nil
+}
+
+// sshConn adapts an ssh subprocess's stdin/stdout pipes to the net.Conn
+// interface so HTTP machinery can treat the tunnel as a connection.
+type sshConn struct {
+	cmd    *exec.Cmd
+	reader io.ReadCloser  // subprocess stdout (remote -> local)
+	writer io.WriteCloser // subprocess stdin  (local -> remote)
+	local  net.Addr
+	remote net.Addr
+}
+
+// dialSSH spawns "ssh [opts] host arche-sync repo" and returns a conn wired
+// to its pipes. BatchMode prevents interactive password prompts;
+// accept-new trusts not-yet-known host keys without prompting.
+func dialSSH(host, repo, keyFile string) (*sshConn, error) {
+	args := []string{
+		"-o", "StrictHostKeyChecking=accept-new",
+		"-o", "BatchMode=yes",
+	}
+	if keyFile != "" {
+		args = append(args, "-i", keyFile)
+	}
+	args = append(args, host, "arche-sync", repo)
+
+	cmd := exec.Command("ssh", args...)
+	// ssh diagnostics pass straight through to the user's terminal.
+	cmd.Stderr = os.Stderr
+
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, fmt.Errorf("ssh stdin: %w", err)
+	}
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, fmt.Errorf("ssh stdout: %w", err)
+	}
+	if err := cmd.Start(); err != nil {
+		return nil, fmt.Errorf("ssh start: %w", err)
+	}
+
+	return &sshConn{
+		cmd:    cmd,
+		reader: stdout,
+		writer: stdin,
+		// Placeholder addresses: there is no real TCP endpoint to report.
+		local:  &net.TCPAddr{},
+		remote: &net.TCPAddr{},
+	}, nil
+}
+
+// Read and Write proxy the subprocess pipes directly.
+func (c *sshConn) Read(b []byte) (int, error)  { return c.reader.Read(b) }
+func (c *sshConn) Write(b []byte) (int, error) { return c.writer.Write(b) }
+// Close shuts both pipes, then waits for the ssh process to exit and
+// returns its exit status.
+func (c *sshConn) Close() error {
+	c.writer.Close() //nolint:errcheck
+	c.reader.Close() //nolint:errcheck
+	return c.cmd.Wait()
+}
+func (c *sshConn) LocalAddr() net.Addr  { return c.local }
+func (c *sshConn) RemoteAddr() net.Addr { return c.remote }
+// Deadlines are accepted but ignored; the HTTP client built on top sets no
+// timeout, so the no-op is consistent with how the conn is actually used.
+func (c *sshConn) SetDeadline(t time.Time) error      { return nil }
+func (c *sshConn) SetReadDeadline(t time.Time) error  { return nil }
+func (c *sshConn) SetWriteDeadline(t time.Time) error { return nil }
+
+// RoundTrip implements http.RoundTripper: it dials the ssh subprocess,
+// bridges it to an in-memory net.Pipe, writes the serialized request into the
+// "remote" end and reads the HTTP response back from the same end.
+func (t *SSHTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	conn, err := dialSSH(t.Host, t.Repo, t.KeyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	local, remote := net.Pipe()
+
+	// ssh stdout -> pipe: closing local when the remote side ends also
+	// unblocks the request-forwarding goroutine below.
+	go func() {
+		io.Copy(local, conn) //nolint:errcheck
+		local.Close()
+	}()
+	// pipe -> ssh stdin: forwards the request bytes written to remote.
+	go func() {
+		io.Copy(conn, local) //nolint:errcheck
+	}()
+
+	if err := req.Write(remote); err != nil {
+		remote.Close()
+		conn.Close()
+		return nil, fmt.Errorf("write request: %w", err)
+	}
+
+	resp, err := http.ReadResponse(bufio.NewReader(remote), req)
+	if err != nil {
+		remote.Close()
+		conn.Close()
+		return nil, fmt.Errorf("read response: %w", err)
+	}
+
+	// The caller must Close the body; that tears down both the pipe and
+	// the ssh subprocess (see closeOnBodyClose).
+	resp.Body = &closeOnBodyClose{resp.Body, remote, conn}
+	return resp, nil
+}
+
+// closeOnBodyClose ties the lifetime of the pipe and the ssh connection to
+// the response body: closing the body closes everything underneath.
+type closeOnBodyClose struct {
+	io.ReadCloser
+	pipe net.Conn
+	conn io.Closer
+}
+
+// Close closes the body first, then best-effort closes the pipe and the ssh
+// subprocess; only the body's close error is returned.
+func (c *closeOnBodyClose) Close() error {
+	err := c.ReadCloser.Close()
+	c.pipe.Close() //nolint:errcheck
+	c.conn.Close() //nolint:errcheck
+	return err
+}

internal/syncpkg/syncpkg_test.go [A]
--- /dev/null
+++ b/internal/syncpkg/syncpkg_test.go
@@ -1,0 +1,168 @@
+package syncpkg_test
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"testing"
+
+	"arche/internal/syncpkg"
+)
+
+// makeID derives a deterministic 32-byte ID from a single seed byte.
+func makeID(seed byte) [32]byte {
+	var b [32]byte
+	b[0] = seed
+	return sha256.Sum256(b[:])
+}
+
+// A fresh filter must not report an ID before Add, and must after.
+func TestBloom_AddTest_HitAfterAdd(t *testing.T) {
+	f := syncpkg.NewBloom(100)
+	id := makeID(1)
+	if f.Test(id) {
+		t.Error("should not be present before Add")
+	}
+	f.Add(id)
+	if !f.Test(id) {
+		t.Error("should be present after Add")
+	}
+}
+
+// Bloom filters may yield false positives but never false negatives:
+// every added ID must test positive.
+func TestBloom_NoFalseNegatives(t *testing.T) {
+	const n = 200
+	f := syncpkg.NewBloom(n)
+	ids := make([][32]byte, n)
+	for i := range ids {
+		ids[i] = makeID(byte(i))
+		f.Add(ids[i])
+	}
+	for i, id := range ids {
+		if !f.Test(id) {
+			t.Errorf("false negative at index %d", i)
+		}
+	}
+}
+
+// Round-tripping through Bytes/BloomFilterFrom must preserve membership.
+func TestBloom_Serialize(t *testing.T) {
+	f := syncpkg.NewBloom(50)
+	for i := range 10 {
+		f.Add(makeID(byte(i)))
+	}
+
+	data := f.Bytes()
+
+	f2, err := syncpkg.BloomFilterFrom(data)
+	if err != nil {
+		t.Fatalf("BloomFilterFrom: %v", err)
+	}
+
+	for i := range 10 {
+		id := makeID(byte(i))
+		if !f2.Test(id) {
+			t.Errorf("id %d not found after deserialization", i)
+		}
+	}
+}
+
+// Deserializing an empty buffer must be rejected, not produce a filter.
+func TestBloom_EmptyBytes(t *testing.T) {
+	_, err := syncpkg.BloomFilterFrom([]byte{})
+	if err == nil {
+		t.Error("expected error for empty data")
+	}
+}
+
+// An empty pack must round-trip to zero entries without error.
+func TestPack_Empty(t *testing.T) {
+	var buf bytes.Buffer
+	if err := syncpkg.WritePack(&buf, nil); err != nil {
+		t.Fatalf("WritePack (empty): %v", err)
+	}
+	entries, err := syncpkg.ReadPack(&buf)
+	if err != nil {
+		t.Fatalf("ReadPack (empty): %v", err)
+	}
+	if len(entries) != 0 {
+		t.Errorf("expected 0 entries, got %d", len(entries))
+	}
+}
+
+// A single entry must round-trip with ID, Kind, and Data intact.
+func TestPack_SingleEntry(t *testing.T) {
+	id := makeID(7)
+	e := syncpkg.PackEntry{
+		ID:   id,
+		Kind: "blob",
+		Data: []byte("hello pack"),
+	}
+
+	var buf bytes.Buffer
+	if err := syncpkg.WritePack(&buf, []syncpkg.PackEntry{e}); err != nil {
+		t.Fatalf("WritePack: %v", err)
+	}
+
+	entries, err := syncpkg.ReadPack(&buf)
+	if err != nil {
+		t.Fatalf("ReadPack: %v", err)
+	}
+	if len(entries) != 1 {
+		t.Fatalf("len: want 1, got %d", len(entries))
+	}
+	if entries[0].ID != id {
+		t.Errorf("ID mismatch")
+	}
+	if entries[0].Kind != "blob" {
+		t.Errorf("Kind: want blob, got %q", entries[0].Kind)
+	}
+	if !bytes.Equal(entries[0].Data, []byte("hello pack")) {
+		t.Errorf("Data mismatch")
+	}
+}
+
+// Multiple entries must round-trip in order with all fields preserved.
+func TestPack_MultipleEntries(t *testing.T) {
+	kinds := []string{"blob", "tree", "commit"}
+	in := make([]syncpkg.PackEntry, len(kinds))
+	for i, k := range kinds {
+		in[i] = syncpkg.PackEntry{
+			ID:   makeID(byte(i + 10)),
+			Kind: k,
+			Data: []byte("data-" + k),
+		}
+	}
+
+	var buf bytes.Buffer
+	if err := syncpkg.WritePack(&buf, in); err != nil {
+		t.Fatalf("WritePack: %v", err)
+	}
+	out, err := syncpkg.ReadPack(&buf)
+	if err != nil {
+		t.Fatalf("ReadPack: %v", err)
+	}
+	if len(out) != len(in) {
+		t.Fatalf("len: want %d, got %d", len(in), len(out))
+	}
+	for i := range in {
+		if out[i].ID != in[i].ID {
+			t.Errorf("[%d] ID mismatch", i)
+		}
+		if out[i].Kind != in[i].Kind {
+			t.Errorf("[%d] Kind: want %q got %q", i, in[i].Kind, out[i].Kind)
+		}
+		if !bytes.Equal(out[i].Data, in[i].Data) {
+			t.Errorf("[%d] Data mismatch", i)
+		}
+	}
+}
+
+// A 64 KiB payload (non-trivial byte pattern, period 251) must survive a
+// round-trip unmodified.
+func TestPack_LargeData(t *testing.T) {
+	large := make([]byte, 64*1024)
+	for i := range large {
+		large[i] = byte(i % 251)
+	}
+	e := syncpkg.PackEntry{ID: makeID(99), Kind: "blob", Data: large}
+
+	var buf bytes.Buffer
+	// Write error would surface as a ReadPack failure below.
+	syncpkg.WritePack(&buf, []syncpkg.PackEntry{e}) //nolint:errcheck
+	out, err := syncpkg.ReadPack(&buf)
+	if err != nil {
+		t.Fatalf("ReadPack large: %v", err)
+	}
+	if !bytes.Equal(out[0].Data, large) {
+		t.Error("large data round-trip failed")
+	}
+}

internal/tui/hunks.go [A]
--- /dev/null
+++ b/internal/tui/hunks.go
@@ -1,0 +1,194 @@
+package tui
+
+import (
+	"fmt"
+	"strings"
+
+	"arche/internal/diff"
+
+	tea "github.com/charmbracelet/bubbletea"
+	"github.com/charmbracelet/lipgloss"
+)
+
+// HunkItem is one selectable diff hunk plus its position within its file,
+// used for the "[n/m]" progress display in the selector UI.
+type HunkItem struct {
+	FilePath         string
+	HunkIdx          int
+	TotalHunksInFile int
+	Hunk             diff.Hunk
+}
+
+// HunkSelection is the outcome of an interactive selection: Selected is
+// parallel to the input items; Cancelled is true if the user aborted.
+type HunkSelection struct {
+	Selected  []bool
+	Cancelled bool
+}
+
+// RunHunkSelector runs the full-screen interactive hunk picker. verb labels
+// the "y" key in the help line (e.g. "include", "restore"). An empty item
+// list returns an empty, non-cancelled selection without starting the TUI.
+func RunHunkSelector(items []HunkItem, verb string) (HunkSelection, error) {
+	if len(items) == 0 {
+		return HunkSelection{}, nil
+	}
+	m := hunkModel{
+		items:    items,
+		selected: make([]bool, len(items)),
+		verb:     verb,
+	}
+	prog := tea.NewProgram(m, tea.WithAltScreen())
+	final, err := prog.Run()
+	if err != nil {
+		return HunkSelection{Cancelled: true}, err
+	}
+	// bubbletea returns the final model as tea.Model; recover our type.
+	fm := final.(hunkModel)
+	return HunkSelection{Selected: fm.selected, Cancelled: fm.quit}, nil
+}
+
+// Terminal styles, using basic ANSI palette indices so they follow the
+// user's terminal theme.
+var (
+	styleAdd     = lipgloss.NewStyle().Foreground(lipgloss.Color("2"))
+	styleRemove  = lipgloss.NewStyle().Foreground(lipgloss.Color("1"))
+	styleEqual   = lipgloss.NewStyle().Foreground(lipgloss.Color("240"))
+	styleHeader  = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("12"))
+	styleBar     = lipgloss.NewStyle().Foreground(lipgloss.Color("8"))
+	styleKey     = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("11"))
+	styleInclude = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("2"))
+	styleSkip    = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("1"))
+)
+
+// hunkModel is the bubbletea model for the hunk selector. Values are copied
+// on each Update call, per the bubbletea value-model convention.
+type hunkModel struct {
+	items    []HunkItem
+	selected []bool  // parallel to items; selected[i] == include hunk i
+	cursor   int     // index of the hunk currently displayed
+	verb     string  // label for the "y" action in the help line
+	quit     bool    // true when the user aborted (Q / ctrl+c)
+	done     bool    // true once a terminal state is reached
+	msg      string  // final message shown by View after bulk actions
+}
+
+func (m hunkModel) Init() tea.Cmd { return nil }
+
+// Update handles one key event: y/n decide the current hunk and advance,
+// a/d bulk-apply to all remaining hunks, arrows navigate, enter confirms,
+// and Q/ctrl+c aborts the whole selection.
+func (m hunkModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+	switch msg := msg.(type) {
+	case tea.KeyMsg:
+		switch msg.String() {
+		case "y", "Y":
+			m.selected[m.cursor] = true
+			return m.advance(), nil
+
+		case "n", "N":
+			m.selected[m.cursor] = false
+			return m.advance(), nil
+
+		case "a":
+			// Include the current hunk and everything after it.
+			for i := m.cursor; i < len(m.items); i++ {
+				m.selected[i] = true
+			}
+			m.msg = "All remaining hunks included."
+			m.done = true
+			return m, tea.Quit
+
+		case "d":
+			// Skip the current hunk and everything after it.
+			for i := m.cursor; i < len(m.items); i++ {
+				m.selected[i] = false
+			}
+			m.msg = "All remaining hunks skipped."
+			m.done = true
+			return m, tea.Quit
+
+		case "left", "h", "p", "backspace":
+			if m.cursor > 0 {
+				m.cursor--
+			}
+
+		case "right", "l", "tab":
+			if m.cursor < len(m.items)-1 {
+				m.cursor++
+			}
+
+		case "enter", "q":
+			// Confirm whatever has been selected so far.
+			m.done = true
+			return m, tea.Quit
+
+		case "ctrl+c", "Q":
+			// Abort: RunHunkSelector reports Cancelled via the quit flag.
+			m.quit = true
+			m.done = true
+			return m, tea.Quit
+		}
+	}
+	return m, nil
+}
+
+// advance moves the cursor to the next hunk, or marks the selection done
+// when the last hunk has just been decided.
+func (m hunkModel) advance() hunkModel {
+	if m.cursor < len(m.items)-1 {
+		m.cursor++
+	} else {
+		m.done = true
+	}
+	return m
+}
+
+// View renders either the terminal message (after done/abort) or the current
+// hunk: a header with progress and file info, the colorized hunk body, the
+// current hunk's included/skipped state, and a one-line key legend.
+func (m hunkModel) View() string {
+	if m.done {
+		if m.quit {
+			return styleRemove.Render("Aborted.") + "\n"
+		}
+		if m.msg != "" {
+			return m.msg + "\n"
+		}
+		return styleInclude.Render("Selection confirmed.") + "\n"
+	}
+
+	item := m.items[m.cursor]
+	var sb strings.Builder
+
+	// Header: program name, overall progress, file and per-file hunk index.
+	progress := fmt.Sprintf("[%d/%d]", m.cursor+1, len(m.items))
+	fileInfo := fmt.Sprintf("%s  hunk %d/%d",
+		item.FilePath, item.HunkIdx+1, item.TotalHunksInFile)
+	sb.WriteString(styleHeader.Render("arche interactive"))
+	sb.WriteString("  ")
+	sb.WriteString(styleBar.Render(progress))
+	sb.WriteString("  ")
+	sb.WriteString(fileInfo)
+	sb.WriteString("\n")
+	sb.WriteString(styleBar.Render(strings.Repeat("─", 70)))
+	sb.WriteString("\n")
+
+	sb.WriteString(styleBar.Render(item.Hunk.Header()))
+	sb.WriteString("\n")
+
+	// Hunk body: each line is prefixed with its kind marker and colored
+	// by whether it is an addition, removal, or context line.
+	for _, l := range item.Hunk.Lines {
+		line := string(l.Kind) + l.Content
+		if !strings.HasSuffix(line, "\n") {
+			line += "\n"
+		}
+		switch l.Kind {
+		case diff.LineAdd:
+			sb.WriteString(styleAdd.Render(line))
+		case diff.LineRemove:
+			sb.WriteString(styleRemove.Render(line))
+		default:
+			sb.WriteString(styleEqual.Render(line))
+		}
+	}
+
+	sb.WriteString(styleBar.Render(strings.Repeat("─", 70)))
+	sb.WriteString("\n")
+
+	// Current hunk's selection state.
+	switch {
+	case m.selected[m.cursor]:
+		sb.WriteString(styleInclude.Render("✓ included"))
+	default:
+		sb.WriteString(styleSkip.Render("✗ skipped"))
+	}
+	sb.WriteString("\n\n")
+
+	// Key legend; the "y" action label is the caller-supplied verb.
+	keys := []string{
+		styleKey.Render("y") + " " + m.verb,
+		styleKey.Render("n") + " skip",
+		styleKey.Render("a") + " all",
+		styleKey.Render("d") + " none",
+		styleKey.Render("←/→") + " navigate",
+		styleKey.Render("enter") + " confirm",
+		styleKey.Render("Q") + " abort",
+	}
+	sb.WriteString(strings.Join(keys, "  "))
+	sb.WriteString("\n")
+
+	return sb.String()
+}

internal/ui/server.go [A]
--- /dev/null
+++ b/internal/ui/server.go
@@ -1,0 +1,564 @@
+package ui
+
+import (
+	"embed"
+	"encoding/hex"
+	"fmt"
+	"html/template"
+	"net/http"
+	"strings"
+
+	"arche/internal/diff"
+	"arche/internal/issuedb"
+	"arche/internal/markdown"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/wc"
+)
+
+//go:embed templates/*.html
+var tmplFS embed.FS
+
+// server holds the per-process state of the local web UI: the repository and
+// an open issue database (held for the lifetime of Serve).
+type server struct {
+	r   *repo.Repo
+	idb *issuedb.DB
+}
+
+// Serve starts the local web UI on localhost:<port> and blocks until the
+// listener fails. Binding to localhost only — the UI is not meant to be
+// exposed to the network.
+func Serve(r *repo.Repo, port int) error {
+	idb, err := issuedb.Open(r.ArcheDir())
+	if err != nil {
+		return fmt.Errorf("issuedb: %w", err)
+	}
+	defer idb.Close()
+
+	s := &server{r: r, idb: idb}
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", s.handleRoot)
+	mux.HandleFunc("/log", s.handleLog)
+	mux.HandleFunc("/commit", s.handleCommit)
+	mux.HandleFunc("/tree", s.handleTree)
+	mux.HandleFunc("/status", s.handleStatus)
+	mux.HandleFunc("/bookmarks", s.handleBookmarks)
+	mux.HandleFunc("/issues", s.handleIssues)
+	mux.HandleFunc("/issue", s.handleIssue)
+	mux.HandleFunc("/wiki", s.handleWiki)
+	mux.HandleFunc("/wiki/page", s.handleWikiPage)
+
+	addr := fmt.Sprintf("localhost:%d", port)
+	fmt.Printf("arche ui: listening on http://%s\n", addr)
+	return http.ListenAndServe(addr, mux)
+}
+
+// Template helpers available to every page.
+var tmplFuncs = template.FuncMap{
+	"markdown": markdown.Render,
+}
+
+// parsePage parses the shared base layout together with one page template.
+// Templates are re-parsed on every request, so edits to the embedded files
+// only take effect on rebuild, but per-request parsing keeps the code simple.
+func parsePage(name string) (*template.Template, error) {
+	return template.New("").Funcs(tmplFuncs).ParseFS(tmplFS, "templates/base.html", "templates/"+name)
+}
+
+// render executes the named page template. Parse failures become a 500;
+// execute failures are only logged, since headers may already be written.
+func render(w http.ResponseWriter, page string, data any) {
+	t, err := parsePage(page)
+	if err != nil {
+		http.Error(w, "template parse: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+	w.Header().Set("Content-Type", "text/html; charset=utf-8")
+	if err := t.ExecuteTemplate(w, page, data); err != nil {
+		fmt.Printf("arche ui: template execute %s: %v\n", page, err)
+	}
+}
+
+// shortHex/fullHex render an object ID as its first-8-chars or full hex form.
+func shortHex(id [32]byte) string      { return hex.EncodeToString(id[:])[:8] }
+func fullHex(id [32]byte) string       { return hex.EncodeToString(id[:]) }
+// phaseClass converts a phase name to a lowercase CSS class suffix.
+func phaseClass(p object.Phase) string { return strings.ToLower(p.String()) }
+
+// bookmarkMap returns commit-hex -> bookmark names, so pages can label each
+// commit with the bookmarks pointing at it. Listing errors yield an empty map.
+func bookmarkMap(r *repo.Repo) map[string][]string {
+	bms, _ := r.Store.ListBookmarks()
+	m := make(map[string][]string, len(bms))
+	for _, b := range bms {
+		k := fullHex(b.CommitID)
+		m[k] = append(m[k], b.Name)
+	}
+	return m
+}
+
+// logData is the view model for the /log page.
+type logData struct{ Commits []commitRowData }
+
+// commitRowData is one row of the commit log table.
+type commitRowData struct {
+	HexID      string
+	ShortHex   string
+	ChangeID   string
+	Author     string
+	Date       string
+	Phase      string
+	PhaseClass string
+	Message    string // first line of the commit message only
+	Bookmarks  []string
+	IsHead     bool
+}
+
+// handleRoot redirects "/" to the log page; any other unmatched path is 404.
+func (s *server) handleRoot(w http.ResponseWriter, r *http.Request) {
+	if r.URL.Path == "/" {
+		http.Redirect(w, r, "/log", http.StatusFound)
+		return
+	}
+	http.NotFound(w, r)
+}
+
+// handleLog renders the commit log: a breadth-first walk of ancestors from
+// the HEAD commit, capped at 200 rows. Unreadable commits are skipped.
+func (s *server) handleLog(w http.ResponseWriter, req *http.Request) {
+	const maxCommits = 200
+	_, headID, err := s.r.HeadCommit()
+	if err != nil {
+		http.Error(w, "HEAD: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	headCID, _ := s.r.HeadChangeID()
+	bmMap := bookmarkMap(s.r)
+
+	visited := map[[32]byte]bool{}
+	queue := [][32]byte{headID}
+
+	var rows []commitRowData
+	for len(queue) > 0 && len(rows) < maxCommits {
+		id := queue[0]
+		queue = queue[1:]
+
+		// The queue may hold duplicates (a parent reachable via two
+		// children); the visited check on dequeue filters them out.
+		if visited[id] {
+			continue
+		}
+		visited[id] = true
+
+		c, err := s.r.ReadCommit(id)
+		if err != nil {
+			continue
+		}
+
+		phase, _ := s.r.Store.GetPhase(id)
+		hexID := fullHex(id)
+
+		// Only the first line of the message appears in the log table.
+		msg := c.Message
+		if idx := strings.IndexByte(msg, '\n'); idx >= 0 {
+			msg = msg[:idx]
+		}
+
+		rows = append(rows, commitRowData{
+			HexID:      hexID,
+			ShortHex:   shortHex(id),
+			ChangeID:   c.ChangeID,
+			Author:     c.Author.Name,
+			Date:       c.Author.Timestamp.Format("2006-01-02 15:04"),
+			Phase:      phase.String(),
+			PhaseClass: phaseClass(phase),
+			Message:    msg,
+			Bookmarks:  bmMap[hexID],
+			IsHead:     c.ChangeID == headCID,
+		})
+
+		for _, p := range c.Parents {
+			if !visited[p] {
+				queue = append(queue, p)
+			}
+		}
+	}
+
+	render(w, "log.html", logData{Commits: rows})
+}
+
+// parentLink is a clickable reference to a parent commit.
+type parentLink struct {
+	HexID    string
+	ShortHex string
+}
+
+// diffLine is one rendered line of a patch with its CSS class.
+type diffLine struct {
+	Class string
+	Text  string
+}
+
+// fileDiffRender is one file's diff, ready for the template.
+type fileDiffRender struct {
+	Path   string
+	Status string
+	Lines  []diffLine
+}
+
+// commitData is the view model for the /commit page.
+type commitData struct {
+	HexID      string
+	ShortHex   string
+	ChangeID   string
+	Author     string
+	Committer  string
+	Date       string
+	Phase      string
+	PhaseClass string
+	Message    string
+	Bookmarks  []string
+	Parents    []parentLink
+	Diffs      []fileDiffRender
+}
+
+// parseDiffLines splits a unified-diff patch into lines classified for CSS
+// highlighting. Returns nil for an empty patch.
+func parseDiffLines(patch string) []diffLine {
+	if patch == "" {
+		return nil
+	}
+	var out []diffLine
+	for _, line := range strings.Split(patch, "\n") {
+		var class string
+		// Header prefixes ("+++", "---", "diff ", "@@") must be tested
+		// before the single-char "+"/"-" cases, which would match them.
+		switch {
+		case strings.HasPrefix(line, "+++"), strings.HasPrefix(line, "---"),
+			strings.HasPrefix(line, "diff "), strings.HasPrefix(line, "@@"):
+			class = "diff-hdr"
+		case strings.HasPrefix(line, "+"):
+			class = "diff-add"
+		case strings.HasPrefix(line, "-"):
+			class = "diff-del"
+		}
+		out = append(out, diffLine{Class: class, Text: line})
+	}
+	return out
+}
+
+// handleCommit renders a single commit: metadata, parents, bookmarks, and a
+// highlighted per-file diff. The commit is addressed by ?id=<64-char hex>.
+func (s *server) handleCommit(w http.ResponseWriter, req *http.Request) {
+	idStr := req.URL.Query().Get("id")
+	if idStr == "" {
+		http.Error(w, "missing id parameter", http.StatusBadRequest)
+		return
+	}
+
+	raw, err := hex.DecodeString(idStr)
+	if err != nil || len(raw) != 32 {
+		http.Error(w, "invalid commit id", http.StatusBadRequest)
+		return
+	}
+
+	var id [32]byte
+	copy(id[:], raw)
+
+	c, err := s.r.ReadCommit(id)
+	if err != nil {
+		http.Error(w, "not found: "+err.Error(), http.StatusNotFound)
+		return
+	}
+
+	phase, _ := s.r.Store.GetPhase(id)
+	bmMap := bookmarkMap(s.r)
+
+	var parents []parentLink
+	for _, p := range c.Parents {
+		parents = append(parents, parentLink{
+			HexID:    fullHex(p),
+			ShortHex: shortHex(p),
+		})
+	}
+
+	// Diff failures degrade to an empty diff section rather than a 500.
+	diffs, _ := diff.CommitDiff(s.r, id)
+	var rendered []fileDiffRender
+	for _, fd := range diffs {
+		rendered = append(rendered, fileDiffRender{
+			Path:   fd.Path,
+			Status: string(fd.Status),
+			Lines:  parseDiffLines(fd.Patch),
+		})
+	}
+
+	hexID := fullHex(id)
+
+	render(w, "commit.html", commitData{
+		HexID:      hexID,
+		ShortHex:   shortHex(id),
+		ChangeID:   c.ChangeID,
+		Author:     fmt.Sprintf("%s <%s>", c.Author.Name, c.Author.Email),
+		Committer:  fmt.Sprintf("%s <%s>", c.Committer.Name, c.Committer.Email),
+		Date:       c.Author.Timestamp.Format("2006-01-02 15:04:05"),
+		Phase:      phase.String(),
+		PhaseClass: phaseClass(phase),
+		Message:    c.Message,
+		Bookmarks:  bmMap[hexID],
+		Parents:    parents,
+		Diffs:      rendered,
+	})
+}
+
+// pathPart is one breadcrumb segment in the tree view.
+type pathPart struct {
+	Name string
+	Link string
+}
+
+// treeEntry is one row of the directory listing.
+type treeEntry struct {
+	Name  string
+	IsDir bool
+	Mode  string
+	Link  string
+}
+
+// treeData is the view model for the /tree page.
+type treeData struct {
+	CommitHex string
+	ShortHex  string
+	TreePath  string
+	PathParts []pathPart
+	Entries   []treeEntry
+}
+
+// modeStr maps a tree-entry mode to a short human-readable label.
+// Fix: directories previously fell through to the default case and were
+// labelled "file"; they now get their own "dir" label.
+func modeStr(m object.EntryMode) string {
+	switch m {
+	case object.ModeDir:
+		return "dir"
+	case object.ModeExec:
+		return "exec"
+	case object.ModeSymlink:
+		return "link"
+	default:
+		return "file"
+	}
+}
+
+// handleTree renders a directory listing for ?id=<commit hex> (HEAD when
+// omitted) at the subdirectory given by ?path=. It walks the commit's root
+// tree down each path segment, builds breadcrumb links, and lists entries.
+//
+// Fix: the entry-link construction previously used an if/else whose two
+// branches were byte-identical; the dead conditional is collapsed into a
+// single assignment. Behavior is unchanged.
+func (s *server) handleTree(w http.ResponseWriter, req *http.Request) {
+	idStr := req.URL.Query().Get("id")
+	treePath := req.URL.Query().Get("path")
+
+	var commitID [32]byte
+	if idStr != "" {
+		raw, err := hex.DecodeString(idStr)
+		if err != nil || len(raw) != 32 {
+			http.Error(w, "invalid commit id", http.StatusBadRequest)
+			return
+		}
+		copy(commitID[:], raw)
+	} else {
+		// No id given: show the tree of the current HEAD commit.
+		_, id, err := s.r.HeadCommit()
+		if err != nil {
+			http.Error(w, "HEAD: "+err.Error(), http.StatusInternalServerError)
+			return
+		}
+		commitID = id
+	}
+
+	c, err := s.r.ReadCommit(commitID)
+	if err != nil {
+		http.Error(w, "commit not found: "+err.Error(), http.StatusNotFound)
+		return
+	}
+
+	tree, err := s.r.ReadTree(c.TreeID)
+	if err != nil {
+		http.Error(w, "tree not found: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// Descend from the root tree one path segment at a time.
+	treePath = strings.Trim(treePath, "/")
+	if treePath != "" {
+		for _, part := range strings.Split(treePath, "/") {
+			var found *object.TreeEntry
+			for i := range tree.Entries {
+				if tree.Entries[i].Name == part {
+					found = &tree.Entries[i]
+					break
+				}
+			}
+			if found == nil {
+				http.Error(w, "path not found", http.StatusNotFound)
+				return
+			}
+			if found.Mode != object.ModeDir {
+				http.Error(w, "not a directory", http.StatusBadRequest)
+				return
+			}
+			tree, err = s.r.ReadTree(found.ObjectID)
+			if err != nil {
+				http.Error(w, "subtree not found: "+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+	}
+
+	commitHex := fullHex(commitID)
+
+	// Breadcrumbs: each segment links to the tree at its cumulative path.
+	var parts []pathPart
+	if treePath != "" {
+		acc := ""
+		for _, seg := range strings.Split(treePath, "/") {
+			if acc != "" {
+				acc += "/"
+			}
+			acc += seg
+			parts = append(parts, pathPart{
+				Name: seg,
+				Link: fmt.Sprintf("/tree?id=%s&path=%s", commitHex, acc),
+			})
+		}
+	}
+
+	var entries []treeEntry
+	for _, e := range tree.Entries {
+		isDir := e.Mode == object.ModeDir
+		childPath := e.Name
+		if treePath != "" {
+			childPath = treePath + "/" + e.Name
+		}
+		// Files and directories both link to the tree endpoint for now;
+		// a dedicated blob view does not exist yet (following a file
+		// link yields "not a directory" from the descent above).
+		link := fmt.Sprintf("/tree?id=%s&path=%s", commitHex, childPath)
+		entries = append(entries, treeEntry{
+			Name:  e.Name,
+			IsDir: isDir,
+			Mode:  modeStr(e.Mode),
+			Link:  link,
+		})
+	}
+
+	render(w, "tree.html", treeData{
+		CommitHex: commitHex,
+		ShortHex:  shortHex(commitID),
+		TreePath:  treePath,
+		PathParts: parts,
+		Entries:   entries,
+	})
+}
+
+// statusRowData is one changed-file row on the /status page.
+type statusRowData struct {
+	StatusChar  string
+	StatusClass string
+	Path        string
+}
+
+// statusData is the view model for the /status page.
+type statusData struct {
+	ChangeID   string
+	HexID      string
+	ShortHex   string
+	Phase      string
+	PhaseClass string
+	Changes    []statusRowData
+}
+
+// handleStatus renders the working-copy status relative to the HEAD commit.
+// Status-scan failures degrade to an empty change list rather than a 500.
+func (s *server) handleStatus(w http.ResponseWriter, req *http.Request) {
+	c, id, err := s.r.HeadCommit()
+	if err != nil {
+		http.Error(w, "HEAD: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	phase, _ := s.r.Store.GetPhase(id)
+
+	changes, _ := wc.New(s.r).Status()
+	var rows []statusRowData
+	for _, ch := range changes {
+		// The single status character doubles as the CSS class.
+		sc := string(ch.Status)
+		rows = append(rows, statusRowData{
+			StatusChar:  sc,
+			StatusClass: sc,
+			Path:        ch.Path,
+		})
+	}
+
+	render(w, "status.html", statusData{
+		ChangeID:   c.ChangeID,
+		HexID:      fullHex(id),
+		ShortHex:   shortHex(id),
+		Phase:      phase.String(),
+		PhaseClass: phaseClass(phase),
+		Changes:    rows,
+	})
+}
+
+// bookmarkRowData is one row of the /bookmarks table.
+type bookmarkRowData struct {
+	Name       string
+	HexID      string
+	ShortHex   string
+	ChangeID   string
+	Phase      string
+	PhaseClass string
+	IsHead     bool
+}
+
+// bookmarksData is the view model for the /bookmarks page.
+type bookmarksData struct {
+	Bookmarks []bookmarkRowData
+}
+
+// handleBookmarks lists all bookmarks with their target commit's change ID
+// and phase. Bookmarks whose commit cannot be read are skipped silently.
+func (s *server) handleBookmarks(w http.ResponseWriter, req *http.Request) {
+	headCID, _ := s.r.HeadChangeID()
+	bms, _ := s.r.Store.ListBookmarks()
+	var rows []bookmarkRowData
+	for _, b := range bms {
+		c, err := s.r.ReadCommit(b.CommitID)
+		if err != nil {
+			continue
+		}
+		phase, _ := s.r.Store.GetPhase(b.CommitID)
+		rows = append(rows, bookmarkRowData{
+			Name:       b.Name,
+			HexID:      fullHex(b.CommitID),
+			ShortHex:   shortHex(b.CommitID),
+			ChangeID:   c.ChangeID,
+			Phase:      phase.String(),
+			PhaseClass: phaseClass(phase),
+			IsHead:     c.ChangeID == headCID,
+		})
+	}
+
+	render(w, "bookmarks.html", bookmarksData{Bookmarks: rows})
+}
+
+// issueRowData is one row of the /issues table.
+type issueRowData struct {
+	ID     string
+	Status string
+	Title  string
+}
+
+// issuesData is the view model for the /issues page.
+type issuesData struct {
+	Issues []issueRowData
+}
+
+// handleIssues lists all issues (ID, status, title) from the issue database.
+func (s *server) handleIssues(w http.ResponseWriter, req *http.Request) {
+	stubs, err := s.idb.Issues.ListIssues()
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	var rows []issueRowData
+	for _, st := range stubs {
+		rows = append(rows, issueRowData{ID: st.ID, Status: st.Status, Title: st.Title})
+	}
+	render(w, "issues.html", issuesData{Issues: rows})
+}
+
+// handleIssue renders a single issue addressed by ?id=; a missing id
+// redirects back to the issue list instead of erroring.
+func (s *server) handleIssue(w http.ResponseWriter, req *http.Request) {
+	id := req.URL.Query().Get("id")
+	if id == "" {
+		http.Redirect(w, req, "/issues", http.StatusFound)
+		return
+	}
+	iss, err := s.idb.Issues.GetIssue(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	// The issue value is passed to the template as-is.
+	render(w, "issue.html", iss)
+}
+
+// wikiListData is the view model for the /wiki index page.
+// Pages holds whatever s.idb.Wiki.List returns; the template iterates it.
+type wikiListData struct {
+	Pages interface{}
+}
+
+// handleWiki lists all wiki pages.
+func (s *server) handleWiki(w http.ResponseWriter, req *http.Request) {
+	pages, err := s.idb.Wiki.List()
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	render(w, "wiki.html", wikiListData{Pages: pages})
+}
+
+// handleWikiPage renders one wiki page addressed by ?title=; a missing title
+// redirects back to the wiki index instead of erroring.
+func (s *server) handleWikiPage(w http.ResponseWriter, req *http.Request) {
+	title := req.URL.Query().Get("title")
+	if title == "" {
+		http.Redirect(w, req, "/wiki", http.StatusFound)
+		return
+	}
+	page, err := s.idb.Wiki.Get(title)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusNotFound)
+		return
+	}
+	render(w, "wikipage.html", page)
+}

internal/ui/templates/base.html [A]
--- /dev/null
+++ b/internal/ui/templates/base.html
@@ -1,0 +1,233 @@
+{{/* base.html: shared page shell. Child templates fill the "title" and
+     "content" blocks; the nav bar and all site CSS live here. */}}
+<!DOCTYPE html>
+<html lang="en">
+    <head>
+        <meta charset="utf-8" />
+        <meta name="viewport" content="width=device-width, initial-scale=1" />
+        <title>{{ block "title" . }}Arche{{ end }}</title>
+        <style>
+      /* Global reset + dark monospace theme shared by every page. */
+      * {
+        box-sizing: border-box;
+        margin: 0;
+        padding: 0;
+      }
+      body {
+        font-family: ui-monospace, monospace;
+        font-size: 14px;
+        background: #0d0d0d;
+        color: #d4d4d4;
+        min-height: 100vh;
+      }
+      a {
+        color: #7cb8ff;
+        text-decoration: none;
+      }
+      a:hover {
+        text-decoration: underline;
+      }
+      nav {
+        background: #1a1a1a;
+        border-bottom: 1px solid #333;
+        padding: 8px 16px;
+        display: flex;
+        gap: 16px;
+        align-items: center;
+      }
+      nav .brand {
+        color: #e8c56d;
+        font-weight: bold;
+        font-size: 15px;
+        margin-right: 8px;
+      }
+      nav a {
+        color: #aaa;
+        font-size: 13px;
+      }
+      nav a:hover {
+        color: #d4d4d4;
+      }
+      main {
+        padding: 20px 24px;
+        max-width: 1100px;
+      }
+      h2 {
+        font-size: 15px;
+        color: #e8c56d;
+        margin-bottom: 12px;
+        font-weight: normal;
+        border-bottom: 1px solid #2a2a2a;
+        padding-bottom: 6px;
+      }
+      /* Tables: used by log, bookmarks, issues, status and wiki lists. */
+      table {
+        border-collapse: collapse;
+        width: 100%;
+      }
+      th {
+        text-align: left;
+        padding: 6px 10px;
+        border-bottom: 1px solid #2a2a2a;
+        color: #888;
+        font-weight: normal;
+        font-size: 12px;
+      }
+      td {
+        padding: 5px 10px;
+        border-bottom: 1px solid #1e1e1e;
+        vertical-align: top;
+      }
+      tr:hover td {
+        background: #141414;
+      }
+      .hash {
+        color: #888;
+        font-size: 12px;
+      }
+      /* Phase colouring: classes are produced by phaseClass on the server. */
+      .phase-draft {
+        color: #7cb8ff;
+      }
+      .phase-public {
+        color: #7ae07a;
+      }
+      .phase-secret {
+        color: #e07a7a;
+      }
+      .added {
+        color: #7ae07a;
+      }
+      .deleted {
+        color: #e07a7a;
+      }
+      .modified {
+        color: #e8c56d;
+      }
+      pre {
+        background: #111;
+        border: 1px solid #252525;
+        padding: 12px;
+        overflow-x: auto;
+        line-height: 1.5;
+        font-size: 13px;
+      }
+      /* Diff line colouring used by commit.html. */
+      .diff-add {
+        color: #7ae07a;
+      }
+      .diff-del {
+        color: #e07a7a;
+      }
+      .diff-hdr {
+        color: #888;
+      }
+      .badge {
+        display: inline-block;
+        padding: 1px 6px;
+        border-radius: 3px;
+        font-size: 11px;
+        background: #222;
+        color: #aaa;
+      }
+      .info-row {
+        display: flex;
+        flex-wrap: wrap;
+        gap: 16px;
+        margin-bottom: 14px;
+        font-size: 13px;
+        color: #888;
+      }
+      .info-row span {
+        color: #d4d4d4;
+      }
+      /* Tree browser entries (tree.html). */
+      .tree-entry {
+        padding: 4px 10px;
+        border-bottom: 1px solid #1a1a1a;
+        display: flex;
+        gap: 10px;
+      }
+      .tree-entry:hover {
+        background: #141414;
+      }
+      .tree-dir {
+        color: #7cb8ff;
+      }
+      .tree-file {
+        color: #d4d4d4;
+      }
+      /* Working-copy status characters (A/M/D) and issue statuses. */
+      .status-A {
+        color: #7ae07a;
+      }
+      .status-M {
+        color: #e8c56d;
+      }
+      .status-D {
+        color: #e07a7a;
+      }
+      .empty {
+        color: #555;
+        padding: 20px 0;
+        font-style: italic;
+      }
+      .status-open {
+        color: #7ae07a;
+      }
+      .status-closed {
+        color: #888;
+      }
+      .status-wontfix {
+        color: #e07a7a;
+      }
+      .issue-body {
+        white-space: pre-wrap;
+        background: #111;
+        border: 1px solid #252525;
+        padding: 12px;
+        margin-bottom: 16px;
+        font-size: 13px;
+        line-height: 1.6;
+      }
+      .comment-block {
+        border-left: 3px solid #333;
+        padding: 8px 12px;
+        margin-bottom: 10px;
+      }
+      .comment-meta {
+        font-size: 11px;
+        color: #666;
+        margin-bottom: 4px;
+      }
+      .conflict-box {
+        background: #1a0000;
+        border: 1px solid #e07a7a;
+        padding: 12px;
+        margin-bottom: 16px;
+      }
+      .wiki-content {
+        white-space: pre-wrap;
+        line-height: 1.7;
+        font-size: 13px;
+      }
+        </style>
+    </head>
+    <body>
+        <nav>
+            <span class="brand">
+                <svg width="16"
+                     height="16"
+                     viewBox="0 0 128 128"
+                     xmlns="http://www.w3.org/2000/svg"
+                     style="vertical-align: middle;
+                            margin-right: 5px">
+                    <circle cx="64" cy="64" r="48" stroke="#e8c56d" stroke-width="8" fill="none" />
+                    <circle cx="64" cy="64" r="10" fill="#e8c56d" />
+                </svg>
+                arche
+            </span>
+            <a href="/log">log</a>
+            <a href="/tree">tree</a>
+            <a href="/status">status</a>
+            <a href="/bookmarks">bookmarks</a>
+            <a href="/issues">issues</a>
+            <a href="/wiki">wiki</a>
+        </nav>
+        <main>
+            {{ block "content" . }}{{ end }}
+        </main>
+    </body>
+</html>

internal/ui/templates/bookmarks.html [A]
--- /dev/null
+++ b/internal/ui/templates/bookmarks.html
@@ -1,0 +1,36 @@
+{{/* bookmarks.html: table of bookmarks (context: bookmarksData).
+     The "@" badge marks the bookmark pointing at the current head change. */}}
+{{ template "base.html" . }} {{ define "title" }}bookmarks — arche{{ end }} {{ define "content" }}
+<h2>bookmarks</h2>
+{{ if .Bookmarks }}
+<table>
+    <thead>
+        <tr>
+            <th>name</th>
+            <th>change</th>
+            <th>hash</th>
+            <th>phase</th>
+        </tr>
+    </thead>
+    <tbody>
+        {{ range .Bookmarks }}
+        <tr>
+            <td>
+                {{.Name}} {{ if .IsHead }}<span class="badge" style="background: #1a2a1a; color: #7ae07a">@</span>{{ end }}
+            </td>
+            <td>
+                <a href="/commit?id={{.HexID}}">ch:{{.ChangeID}}</a>
+            </td>
+            <td>
+                <span class="hash">{{.ShortHex}}</span>
+            </td>
+            <td>
+                <span class="phase-{{.PhaseClass}}">{{.Phase}}</span>
+            </td>
+        </tr>
+        {{ end }}
+    </tbody>
+</table>
+{{ else }}
+<p class="empty">
+    no bookmarks yet — run <code>arche bookmark set &lt;name&gt;</code>
+</p>
+{{ end }} {{ end }}

internal/ui/templates/commit.html [A]
--- /dev/null
+++ b/internal/ui/templates/commit.html
@@ -1,0 +1,56 @@
+{{/* commit.html: single-commit detail view — metadata rows, full message,
+     then per-file diffs with coloured lines (classes set server-side). */}}
+{{ template "base.html" . }} {{ define "title" }}commit {{.ShortHex}} — arche{{ end }} {{ define "content" }}
+<h2>commit {{.HexID}}</h2>
+<div class="info-row">
+    <div>
+        change <span>ch:{{.ChangeID}}</span>
+    </div>
+    <div>
+        hash <span class="hash">{{.HexID}}</span>
+    </div>
+    <div>
+        phase <span class="phase-{{.PhaseClass}}">{{.Phase}}</span>
+    </div>
+    {{ range .Bookmarks }}
+    <div>
+        <span class="badge">{{.}}</span>
+    </div>
+    {{ end }}
+</div>
+<div class="info-row">
+    <div>
+        author <span>{{.Author}}</span>
+    </div>
+    <div>
+        date <span>{{.Date}}</span>
+    </div>
+    <div>
+        committer <span>{{.Committer}}</span>
+    </div>
+</div>
+{{ if .Parents }}
+<div class="info-row">
+    <div>
+        parents {{ range .Parents }}
+        <span><a href="/commit?id={{.HexID}}" class="hash">{{.ShortHex}}</a></span>
+        {{ end }}
+    </div>
+</div>
+{{ end }} {{ if .Message }}
+<pre style="margin-bottom:16px">{{.Message}}</pre>
+{{ end }} {{ if .Diffs }}
+<h2 style="margin-top:20px">{{ len .Diffs }} files changed</h2>
+{{ range .Diffs }}
+<div style="margin-top:14px">
+    <div style="padding:4px 0; font-size:13px">
+        {{ if eq .Status "A" }}<span class="added">A</span>
+        {{ else if eq .Status "D" }}<span class="deleted">D</span>
+        {{ else }}<span class="modified">M</span>{{ end }} &nbsp;{{.Path}}
+    </div>
+    {{ if .Lines }}
+    {{/* Tight formatting below is deliberate: the <pre> preserves
+         whitespace, so each diff line must emit exactly one newline. */}}
+    <pre>{{range .Lines}}<span {{if .Class}} class="{{.Class}}"{{end}}>{{.Text}}</span>
+{{end}}</pre>
+    {{ end }}
+</div>
+{{ end }} {{ else }}
+<p class="empty">no changes in this commit</p>
+{{ end }} {{ end }}

internal/ui/templates/issue.html [A]
--- /dev/null
+++ b/internal/ui/templates/issue.html
@@ -1,0 +1,54 @@
+{{/* issue.html: single-issue view. Shows status/labels/refs, the body
+     (or a side-by-side conflict box when BodyConflict is set), and
+     comments. Body/comment text goes through the custom "markdown"
+     template function registered by the server. */}}
+{{ template "base.html" . }} {{ define "title" }}Issue {{.ID}} — Arche{{ end }} {{ define "content" }}
+<h2>
+    Issue <span class="hash">{{.ID}}</span>
+</h2>
+<div class="info-row">
+    <div>
+        Status: <span class="status-{{.Status}}">{{.Status}}</span>
+    </div>
+    {{ if .Labels }}
+    <div>
+        Labels: {{ range .Labels }}<span class="badge">{{.}}</span> {{ end }}
+    </div>
+    {{ end }} {{ if .Refs }}
+    <div>
+        Refs: {{ range .Refs }}<a href="/commit?id={{.}}" class="hash">{{.}}</a>
+        {{ end }}
+    </div>
+    {{ end }}
+</div>
+<h2>{{.Title}}</h2>
+{{ if .BodyConflict }}
+<div class="conflict-box">
+    <p style="color: #e07a7a; margin-bottom: 8px">Body conflict — explicit resolution required</p>
+    <table style="width: 100%">
+        <tr>
+            <td style="width: 50%; vertical-align: top; padding-right: 8px">
+                <div style="color: #888; font-size: 11px; margin-bottom: 4px">Our edit</div>
+                <pre>{{.BodyConflict.OurEdit}}</pre>
+            </td>
+            <td style="width: 50%; vertical-align: top; padding-left: 8px">
+                <div style="color: #888; font-size: 11px; margin-bottom: 4px">Their edit</div>
+                <pre>{{.BodyConflict.TheirEdit}}</pre>
+            </td>
+        </tr>
+    </table>
+</div>
+{{ else if .Body }}
+<div class="issue-body">{{.Body | markdown}}</div>
+{{ else }}
+<p class="empty">No description.</p>
+{{ end }} {{ if .Comments }}
+<h2>Comments ({{ len .Comments }})</h2>
+{{ range .Comments }}
+<div class="comment-block">
+    <div class="comment-meta">
+        {{.Author}} · <span class="hash">{{.EventID}}</span>
+    </div>
+    <div>{{.Text | markdown}}</div>
+</div>
+{{ end }} {{ end }}
+<p style="margin-top: 20px">
+    <a href="/issues">← all issues</a>
+</p>
+{{ end }}

internal/ui/templates/issues.html [A]
--- /dev/null
+++ b/internal/ui/templates/issues.html
@@ -1,0 +1,30 @@
+{{/* issues.html: issue index table (context: issuesData from handleIssues). */}}
+{{ template "base.html" . }} {{ define "title" }}Issues — Arche{{ end }} {{ define "content" }}
+<h2>Issues</h2>
+{{ if .Issues }}
+<table>
+    <thead>
+        <tr>
+            <th>ID</th>
+            <th>Status</th>
+            <th>Title</th>
+        </tr>
+    </thead>
+    <tbody>
+        {{ range .Issues }}
+        <tr>
+            <td>
+                <a href="/issue?id={{.ID}}" class="hash">{{.ID}}</a>
+            </td>
+            <td>
+                <span class="status-{{.Status}}">{{.Status}}</span>
+            </td>
+            <td>
+                <a href="/issue?id={{.ID}}">{{.Title}}</a>
+            </td>
+        </tr>
+        {{ end }}
+    </tbody>
+</table>
+{{ else }}
+<p class="empty">No issues yet.</p>
+{{ end }} {{ end }}

internal/ui/templates/log.html [A]
--- /dev/null
+++ b/internal/ui/templates/log.html
@@ -1,0 +1,39 @@
+{{/* log.html: commit log table. Each row shows change id, bookmarks
+     badges, an "@" badge on the head change, hash, author, date, phase
+     and the first line of the message. */}}
+{{ template "base.html" . }} {{ define "title" }}log — arche{{ end }} {{ define "content" }}
+<h2>commit log</h2>
+{{ if .Commits }}
+<table>
+    <thead>
+        <tr>
+            <th>change</th>
+            <th>hash</th>
+            <th>author</th>
+            <th>date</th>
+            <th>phase</th>
+            <th>message</th>
+        </tr>
+    </thead>
+    <tbody>
+        {{ range .Commits }}
+        <tr>
+            <td>
+                <a href="/commit?id={{.HexID}}">ch:{{.ChangeID}}</a> {{ if .Bookmarks }}
+                <br />
+                {{ range .Bookmarks }}<span class="badge">{{.}}</span>
+                {{ end }}{{ end }} {{ if .IsHead }}<span class="badge" style="background: #1a2a1a; color: #7ae07a">@</span>{{ end }}
+            </td>
+            <td>
+                <span class="hash">{{.ShortHex}}</span>
+            </td>
+            <td>{{.Author}}</td>
+            <td>{{.Date}}</td>
+            <td>
+                <span class="phase-{{.PhaseClass}}">{{.Phase}}</span>
+            </td>
+            <td>{{.Message}}</td>
+        </tr>
+        {{ end }}
+    </tbody>
+</table>
+{{ else }}
+<p class="empty">no commits yet</p>
+{{ end }} {{ end }}

internal/ui/templates/status.html [A]
--- /dev/null
+++ b/internal/ui/templates/status.html
@@ -1,0 +1,35 @@
+{{/* status.html: working-copy status — current change header plus a
+     table of changed paths with their A/M/D status character. */}}
+{{ template "base.html" . }} {{ define "title" }}status — arche{{ end }} {{ define "content" }}
+<h2>working copy status</h2>
+<div class="info-row">
+    <div>
+        change <span><a href="/commit?id={{.HexID}}">ch:{{.ChangeID}}</a></span>
+    </div>
+    <div>
+        hash <span class="hash">{{.ShortHex}}</span>
+    </div>
+    <div>
+        phase <span class="phase-{{.PhaseClass}}">{{.Phase}}</span>
+    </div>
+</div>
+{{ if .Changes }}
+<table style="margin-top: 10px">
+    <thead>
+        <tr>
+            <th style="width: 30px">st</th>
+            <th>path</th>
+        </tr>
+    </thead>
+    <tbody>
+        {{ range .Changes }}
+        <tr>
+            <td>
+                <span class="status-{{.StatusChar}}">{{.StatusChar}}</span>
+            </td>
+            <td>{{.Path}}</td>
+        </tr>
+        {{ end }}
+    </tbody>
+</table>
+{{ else }}
+<p class="empty">working copy is clean</p>
+{{ end }} {{ end }}

internal/ui/templates/tree.html [A]
--- /dev/null
+++ b/internal/ui/templates/tree.html
@@ -1,0 +1,25 @@
+{{/* tree.html: directory browser for a commit's tree. PathParts renders a
+     breadcrumb of clickable ancestors; Entries are dirs first-class links. */}}
+{{ template "base.html" . }} {{ define "title" }}tree {{.ShortHex}}{{ if .TreePath }}/{{.TreePath}}{{ end }} — arche{{ end }} {{define
+"content"}}
+<h2>
+    tree / {{ range .PathParts }}<a href="{{.Link}}">{{.Name}}</a> / {{ end }} &nbsp;<span class="hash">{{.ShortHex}}</span>
+</h2>
+{{ if .Entries }}
+<div style="border: 1px solid #252525;
+            border-radius: 3px;
+            overflow: hidden;
+            margin-top: 10px">
+    {{ range .Entries }}
+    <div class="tree-entry">
+        {{ if .IsDir }}
+        <span style="color: #555">dir</span>
+        <a href="{{.Link}}" class="tree-dir">{{.Name}}/</a>
+    {{ else }}
+        <span style="color: #555">{{.Mode}}</span>
+        <a href="{{.Link}}" class="tree-file">{{.Name}}</a>
+        {{ end }}
+    </div>
+    {{ end }}
+</div>
+{{ else }}
+<p class="empty">empty tree</p>
+{{ end }} {{ end }}

internal/ui/templates/wiki.html [A]
--- /dev/null
+++ b/internal/ui/templates/wiki.html
@@ -1,0 +1,24 @@
+{{/* wiki.html: wiki page index (context: wikiListData from handleWiki). */}}
+{{ template "base.html" . }} {{ define "title" }}Wiki — Arche{{ end }} {{ define "content" }}
+<h2>Wiki</h2>
+{{ if .Pages }}
+<table>
+    <thead>
+        <tr>
+            <th>Title</th>
+            <th>Author</th>
+        </tr>
+    </thead>
+    <tbody>
+        {{ range .Pages }}
+        <tr>
+            <td>
+                <a href="/wiki/page?title={{.Title}}">{{.Title}}</a>
+            </td>
+            <td class="hash">{{.Author}}</td>
+        </tr>
+        {{ end }}
+    </tbody>
+</table>
+{{ else }}
+<p class="empty">No wiki pages yet.</p>
+{{ end }} {{ end }}

internal/ui/templates/wikipage.html [A]
--- /dev/null
+++ b/internal/ui/templates/wikipage.html
@@ -1,0 +1,12 @@
+{{/* wikipage.html: single wiki page; Content is rendered through the
+     server-registered "markdown" template function. */}}
+{{ template "base.html" . }} {{ define "title" }}{{.Title}} — Wiki — Arche{{ end }} {{ define "content" }}
+<h2>{{.Title}}</h2>
+<div class="info-row">
+    <div>
+        Author: <span>{{.Author}}</span>
+    </div>
+</div>
+<div class="wiki-content">{{.Content | markdown}}</div>
+<p style="margin-top: 20px">
+    <a href="/wiki">← wiki index</a>
+</p>
+{{ end }}

internal/watcher/watcher.go [A]
--- /dev/null
+++ b/internal/watcher/watcher.go
@@ -1,0 +1,95 @@
+package watcher
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"arche/internal/store"
+
+	"github.com/fsnotify/fsnotify"
+	"golang.org/x/sys/unix"
+)
+
+const PidFile = "watch.pid"
+
+// IsActive reports whether a watcher process recorded in archeDir's pid
+// file appears to be running. It returns false when the pid file is
+// missing or unparsable.
+func IsActive(archeDir string) bool {
+	data, err := os.ReadFile(filepath.Join(archeDir, PidFile))
+	if err != nil {
+		return false
+	}
+
+	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
+	if err != nil {
+		return false
+	}
+
+	// Signal 0 only probes that the pid exists and is signalable.
+	// NOTE(review): a recycled PID from an unrelated process would yield
+	// a false positive; the stale pid file is never cleaned up here.
+	return unix.Kill(pid, 0) == nil
+}
+
+// Run starts a filesystem watcher over workRoot and, for every relevant
+// event, marks the affected path dirty in the working-copy cache so later
+// status/snapshot operations rescan it. It writes its pid to archeDir's
+// pid file (removed on return) and blocks until ctx is cancelled or the
+// fsnotify channels close.
+func Run(ctx context.Context, workRoot, archeDir string, st store.Store) error {
+	if err := os.WriteFile(
+		filepath.Join(archeDir, PidFile),
+		[]byte(strconv.Itoa(os.Getpid())),
+		0o644,
+	); err != nil {
+		return fmt.Errorf("write pid: %w", err)
+	}
+	defer os.Remove(filepath.Join(archeDir, PidFile))
+
+	w, err := fsnotify.NewWatcher()
+	if err != nil {
+		return fmt.Errorf("fsnotify: %w", err)
+	}
+	defer w.Close()
+
+	// fsnotify watches are not recursive: register every existing
+	// directory up front, skipping the .arche metadata directory.
+	if err := filepath.Walk(workRoot, func(p string, info os.FileInfo, walkErr error) error {
+		if walkErr != nil || !info.IsDir() {
+			return nil
+		}
+		if filepath.Base(p) == ".arche" {
+			return filepath.SkipDir
+		}
+		return w.Add(p)
+	}); err != nil {
+		return fmt.Errorf("watch setup: %w", err)
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+
+		case event, ok := <-w.Events:
+			if !ok {
+				return nil
+			}
+			const interesting = fsnotify.Write | fsnotify.Create | fsnotify.Rename | fsnotify.Remove | fsnotify.Chmod
+			if event.Op&interesting == 0 {
+				continue
+			}
+			// Ignore events outside workRoot and anything under .arche.
+			rel, err := filepath.Rel(workRoot, event.Name)
+			if err != nil || strings.HasPrefix(rel, "..") || strings.HasPrefix(rel, ".arche") {
+				continue
+			}
+			rel = filepath.ToSlash(rel)
+			// Dirty-marking is best effort: log and keep watching.
+			if markErr := st.MarkWCacheDirty(rel); markErr != nil {
+				fmt.Fprintf(os.Stderr, "arche watch: mark dirty %q: %v\n", rel, markErr)
+			}
+
+			// Newly created directories must be added to the watch set,
+			// since the initial Walk only saw directories existing then.
+			if event.Op&fsnotify.Create != 0 {
+				if info, statErr := os.Lstat(event.Name); statErr == nil && info.IsDir() {
+					_ = w.Add(event.Name)
+				}
+			}
+
+		case _, ok := <-w.Errors:
+			// Watcher errors are intentionally dropped; only a closed
+			// channel ends the loop.
+			if !ok {
+				return nil
+			}
+		}
+	}
+}

internal/wc/hooks.go [A]
--- /dev/null
+++ b/internal/wc/hooks.go
@@ -1,0 +1,184 @@
+package wc
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+)
+
+// RunHooksSequential runs each hook command in order via $SHELL -c
+// (falling back to /bin/sh), with the repository root as working
+// directory and stdout/stderr passed through. The first failing hook
+// aborts the sequence and is returned wrapped with the command text.
+// label is only used in the progress message printed before each hook.
+func RunHooksSequential(repoRoot, label string, hooks []string) error {
+	sh := os.Getenv("SHELL")
+	if sh == "" {
+		sh = "/bin/sh"
+	}
+	for _, cmd := range hooks {
+		fmt.Fprintf(os.Stderr, "Running %s: %s\n", label, cmd)
+		c := exec.Command(sh, "-c", cmd)
+		c.Dir = repoRoot
+		c.Stdout = os.Stdout
+		c.Stderr = os.Stderr
+		if err := c.Run(); err != nil {
+			return fmt.Errorf("hook %q: %w", cmd, err)
+		}
+	}
+	return nil
+}
+
+// snapshotRestrictedPathsIntoTx builds, inside an already-open store
+// transaction, a new commit whose tree re-snapshots only the paths in
+// diffPaths from the working copy; every other path keeps the blob/mode
+// recorded in the current head tree (headBlobs/headModes). The new commit
+// amends the head in place: it reuses the head's parents, change id,
+// author and phase, replacing only tree, committer and message.
+// The caller owns tx (commit/rollback) and the resulting commit id is
+// also registered as the change's current commit within tx.
+func (wc *WC) snapshotRestrictedPathsIntoTx(
+	tx *store.Tx,
+	headCommit *object.Commit,
+	headBlobs map[string][32]byte,
+	headModes map[string]object.EntryMode,
+	diffPaths map[string]bool,
+	message string,
+	now time.Time,
+) (*object.Commit, [32]byte, error) {
+	r := wc.Repo
+	var entries []fileEntry
+
+	// Carry over every head path that is NOT being re-snapped. If a hook
+	// appears to have modified such a file, warn but still keep the head
+	// blob — the on-disk change remains a working-copy change.
+	for path, blobID := range headBlobs {
+		if diffPaths[path] {
+			continue
+		}
+		mode := headModes[path]
+		absPath := filepath.Join(r.Root, filepath.FromSlash(path))
+		if info, statErr := os.Lstat(absPath); statErr == nil {
+			if hookChangedFile(r.Store, path, info, blobID) {
+				fmt.Fprintf(os.Stderr,
+					"arche snap: hook modified %s (outside snap diff) - leaving as working-copy change\n", path)
+			}
+		}
+		entries = append(entries, fileEntry{path: path, blobID: blobID, mode: mode})
+	}
+
+	// Announce which files will be re-read from disk (sorted for stable
+	// output); paths deleted on disk are silently dropped from the tree.
+	var reSnapNames []string
+	for path := range diffPaths {
+		if _, err := os.Lstat(filepath.Join(r.Root, filepath.FromSlash(path))); err == nil {
+			reSnapNames = append(reSnapNames, path)
+		}
+	}
+	if len(reSnapNames) > 0 {
+		sort.Strings(reSnapNames)
+		fmt.Fprintf(os.Stderr, "Re-snapping %d modified file(s) (%s)\n",
+			len(reSnapNames), strings.Join(reSnapNames, ", "))
+	}
+
+	// Read each re-snapped file, store its blob in tx, and refresh the
+	// working-copy cache entry (inode/mtime/size) so future status checks
+	// can trust the cache.
+	for path := range diffPaths {
+		absPath := filepath.Join(r.Root, filepath.FromSlash(path))
+		info, err := os.Lstat(absPath)
+		if os.IsNotExist(err) {
+			continue // deleted on disk: omit from the new tree
+		}
+		if err != nil {
+			return nil, object.ZeroID, fmt.Errorf("stat %s: %w", path, err)
+		}
+		content, err := readFileContent(absPath, info)
+		if err != nil {
+			return nil, object.ZeroID, fmt.Errorf("read %s: %w", path, err)
+		}
+		blobID, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: content})
+		if err != nil {
+			return nil, object.ZeroID, err
+		}
+		mode := fileMode(info)
+		entries = append(entries, fileEntry{path: path, blobID: blobID, mode: mode})
+		if st, ok := info.Sys().(*syscall.Stat_t); ok {
+			// Cache update is best effort; error intentionally ignored.
+			_ = r.Store.SetWCacheEntry(tx, store.WCacheEntry{
+				Path:    path,
+				Inode:   st.Ino,
+				MtimeNs: info.ModTime().UnixNano(),
+				Size:    info.Size(),
+				BlobID:  blobID,
+			})
+		}
+	}
+
+	treeID, err := buildTree(r, tx, entries)
+	if err != nil {
+		return nil, object.ZeroID, fmt.Errorf("build tree: %w", err)
+	}
+
+	sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
+	c := &object.Commit{
+		TreeID:    treeID,
+		Parents:   headCommit.Parents,
+		ChangeID:  headCommit.ChangeID,
+		Author:    headCommit.Author,
+		Committer: sig,
+		Message:   message,
+		Phase:     headCommit.Phase,
+	}
+	// A zero author timestamp means the head had no real author yet
+	// (fresh draft) — adopt the current user as author too.
+	if headCommit.Author.Timestamp.IsZero() {
+		c.Author = sig
+	}
+
+	if err := wc.maybeSign(c); err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	commitID, err := repo.WriteCommitTx(r.Store, tx, c)
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+	if err := r.Store.SetChangeCommit(tx, c.ChangeID, commitID); err != nil {
+		return nil, object.ZeroID, err
+	}
+	return c, commitID, nil
+}
+
+// SnapshotRestrictedPaths amends the head commit, re-snapshotting from
+// the working copy only the paths in diffPaths (all other paths keep
+// their head-tree content). It flattens the head tree into blob/mode
+// maps, then delegates the tree and commit construction to
+// snapshotRestrictedPathsIntoTx inside a single store transaction.
+func (wc *WC) SnapshotRestrictedPaths(message string, diffPaths map[string]bool) (*object.Commit, [32]byte, error) {
+	r := wc.Repo
+	now := time.Now()
+
+	headCommit, _, err := r.HeadCommit()
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	// Flatten the head tree to path->blob and path->mode lookup maps.
+	headBlobs := make(map[string][32]byte)
+	headModes := make(map[string]object.EntryMode)
+	if err := flattenTree(r, headCommit.TreeID, "", headBlobs); err != nil {
+		return nil, object.ZeroID, fmt.Errorf("flatten HEAD tree: %w", err)
+	}
+	if err := flattenTreeModes(r, headCommit.TreeID, "", headModes); err != nil {
+		return nil, object.ZeroID, fmt.Errorf("flatten HEAD modes: %w", err)
+	}
+
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	c, commitID, err := wc.snapshotRestrictedPathsIntoTx(tx, headCommit, headBlobs, headModes, diffPaths, message, now)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+	// NOTE(review): if Commit(tx) fails the transaction is neither
+	// committed nor rolled back here — confirm the store cleans up.
+	if err := r.Store.Commit(tx); err != nil {
+		return nil, object.ZeroID, err
+	}
+	return c, commitID, nil
+}
+
+// hookChangedFile reports whether the on-disk file at path may differ
+// from expectedBlobID, using the working-copy cache as the oracle: when
+// the cached inode/mtime/size all match the current stat, the cached
+// blob id is authoritative and is compared against expectedBlobID.
+// On any cache miss or metadata mismatch it conservatively returns true
+// ("possibly changed") without reading file content.
+func hookChangedFile(s store.Store, path string, info os.FileInfo, expectedBlobID [32]byte) bool {
+	if cached, err := s.GetWCacheEntry(path); err == nil && cached != nil {
+		if st, ok := info.Sys().(*syscall.Stat_t); ok {
+			if cached.Inode == st.Ino &&
+				cached.MtimeNs == info.ModTime().UnixNano() &&
+				cached.Size == info.Size() {
+				return cached.BlobID != expectedBlobID
+			}
+		}
+	}
+
+	return true
+}

internal/wc/hunk_snap.go [A]
--- /dev/null
+++ b/internal/wc/hunk_snap.go
@@ -1,0 +1,531 @@
+package wc
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"time"
+
+	"arche/internal/diff"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+)
+
+// ComputeWorkingDiffs diffs the working copy against the head commit's
+// first parent tree and returns one FileHunkDiff per changed path,
+// sorted by path. Status is 'A' (only in working copy), 'D' (only in
+// parent) or 'M' (content differs). Read errors on either side are
+// treated as empty content rather than reported.
+func (wc *WC) ComputeWorkingDiffs() ([]diff.FileHunkDiff, error) {
+	r := wc.Repo
+
+	head, _, err := r.HeadCommit()
+	if err != nil {
+		return nil, err
+	}
+
+	// Root commit (no parents) diffs against the empty tree (zero id).
+	var parentTreeID [32]byte
+	if len(head.Parents) > 0 {
+		parent, err := r.ReadCommit(head.Parents[0])
+		if err != nil {
+			return nil, fmt.Errorf("read parent commit: %w", err)
+		}
+		parentTreeID = parent.TreeID
+	}
+
+	parentBlobs := make(map[string][32]byte)
+	parentModes := make(map[string]object.EntryMode)
+	if err := flattenTree(r, parentTreeID, "", parentBlobs); err != nil {
+		return nil, err
+	}
+	// NOTE(review): parentModes is populated but never read below —
+	// either dead work or a leftover; confirm and drop if unused.
+	if err := flattenTreeModes(r, parentTreeID, "", parentModes); err != nil {
+		return nil, err
+	}
+
+	wcPaths, err := wc.trackedPaths()
+	if err != nil {
+		return nil, err
+	}
+	wcBlobMap := make(map[string]bool, len(wcPaths))
+	for _, p := range wcPaths {
+		wcBlobMap[p] = true
+	}
+
+	// Union of parent-tree paths and tracked working-copy paths.
+	allPaths := make(map[string]bool)
+	for p := range parentBlobs {
+		allPaths[p] = true
+	}
+	for _, p := range wcPaths {
+		allPaths[p] = true
+	}
+
+	var out []diff.FileHunkDiff
+	for path := range allPaths {
+		inParent := func() bool { _, ok := parentBlobs[path]; return ok }()
+		inWC := wcBlobMap[path]
+
+		// Missing or unreadable content on either side becomes "".
+		oldContent := ""
+		if inParent {
+			data, err2 := r.ReadBlob(parentBlobs[path])
+			if err2 == nil {
+				oldContent = string(data)
+			}
+		}
+
+		newContent := ""
+		if inWC {
+			abs := filepath.Join(r.Root, filepath.FromSlash(path))
+			data, err2 := os.ReadFile(abs)
+			if err2 == nil {
+				newContent = string(data)
+			}
+		}
+
+		if oldContent == newContent {
+			continue
+		}
+
+		var status rune
+		switch {
+		case !inParent && inWC:
+			status = 'A'
+		case inParent && !inWC:
+			status = 'D'
+		default:
+			status = 'M'
+		}
+
+		fhd := diff.ComputeFileHunks(path, oldContent, newContent, status)
+		out = append(out, fhd)
+	}
+
+	// Map iteration order is random; sort for deterministic output.
+	sort.Slice(out, func(i, j int) bool { return out[i].Path < out[j].Path })
+	return out, nil
+}
+
+// SnapSelectedHunks records a commit containing only the hunks selected
+// in perFile (path -> per-hunk booleans aligned with diffs), then opens
+// a fresh empty draft change on top of it and moves head there.
+// Unselected hunks remain as working-copy changes relative to the new
+// draft. The snapped commit amends the current head (same change id,
+// parents and phase); the whole sequence is logged as a "snap" operation
+// for undo. Returns the snapped commit and its id.
+func (wc *WC) SnapSelectedHunks(
+	message string,
+	diffs []diff.FileHunkDiff,
+	perFile map[string][]bool,
+) (*object.Commit, [32]byte, error) {
+	r := wc.Repo
+	now := time.Now()
+
+	// Capture ref state up front so the operation log can record a
+	// before/after pair. Errors here are deliberately ignored.
+	before, _ := r.CaptureRefState()
+
+	head, _, err := r.HeadCommit()
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	// Base tree: the head's first parent (empty tree for a root commit).
+	var parentTreeID [32]byte
+	if len(head.Parents) > 0 {
+		parent, err := r.ReadCommit(head.Parents[0])
+		if err != nil {
+			return nil, object.ZeroID, err
+		}
+		parentTreeID = parent.TreeID
+	}
+
+	parentBlobs := make(map[string][32]byte)
+	parentModes := make(map[string]object.EntryMode)
+	if err := flattenTree(r, parentTreeID, "", parentBlobs); err != nil {
+		return nil, object.ZeroID, err
+	}
+	if err := flattenTreeModes(r, parentTreeID, "", parentModes); err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	diffMap := make(map[string]diff.FileHunkDiff, len(diffs))
+	for _, fhd := range diffs {
+		diffMap[fhd.Path] = fhd
+	}
+
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	// Start from the parent tree and overlay the selected changes.
+	fileSet := make(map[string]fileEntry, len(parentBlobs))
+	for path, blobID := range parentBlobs {
+		fileSet[path] = fileEntry{path: path, blobID: blobID, mode: parentModes[path]}
+	}
+
+	for path, selected := range perFile {
+		fhd, ok := diffMap[path]
+		if !ok {
+			continue
+		}
+
+		anySelected := false
+		for _, s := range selected {
+			if s {
+				anySelected = true
+				break
+			}
+		}
+
+		switch {
+		case !anySelected:
+			// Nothing selected: an added file must not appear in the
+			// snapped tree; modified/deleted files keep parent content.
+			if fhd.Status == 'A' {
+				delete(fileSet, path)
+			}
+
+		case fhd.Status == 'D' && anySelected:
+			// Selected deletion: drop the path from the tree.
+			delete(fileSet, path)
+
+		case fhd.Status == 'A' && anySelected:
+			// Selected addition: snapshot the full new content.
+			content := fhd.NewContent
+			blobID, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: []byte(content)})
+			if err != nil {
+				r.Store.Rollback(tx)
+				return nil, object.ZeroID, err
+			}
+			mode := workingCopyMode(r.Root, path)
+			fileSet[path] = fileEntry{path: path, blobID: blobID, mode: mode}
+
+		default:
+			// Modification: apply only the selected hunks on top of the
+			// parent content and store the partial result.
+			partialContent := diff.ApplySelectedHunks(fhd, selected)
+			blobID, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: []byte(partialContent)})
+			if err != nil {
+				r.Store.Rollback(tx)
+				return nil, object.ZeroID, err
+			}
+			existingMode := parentModes[path]
+			if existingMode == 0 {
+				// No parent mode recorded: fall back to the on-disk mode.
+				existingMode = workingCopyMode(r.Root, path)
+			}
+			fileSet[path] = fileEntry{path: path, blobID: blobID, mode: existingMode}
+		}
+	}
+
+	entries := make([]fileEntry, 0, len(fileSet))
+	for _, e := range fileSet {
+		entries = append(entries, e)
+	}
+	sort.Slice(entries, func(i, j int) bool { return entries[i].path < entries[j].path })
+
+	treeID, err := buildTree(r, tx, entries)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+
+	// The snapped commit amends head: same change id, parents and phase.
+	sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
+	c := &object.Commit{
+		TreeID:    treeID,
+		Parents:   head.Parents,
+		ChangeID:  head.ChangeID,
+		Author:    head.Author,
+		Committer: sig,
+		Message:   message,
+		Phase:     head.Phase,
+	}
+	if head.Author.Timestamp.IsZero() {
+		c.Author = sig
+	}
+
+	if err := wc.maybeSign(c); err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+
+	commitID, err := repo.WriteCommitTx(r.Store, tx, c)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+	if err := r.Store.SetChangeCommit(tx, c.ChangeID, commitID); err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+	if err := r.Store.Commit(tx); err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	// Second transaction: create the new empty draft child on top of the
+	// snapped commit. From here on the snapped commit already exists, so
+	// failures return it alongside the error.
+	tx2, err := r.Store.Begin()
+	if err != nil {
+		return c, commitID, err
+	}
+
+	newChangeID, err := r.Store.AllocChangeID(tx2)
+	if err != nil {
+		r.Store.Rollback(tx2)
+		return c, commitID, err
+	}
+
+	// The draft shares the snapped tree (unselected hunks remain as
+	// working-copy edits relative to it).
+	newDraft := &object.Commit{
+		TreeID:    treeID,
+		Parents:   [][32]byte{commitID},
+		ChangeID:  newChangeID,
+		Author:    sig,
+		Committer: sig,
+		Message:   "",
+		Phase:     object.PhaseDraft,
+	}
+	newDraftID, err := repo.WriteCommitTx(r.Store, tx2, newDraft)
+	if err != nil {
+		r.Store.Rollback(tx2)
+		return c, commitID, err
+	}
+	if err := r.Store.SetChangeCommit(tx2, newChangeID, newDraftID); err != nil {
+		r.Store.Rollback(tx2)
+		return c, commitID, err
+	}
+
+	// Record the whole snap as one undoable operation.
+	after := buildRefState(commitID, object.FormatChangeID(newChangeID))
+	op := store.Operation{
+		Kind:      "snap",
+		Timestamp: now.Unix(),
+		Before:    before,
+		After:     after,
+		Metadata:  "'" + firstLine(message) + "'",
+	}
+	if _, err := r.Store.InsertOperation(tx2, op); err != nil {
+		r.Store.Rollback(tx2)
+		return c, commitID, err
+	}
+	if err := r.Store.Commit(tx2); err != nil {
+		return c, commitID, err
+	}
+
+	// Finally move head to the new draft change.
+	if err := r.WriteHead(object.FormatChangeID(newChangeID)); err != nil {
+		return c, commitID, err
+	}
+
+	return c, commitID, nil
+}
+
+// SnapFirstOfSplit creates the FIRST commit of a split: like
+// SnapSelectedHunks it builds a tree from the head's parent plus the
+// selected hunks, but the commit gets a freshly allocated change id
+// (instead of amending the head's change), is always authored by the
+// current user, and no follow-up draft, operation-log entry or head move
+// is performed — the caller drives the rest of the split.
+// NOTE(review): the tree-building core duplicates SnapSelectedHunks
+// almost line for line; a shared helper would keep them in sync.
+func (wc *WC) SnapFirstOfSplit(
+	message string,
+	diffs []diff.FileHunkDiff,
+	perFile map[string][]bool,
+) (*object.Commit, [32]byte, error) {
+	r := wc.Repo
+	now := time.Now()
+
+	head, _, err := r.HeadCommit()
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	// Base tree: head's first parent (empty tree for a root commit).
+	var parentTreeID [32]byte
+	if len(head.Parents) > 0 {
+		parent, err := r.ReadCommit(head.Parents[0])
+		if err != nil {
+			return nil, object.ZeroID, err
+		}
+		parentTreeID = parent.TreeID
+	}
+
+	parentBlobs := make(map[string][32]byte)
+	parentModes := make(map[string]object.EntryMode)
+	if err := flattenTree(r, parentTreeID, "", parentBlobs); err != nil {
+		return nil, object.ZeroID, err
+	}
+	if err := flattenTreeModes(r, parentTreeID, "", parentModes); err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	diffMap := make(map[string]diff.FileHunkDiff, len(diffs))
+	for _, fhd := range diffs {
+		diffMap[fhd.Path] = fhd
+	}
+
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	// Start from the parent tree and overlay the selected changes.
+	fileSet := make(map[string]fileEntry, len(parentBlobs))
+	for path, blobID := range parentBlobs {
+		fileSet[path] = fileEntry{path: path, blobID: blobID, mode: parentModes[path]}
+	}
+
+	for path, selected := range perFile {
+		fhd, ok := diffMap[path]
+		if !ok {
+			continue
+		}
+		anySelected := false
+		for _, s := range selected {
+			if s {
+				anySelected = true
+				break
+			}
+		}
+		switch {
+		case !anySelected:
+			// Nothing selected: drop unselected additions entirely.
+			if fhd.Status == 'A' {
+				delete(fileSet, path)
+			}
+		case fhd.Status == 'D' && anySelected:
+			// Selected deletion: remove the path from the tree.
+			delete(fileSet, path)
+		case fhd.Status == 'A' && anySelected:
+			// Selected addition: snapshot the full new content.
+			blobID, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: []byte(fhd.NewContent)})
+			if err != nil {
+				r.Store.Rollback(tx)
+				return nil, object.ZeroID, err
+			}
+			fileSet[path] = fileEntry{path: path, blobID: blobID, mode: workingCopyMode(r.Root, path)}
+		default:
+			// Modification: apply only the selected hunks.
+			partialContent := diff.ApplySelectedHunks(fhd, selected)
+			blobID, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: []byte(partialContent)})
+			if err != nil {
+				r.Store.Rollback(tx)
+				return nil, object.ZeroID, err
+			}
+			mode := parentModes[path]
+			if mode == 0 {
+				mode = workingCopyMode(r.Root, path)
+			}
+			fileSet[path] = fileEntry{path: path, blobID: blobID, mode: mode}
+		}
+	}
+
+	entries := make([]fileEntry, 0, len(fileSet))
+	for _, e := range fileSet {
+		entries = append(entries, e)
+	}
+	sort.Slice(entries, func(i, j int) bool { return entries[i].path < entries[j].path })
+
+	treeID, err := buildTree(r, tx, entries)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+
+	// A split's first commit is a NEW change, not an amend of head.
+	newChangeID, err := r.Store.AllocChangeID(tx)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+
+	sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
+	c := &object.Commit{
+		TreeID:    treeID,
+		Parents:   head.Parents,
+		ChangeID:  newChangeID,
+		Author:    sig,
+		Committer: sig,
+		Message:   message,
+		Phase:     head.Phase,
+	}
+
+	if err := wc.maybeSign(c); err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+
+	commitID, err := repo.WriteCommitTx(r.Store, tx, c)
+	if err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+	if err := r.Store.SetChangeCommit(tx, newChangeID, commitID); err != nil {
+		r.Store.Rollback(tx)
+		return nil, object.ZeroID, err
+	}
+	if err := r.Store.Commit(tx); err != nil {
+		return nil, object.ZeroID, err
+	}
+	return c, commitID, nil
+}
+
// SnapRemaining commits the current working copy as a fresh draft change
// whose sole parent is parentID — the second half of a split, following
// SnapFirstOfSplit. The wcache is rebuilt from the files on disk and HEAD
// is moved to the new change. Note the commit is not signed here
// (unlike snapshotIntoTx) — NOTE(review): confirm that is intentional.
func (wc *WC) SnapRemaining(msg string, parentID [32]byte) (*object.Commit, [32]byte, error) {
	r := wc.Repo
	now := time.Now()

	wcPaths, err := wc.trackedPaths()
	if err != nil {
		return nil, object.ZeroID, err
	}

	tx, err := r.Store.Begin()
	if err != nil {
		return nil, object.ZeroID, err
	}

	if err := r.Store.ClearWCache(tx); err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, fmt.Errorf("clear wcache: %w", err)
	}

	var entries []fileEntry
	for _, rel := range wcPaths {
		abs := filepath.Join(r.Root, rel)
		info, err := os.Lstat(abs)
		if err != nil {
			// File vanished since the walk; leave it out of the snapshot.
			continue
		}
		data, err := readFileContent(abs, info)
		if err != nil {
			r.Store.Rollback(tx)
			return nil, object.ZeroID, err
		}
		blobID, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: data})
		if err != nil {
			r.Store.Rollback(tx)
			return nil, object.ZeroID, err
		}
		// Best-effort stat cache so later snapshots can skip unchanged
		// files; skipped where Sys() is not a *syscall.Stat_t.
		if st, ok := info.Sys().(*syscall.Stat_t); ok {
			_ = r.Store.SetWCacheEntry(tx, store.WCacheEntry{
				Path:    rel,
				Inode:   st.Ino,
				MtimeNs: info.ModTime().UnixNano(),
				Size:    info.Size(),
				BlobID:  blobID,
			})
		}
		entries = append(entries, fileEntry{path: rel, blobID: blobID, mode: fileMode(info)})
	}

	treeID, err := buildTree(r, tx, entries)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	newChangeID, err := r.Store.AllocChangeID(tx)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
	c := &object.Commit{
		TreeID:    treeID,
		Parents:   [][32]byte{parentID},
		ChangeID:  newChangeID,
		Author:    sig,
		Committer: sig,
		Message:   msg,
		Phase:     object.PhaseDraft,
	}

	commitID, err := repo.WriteCommitTx(r.Store, tx, c)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}
	if err := r.Store.SetChangeCommit(tx, newChangeID, commitID); err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}
	if err := r.Store.Commit(tx); err != nil {
		return nil, object.ZeroID, err
	}

	// Move HEAD to the new draft only after the transaction committed.
	if err := r.WriteHead(object.FormatChangeID(newChangeID)); err != nil {
		return nil, object.ZeroID, err
	}

	return c, commitID, nil
}
+
+func workingCopyMode(root, path string) object.EntryMode {
+	abs := filepath.Join(root, filepath.FromSlash(path))
+	info, err := os.Lstat(abs)
+	if err != nil {
+		return object.ModeFile
+	}
+	return fileMode(info)
+}

internal/wc/ignore.go [A]
--- /dev/null
+++ b/internal/wc/ignore.go
@@ -1,0 +1,124 @@
+package wc
+
+import (
+	"bufio"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
// Ignore holds the ordered ignore rules collected from the user-global and
// repo-local ignore files. Rule order matters: later rules override
// earlier ones (last match wins).
type Ignore struct {
	rules []ignoreRule
}

// ignoreRule is one parsed ignore pattern.
type ignoreRule struct {
	pattern string // glob pattern, with any trailing "/" removed
	negate  bool   // pattern was prefixed with "!" (re-includes matches)
	dirOnly bool   // pattern ended with "/" (applies to directories only)
}
+
+func loadIgnore(root string) (*Ignore, error) {
+	ig := &Ignore{}
+
+	if home, err := os.UserHomeDir(); err == nil {
+		_ = ig.addFile(filepath.Join(home, ".config", "git", "ignore"))
+		_ = ig.addFile(filepath.Join(home, ".gitignore_global"))
+		_ = ig.addFile(filepath.Join(home, ".config", "arche", "ignore"))
+	}
+
+	_ = ig.addFile(filepath.Join(root, ".gitignore"))
+	_ = ig.addFile(filepath.Join(root, ".archeignore"))
+
+	return ig, nil
+}
+
// addFile parses one ignore file into ig.rules, succeeding silently when
// the file does not exist. Lines use a simplified gitignore dialect:
// "#"-prefixed lines are comments, a leading "!" negates the rule, and a
// trailing "/" marks a directory-only rule.
//
// NOTE(review): anything after " #" is stripped as a trailing comment,
// which real gitignore does not do — a pattern containing " #" would be
// truncated here. Confirm this is acceptable.
func (ig *Ignore) addFile(path string) error {
	f, err := os.Open(path)
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		// Drop trailing " #" comments, then trailing whitespace.
		if idx := strings.Index(line, " #"); idx >= 0 {
			line = line[:idx]
		}
		line = strings.TrimRight(line, " \t")
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}

		rule := ignoreRule{}
		if strings.HasPrefix(line, "!") {
			rule.negate = true
			line = line[1:]
		}
		if strings.HasSuffix(line, "/") {
			rule.dirOnly = true
			line = strings.TrimSuffix(line, "/")
		}
		rule.pattern = line
		ig.rules = append(ig.rules, rule)
	}
	return sc.Err()
}
+
+func (ig *Ignore) Match(rel string) bool {
+	matched := false
+	for _, r := range ig.rules {
+		if r.dirOnly {
+			continue
+		}
+		if matchPattern(r.pattern, rel) {
+			if r.negate {
+				matched = false
+			} else {
+				matched = true
+			}
+		}
+	}
+	return matched
+}
+
+func (ig *Ignore) MatchDir(rel string) bool {
+	matched := false
+	for _, r := range ig.rules {
+		if matchPattern(r.pattern, rel) {
+			if r.negate {
+				matched = false
+			} else {
+				matched = true
+			}
+		}
+	}
+	return matched
+}
+
// matchPattern reports whether an ignore pattern matches rel. A pattern
// matches when it globs the basename, globs the whole relative path, globs
// a "**"-relaxed form of the path, or names a leading path component of
// rel (so "dir" covers everything under "dir/").
func matchPattern(pattern, rel string) bool {
	if ok, _ := filepath.Match(pattern, filepath.Base(rel)); ok {
		return true
	}
	if ok, _ := filepath.Match(pattern, rel); ok {
		return true
	}
	// filepath.Match has no "**" support; approximate it with "*".
	if strings.Contains(pattern, "**") {
		if ok, _ := filepath.Match(strings.ReplaceAll(pattern, "**", "*"), rel); ok {
			return true
		}
	}
	return strings.HasPrefix(rel, pattern+"/")
}

internal/wc/wc.go [A]
--- /dev/null
+++ b/internal/wc/wc.go
@@ -1,0 +1,1016 @@
+package wc
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"arche/internal/merge"
+	"arche/internal/object"
+	"arche/internal/repo"
+	"arche/internal/store"
+	"arche/internal/watcher"
+)
+
+func dirtySet(r *repo.Repo) (map[string]bool, error) {
+	if !watcher.IsActive(r.ArcheDir()) {
+		return nil, nil
+	}
+	entries, err := r.Store.ListDirtyWCacheEntries()
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]bool, len(entries))
+	for _, e := range entries {
+		m[e.Path] = true
+	}
+	return m, nil
+}
+
// FileStatus is one row of a working-copy status report: a repo-relative
// path plus a single status rune ('A' added, 'M' modified, 'D' deleted).
type FileStatus struct {
	Path   string
	Status rune
}

// WC provides working-copy operations (snapshot, status, materialize,
// amend, split) on top of a repository.
type WC struct {
	Repo    *repo.Repo
	SignKey string // when non-empty, new snapshots are signed with this key
}

// New returns a working copy for r with commit signing disabled.
func New(r *repo.Repo) *WC { return &WC{Repo: r} }
+
+func (wc *WC) maybeSign(c *object.Commit) error {
+	if wc.SignKey == "" {
+		return nil
+	}
+	body := object.CommitBodyForSigning(c)
+	sig, _, err := object.SignCommitBody(body, wc.SignKey)
+	if err != nil {
+		return fmt.Errorf("commit signing: %w", err)
+	}
+	c.CommitSig = sig
+	return nil
+}
+
+func (wc *WC) snapshotIntoTx(tx *store.Tx, headCommit *object.Commit, paths []string, cacheMap map[string]store.WCacheEntry, dirty map[string]bool, message string, now time.Time) (*object.Commit, [32]byte, error) {
+	r := wc.Repo
+
+	var entries []fileEntry
+
+	if err := r.Store.ClearWCache(tx); err != nil {
+		return nil, object.ZeroID, fmt.Errorf("clear wcache: %w", err)
+	}
+
+	for _, rel := range paths {
+		if dirty != nil && !dirty[rel] {
+			if cached, ok := cacheMap[rel]; ok {
+				entries = append(entries, fileEntry{
+					path:   rel,
+					blobID: cached.BlobID,
+					mode:   object.EntryMode(cached.Mode),
+				})
+				if err := r.Store.SetWCacheEntry(tx, cached); err != nil {
+					return nil, object.ZeroID, fmt.Errorf("set wcache: %w", err)
+				}
+				continue
+			}
+		}
+
+		abs := filepath.Join(r.Root, rel)
+		info, err := os.Lstat(abs)
+		if err != nil {
+			continue
+		}
+
+		var blobID [32]byte
+		mode := fileMode(info)
+
+		if cached, ok := cacheMap[rel]; ok {
+			st := info.Sys().(*syscall.Stat_t)
+			inode := st.Ino
+			mtime := info.ModTime().UnixNano()
+			size := info.Size()
+			if cached.Inode == inode && cached.MtimeNs == mtime && cached.Size == size {
+				blobID = cached.BlobID
+			}
+		}
+
+		if blobID == object.ZeroID {
+			content, err := readFileContent(abs, info)
+			if err != nil {
+				return nil, object.ZeroID, err
+			}
+			id, err := repo.WriteBlobTx(r.Store, tx, &object.Blob{Content: content})
+			if err != nil {
+				return nil, object.ZeroID, err
+			}
+			blobID = id
+		}
+
+		st := info.Sys().(*syscall.Stat_t)
+		if err := r.Store.SetWCacheEntry(tx, store.WCacheEntry{
+			Path:    rel,
+			Inode:   st.Ino,
+			MtimeNs: info.ModTime().UnixNano(),
+			Size:    info.Size(),
+			BlobID:  blobID,
+			Mode:    uint8(mode),
+		}); err != nil {
+			return nil, object.ZeroID, fmt.Errorf("set wcache: %w", err)
+		}
+
+		entries = append(entries, fileEntry{path: rel, blobID: blobID, mode: mode})
+	}
+
+	tree, err := buildTree(r, tx, entries)
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	sig := object.Signature{
+		Name:      r.Cfg.User.Name,
+		Email:     r.Cfg.User.Email,
+		Timestamp: now,
+	}
+
+	c := &object.Commit{
+		TreeID:    tree,
+		Parents:   headCommit.Parents,
+		ChangeID:  headCommit.ChangeID,
+		Author:    headCommit.Author,
+		Committer: sig,
+		Message:   message,
+		Phase:     headCommit.Phase,
+	}
+	if headCommit.Author.Timestamp.IsZero() {
+		c.Author = sig
+	}
+
+	if err := wc.maybeSign(c); err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	commitID, err := repo.WriteCommitTx(r.Store, tx, c)
+	if err != nil {
+		return nil, object.ZeroID, err
+	}
+	if err := r.Store.SetChangeCommit(tx, c.ChangeID, commitID); err != nil {
+		return nil, object.ZeroID, err
+	}
+
+	return c, commitID, nil
+}
+
+func (wc *WC) snapshotInput() (paths []string, cacheMap map[string]store.WCacheEntry, dirty map[string]bool, err error) {
+	r := wc.Repo
+
+	cacheEntries, err := r.Store.ListWCacheEntries()
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	cacheMap = make(map[string]store.WCacheEntry, len(cacheEntries))
+	for _, e := range cacheEntries {
+		cacheMap[e.Path] = e
+	}
+
+	dirty, _ = dirtySet(r)
+
+	if dirty != nil {
+		seen := make(map[string]bool, len(cacheMap)+len(dirty))
+		for p := range cacheMap {
+			seen[p] = true
+			paths = append(paths, p)
+		}
+		for p := range dirty {
+			if !seen[p] {
+				paths = append(paths, p)
+			}
+		}
+	} else {
+		paths, err = wc.trackedPaths()
+		if err != nil {
+			return nil, nil, nil, err
+		}
+	}
+
+	return paths, cacheMap, dirty, nil
+}
+
// Snapshot rewrites the current change so its tree matches the working
// copy, using message as the commit message. Unlike Snap it does not
// create a follow-up draft, move HEAD, run hooks, or log an operation.
func (wc *WC) Snapshot(message string) (*object.Commit, [32]byte, error) {
	r := wc.Repo
	now := time.Now()

	head, _, err := r.HeadCommit()
	if err != nil {
		return nil, object.ZeroID, err
	}

	paths, cacheMap, dirty, err := wc.snapshotInput()
	if err != nil {
		return nil, object.ZeroID, err
	}

	tx, err := r.Store.Begin()
	if err != nil {
		return nil, object.ZeroID, err
	}

	c, commitID, err := wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}
	if err := r.Store.Commit(tx); err != nil {
		return nil, object.ZeroID, err
	}
	return c, commitID, nil
}
+
// Snap finalizes the current draft: it snapshots the working copy into the
// current change, creates a fresh empty draft on top, moves HEAD to that
// draft, and records a "snap" operation in the oplog. It returns the
// snapped (finalized) commit, not the new empty draft.
//
// When pre-snap hooks are configured, the status diff is captured before
// the hooks run and the snapshot is restricted to those paths — presumably
// so files a hook touches as a side effect are not swept into the commit
// (confirm against snapshotRestrictedPathsIntoTx). Post-snap hook failures
// only warn on stderr; they do not fail the snap.
func (wc *WC) Snap(message string) (*object.Commit, [32]byte, error) {
	r := wc.Repo
	now := time.Now()

	// Captured for the oplog's "before" column (undo support).
	before, err := r.CaptureRefState()
	if err != nil {
		return nil, object.ZeroID, err
	}

	// Record which paths differed BEFORE any hook runs.
	statusBefore, err := wc.Status()
	if err != nil {
		return nil, object.ZeroID, err
	}
	diffPaths := make(map[string]bool, len(statusBefore))
	for _, fsEntry := range statusBefore {
		diffPaths[fsEntry.Path] = true
	}

	useRestrictedPaths := len(r.Cfg.Hooks.PreSnap) > 0
	if useRestrictedPaths {
		if err := RunHooksSequential(r.Root, "pre-snap", r.Cfg.Hooks.PreSnap); err != nil {
			return nil, object.ZeroID, fmt.Errorf("pre-snap hook failed: %w", err)
		}
	}

	head, _, err := r.HeadCommit()
	if err != nil {
		return nil, object.ZeroID, err
	}

	// Select the snapshot strategy up front so the transactional section
	// below is identical for both paths.
	type snapshotFn func(tx *store.Tx) (*object.Commit, [32]byte, error)
	var doSnapshot snapshotFn

	if useRestrictedPaths {
		// Restricted mode needs HEAD's flattened blobs/modes as the base.
		headBlobs := make(map[string][32]byte)
		headModes := make(map[string]object.EntryMode)
		if err := flattenTree(r, head.TreeID, "", headBlobs); err != nil {
			return nil, object.ZeroID, err
		}
		if err := flattenTreeModes(r, head.TreeID, "", headModes); err != nil {
			return nil, object.ZeroID, err
		}
		doSnapshot = func(tx *store.Tx) (*object.Commit, [32]byte, error) {
			return wc.snapshotRestrictedPathsIntoTx(tx, head, headBlobs, headModes, diffPaths, message, now)
		}
	} else {
		paths, cacheMap, dirty, err := wc.snapshotInput()
		if err != nil {
			return nil, object.ZeroID, err
		}
		doSnapshot = func(tx *store.Tx) (*object.Commit, [32]byte, error) {
			return wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
		}
	}

	tx, err := r.Store.Begin()
	if err != nil {
		return nil, object.ZeroID, err
	}

	snapped, snappedID, err := doSnapshot(tx)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	// Create the new empty draft on top of the snapped commit.
	newChangeID, err := r.Store.AllocChangeID(tx)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	sig := object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now}
	newDraft := &object.Commit{
		TreeID:    snapped.TreeID,
		Parents:   [][32]byte{snappedID},
		ChangeID:  newChangeID,
		Author:    sig,
		Committer: sig,
		Message:   "",
		Phase:     object.PhaseDraft,
	}

	newDraftID, err := repo.WriteCommitTx(r.Store, tx, newDraft)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	if err := r.Store.SetChangeCommit(tx, newChangeID, newDraftID); err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	// Log the operation in the same transaction as the commits.
	after := buildRefState(snappedID, object.FormatChangeID(newChangeID))
	op := store.Operation{
		Kind:      "snap",
		Timestamp: now.Unix(),
		Before:    before,
		After:     after,
		Metadata:  "'" + firstLine(snapped.Message) + "'",
	}
	if _, err := r.Store.InsertOperation(tx, op); err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	if err := r.Store.Commit(tx); err != nil {
		return nil, object.ZeroID, err
	}

	// Move HEAD only after the transaction has committed.
	if err := r.WriteHead(object.FormatChangeID(newChangeID)); err != nil {
		return nil, object.ZeroID, err
	}

	if len(r.Cfg.Hooks.PostSnap) > 0 {
		if err := RunHooksSequential(r.Root, "post-snap", r.Cfg.Hooks.PostSnap); err != nil {
			fmt.Fprintf(os.Stderr, "arche snap: post-snap hook: %v\n", err)
		}
	}

	return snapped, snappedID, nil
}
+
// Status compares the working copy against HEAD's tree and returns one
// FileStatus per difference, sorted by path: 'D' for files in HEAD but
// missing on disk, 'M' for files whose content differs, 'A' for on-disk
// files unknown to HEAD. The wcache and watcher dirty set are used to
// avoid re-hashing clean files; cache and ignore loading errors are
// treated as best-effort and ignored.
func (wc *WC) Status() ([]FileStatus, error) {
	r := wc.Repo
	head, _, err := r.HeadCommit()
	if err != nil {
		return nil, err
	}

	headFiles := make(map[string][32]byte)
	if err := flattenTree(r, head.TreeID, "", headFiles); err != nil {
		return nil, err
	}

	wcPaths, err := wc.trackedPaths()
	if err != nil {
		return nil, err
	}
	wcSet := make(map[string]bool, len(wcPaths))
	for _, p := range wcPaths {
		wcSet[p] = true
	}

	cacheEntries, _ := r.Store.ListWCacheEntries()
	cacheMap := make(map[string]store.WCacheEntry, len(cacheEntries))
	for _, e := range cacheEntries {
		cacheMap[e.Path] = e
	}
	dirty, _ := dirtySet(r)

	var out []FileStatus

	// Pass 1: HEAD files that are deleted or modified on disk.
	for path, headBlobID := range headFiles {
		if !wcSet[path] {
			out = append(out, FileStatus{Path: path, Status: 'D'})
			continue
		}

		// Fast path: watcher says clean, so compare the cached blob ID
		// without touching the filesystem.
		if dirty != nil && !dirty[path] {
			if cached, ok := cacheMap[path]; ok {
				if cached.BlobID != headBlobID {
					out = append(out, FileStatus{Path: path, Status: 'M'})
				}
				continue
			}
		}

		// Files that fail to stat/hash are silently skipped.
		curBlobID, err := wc.blobIDForPath(path)
		if err != nil {
			continue
		}
		if curBlobID != headBlobID {
			out = append(out, FileStatus{Path: path, Status: 'M'})
		}
	}

	// Pass 2: on-disk files HEAD doesn't know about. The ignore re-check
	// is defensive — trackedPaths has already filtered ignored files.
	ignore, _ := loadIgnore(r.Root)
	for _, path := range wcPaths {
		if _, inHead := headFiles[path]; !inHead {
			if ignore.Match(path) {
				continue
			}
			out = append(out, FileStatus{Path: path, Status: 'A'})
		}
	}

	sort.Slice(out, func(i, j int) bool { return out[i].Path < out[j].Path })
	return out, nil
}
+
+func (wc *WC) materializeDisk(treeID [32]byte) (map[string][32]byte, map[string]object.EntryMode, error) {
+	r := wc.Repo
+
+	wantFiles := make(map[string][32]byte)
+	wantMode := make(map[string]object.EntryMode)
+	if err := flattenTree(r, treeID, "", wantFiles); err != nil {
+		return nil, nil, err
+	}
+
+	if err := flattenTreeModes(r, treeID, "", wantMode); err != nil {
+		return nil, nil, err
+	}
+
+	ignore, _ := loadIgnore(r.Root)
+	err := filepath.WalkDir(r.Root, func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return nil
+		}
+		rel, _ := filepath.Rel(r.Root, path)
+		if rel == "." {
+			return nil
+		}
+		if d.IsDir() {
+			if rel == archeDirName || strings.HasPrefix(rel, archeDirName+string(os.PathSeparator)) {
+				return filepath.SkipDir
+			}
+			return nil
+		}
+		if ignore.Match(rel) {
+			return nil
+		}
+		if _, ok := wantFiles[rel]; !ok {
+			return os.Remove(path)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var conflictPaths []string
+	for relPath, blobID := range wantFiles {
+		abs := filepath.Join(r.Root, relPath)
+		if err := os.MkdirAll(filepath.Dir(abs), 0o755); err != nil {
+			return nil, nil, err
+		}
+		content, err := r.ReadBlob(blobID)
+		if err != nil {
+			if conf, cErr := r.ReadConflict(blobID); cErr == nil {
+				content = renderConflictMarkers(r, conf)
+				conflictPaths = append(conflictPaths, relPath)
+				err = nil
+			}
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		perm := fs.FileMode(0o644)
+		if wantMode[relPath] == object.ModeExec {
+			perm = 0o755
+		}
+		if err := os.WriteFile(abs, content, perm); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	for _, p := range conflictPaths {
+		delete(wantFiles, p)
+	}
+
+	return wantFiles, wantMode, nil
+}
+
+func renderConflictMarkers(r *repo.Repo, conf *object.Conflict) []byte {
+	readStr := func(id [32]byte) string {
+		if id == object.ZeroID {
+			return ""
+		}
+		b, _ := r.ReadBlob(id)
+		return string(b)
+	}
+	nl := func(s string) string {
+		if len(s) > 0 && s[len(s)-1] != '\n' {
+			return s + "\n"
+		}
+		return s
+	}
+	if conf.Ours.BlobID == object.ZeroID {
+		return []byte(fmt.Sprintf("<<<<<<< ours\n(deleted)\n=======\n%s>>>>>>> theirs\n", nl(readStr(conf.Theirs.BlobID))))
+	}
+	if conf.Theirs.BlobID == object.ZeroID {
+		return []byte(fmt.Sprintf("<<<<<<< ours\n%s=======\n(deleted)\n>>>>>>> theirs\n", nl(readStr(conf.Ours.BlobID))))
+	}
+	return []byte(fmt.Sprintf("<<<<<<< ours\n%s=======\n%s>>>>>>> theirs\n",
+		nl(readStr(conf.Ours.BlobID)),
+		nl(readStr(conf.Theirs.BlobID))))
+}
+
// populateWCacheInTx rebuilds the working-copy stat cache inside tx from
// files just materialized on disk. wantFiles maps repo-relative paths to
// the blob IDs that were written. Caching is best-effort: paths that fail
// to stat, whose Sys() is not a *syscall.Stat_t, or whose SetWCacheEntry
// call fails are silently skipped.
func (wc *WC) populateWCacheInTx(tx *store.Tx, wantFiles map[string][32]byte) error {
	r := wc.Repo
	if err := r.Store.ClearWCache(tx); err != nil {
		return err
	}
	for relPath, blobID := range wantFiles {
		abs := filepath.Join(r.Root, relPath)
		info, err := os.Lstat(abs)
		if err != nil {
			continue
		}
		st, ok := info.Sys().(*syscall.Stat_t)
		if !ok {
			continue
		}
		_ = r.Store.SetWCacheEntry(tx, store.WCacheEntry{
			Path:    relPath,
			Inode:   st.Ino,
			MtimeNs: info.ModTime().UnixNano(),
			Size:    info.Size(),
			BlobID:  blobID,
			Mode:    uint8(fileMode(info)),
		})
	}
	return nil
}
+
+func (wc *WC) MaterializeQuiet(treeID [32]byte) error {
+	r := wc.Repo
+
+	wantFiles, _, err := wc.materializeDisk(treeID)
+	if err != nil {
+		return err
+	}
+
+	tx, err := r.Store.Begin()
+	if err != nil {
+		return err
+	}
+	if err := wc.populateWCacheInTx(tx, wantFiles); err != nil {
+		r.Store.Rollback(tx)
+		return err
+	}
+	return r.Store.Commit(tx)
+}
+
// Materialize rewrites the working copy to match treeID, refreshes the
// wcache, and records a "co" (checkout) operation whose after-state points
// at newChangeID. Errors from CaptureRefState and GetChangeCommit are
// ignored, so the checkout proceeds with partially-filled oplog metadata —
// NOTE(review): confirm that is intended.
func (wc *WC) Materialize(treeID [32]byte, newChangeID string) error {
	r := wc.Repo

	before, _ := r.CaptureRefState()
	now := time.Now()

	wantFiles, _, err := wc.materializeDisk(treeID)
	if err != nil {
		return err
	}

	// Resolve the target change's commit for the oplog after-state.
	bare := object.StripChangeIDPrefix(newChangeID)
	commitID, _ := r.Store.GetChangeCommit(bare)
	after := buildRefState(commitID, newChangeID)

	tx, err := r.Store.Begin()
	if err != nil {
		return err
	}
	if err := wc.populateWCacheInTx(tx, wantFiles); err != nil {
		r.Store.Rollback(tx)
		return err
	}

	op := store.Operation{
		Kind:      "co",
		Timestamp: now.Unix(),
		Before:    before,
		After:     after,
		Metadata:  "checked out " + newChangeID,
	}
	if _, err := r.Store.InsertOperation(tx, op); err != nil {
		r.Store.Rollback(tx)
		return err
	}

	return r.Store.Commit(tx)
}
+
// archeDirName is the repository metadata directory, always excluded from
// working-copy walks.
const archeDirName = ".arche"

// trackedPaths walks the working copy and returns every file path
// (slash-separated, repo-relative) that is neither under .arche nor
// excluded by the ignore rules. Ignored directories are pruned whole via
// MatchDir; per-entry walk errors are skipped rather than aborting.
func (wc *WC) trackedPaths() ([]string, error) {
	r := wc.Repo
	ignore, _ := loadIgnore(r.Root)

	var paths []string
	err := filepath.WalkDir(r.Root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return nil
		}
		rel, _ := filepath.Rel(r.Root, path)
		if rel == "." {
			return nil
		}
		if d.IsDir() {
			if rel == archeDirName || strings.HasPrefix(rel, archeDirName+string(os.PathSeparator)) {
				return filepath.SkipDir
			}
			if ignore.MatchDir(rel) {
				return filepath.SkipDir
			}
			return nil
		}
		if ignore.Match(rel) {
			return nil
		}
		paths = append(paths, filepath.ToSlash(rel))
		return nil
	})
	return paths, err
}
+
+func (wc *WC) blobIDForPath(rel string) ([32]byte, error) {
+	r := wc.Repo
+	abs := filepath.Join(r.Root, rel)
+	info, err := os.Lstat(abs)
+	if err != nil {
+		return object.ZeroID, err
+	}
+	st := info.Sys().(*syscall.Stat_t)
+
+	if cached, _ := r.Store.GetWCacheEntry(rel); cached != nil {
+		if cached.Inode == st.Ino &&
+			cached.MtimeNs == info.ModTime().UnixNano() &&
+			cached.Size == info.Size() {
+			return cached.BlobID, nil
+		}
+	}
+
+	content, err := readFileContent(abs, info)
+	if err != nil {
+		return object.ZeroID, err
+	}
+	b := &object.Blob{Content: content}
+	return object.HashBlob(b), nil
+}
+
+func flattenTree(r *repo.Repo, treeID [32]byte, prefix string, out map[string][32]byte) error {
+	if treeID == object.ZeroID {
+		return nil
+	}
+	t, err := r.ReadTree(treeID)
+	if err != nil {
+		return err
+	}
+	for _, e := range t.Entries {
+		rel := join(prefix, e.Name)
+		switch e.Mode {
+		case object.ModeDir:
+			if err := flattenTree(r, e.ObjectID, rel, out); err != nil {
+				return err
+			}
+		default:
+			out[rel] = e.ObjectID
+		}
+	}
+	return nil
+}
+
+func flattenTreeModes(r *repo.Repo, treeID [32]byte, prefix string, out map[string]object.EntryMode) error {
+	if treeID == object.ZeroID {
+		return nil
+	}
+	t, err := r.ReadTree(treeID)
+	if err != nil {
+		return err
+	}
+	for _, e := range t.Entries {
+		rel := join(prefix, e.Name)
+		switch e.Mode {
+		case object.ModeDir:
+			if err := flattenTreeModes(r, e.ObjectID, rel, out); err != nil {
+				return err
+			}
+		default:
+			out[rel] = e.Mode
+		}
+	}
+	return nil
+}
+
// fileEntry is one flattened working-copy file destined for a tree.
type fileEntry struct {
	path   string // slash-separated, repo-relative
	blobID [32]byte
	mode   object.EntryMode
}

// buildTree writes a hierarchy of tree objects for the given flat file
// entries inside tx and returns the root tree's ID. Paths are split on
// "/" to form intermediate directory nodes; entries within each tree are
// sorted by name so identical content always hashes identically.
func buildTree(r *repo.Repo, tx *store.Tx, entries []fileEntry) ([32]byte, error) {
	// In-memory trie of the tree being built: leaves are files, interior
	// nodes are directories.
	type node struct {
		isFile   bool
		blobID   [32]byte
		mode     object.EntryMode
		children map[string]*node
	}
	root := &node{children: make(map[string]*node)}

	for _, e := range entries {
		parts := strings.Split(e.path, "/")
		cur := root
		for i, part := range parts {
			if i == len(parts)-1 {
				cur.children[part] = &node{isFile: true, blobID: e.blobID, mode: e.mode}
			} else {
				if _, ok := cur.children[part]; !ok {
					cur.children[part] = &node{children: make(map[string]*node)}
				}
				cur = cur.children[part]
			}
		}
	}

	// writeNode writes subtrees depth-first so each directory's entry can
	// reference its children's IDs.
	var writeNode func(n *node) ([32]byte, error)
	writeNode = func(n *node) ([32]byte, error) {
		var treeEntries []object.TreeEntry
		for name, child := range n.children {
			if child.isFile {
				treeEntries = append(treeEntries, object.TreeEntry{
					Name:     name,
					Mode:     child.mode,
					ObjectID: child.blobID,
				})
			} else {
				subID, err := writeNode(child)
				if err != nil {
					return object.ZeroID, err
				}
				treeEntries = append(treeEntries, object.TreeEntry{
					Name:     name,
					Mode:     object.ModeDir,
					ObjectID: subID,
				})
			}
		}
		sort.Slice(treeEntries, func(i, j int) bool { return treeEntries[i].Name < treeEntries[j].Name })
		t := &object.Tree{Entries: treeEntries}
		id, err := repo.WriteTreeTx(r.Store, tx, t)
		return id, err
	}

	return writeNode(root)
}
+
+func fileMode(info os.FileInfo) object.EntryMode {
+	if info.Mode()&0o111 != 0 {
+		return object.ModeExec
+	}
+	if info.Mode()&os.ModeSymlink != 0 {
+		return object.ModeSymlink
+	}
+	return object.ModeFile
+}
+
+func readFileContent(abs string, info os.FileInfo) ([]byte, error) {
+	if info.Mode()&os.ModeSymlink != 0 {
+		target, err := os.Readlink(abs)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(target), nil
+	}
+	return os.ReadFile(abs)
+}
+
// join concatenates a tree path prefix and an entry name with "/",
// returning the bare name when the prefix is empty.
func join(prefix, name string) string {
	if prefix != "" {
		return prefix + "/" + name
	}
	return name
}
+
// buildRefState encodes a (head change ID, tip commit ID) pair as the JSON
// blob stored in the oplog's before/after columns. The commit ID is
// rendered as lowercase hex.
func buildRefState(commitID [32]byte, changeID string) string {
	state := map[string]string{
		"head": changeID,
		"tip":  fmt.Sprintf("%x", commitID),
	}
	encoded, _ := json.Marshal(state)
	return string(encoded)
}
+
// firstLine returns s up to (not including) its first newline, or all of
// s when it contains none.
func firstLine(s string) string {
	i := strings.IndexByte(s, '\n')
	if i < 0 {
		return s
	}
	return s[:i]
}
+
// Amend re-snapshots the working copy into the current change (optionally
// replacing its message), marks the old head commit obsolete, logs an
// "amend" operation, and finally auto-rebases descendant drafts onto the
// amended commit. Amending a public commit is refused. An empty message
// keeps the existing one. Downstream rebase failures only warn on stderr;
// the amend itself has already committed at that point.
func (wc *WC) Amend(message string) (*object.Commit, [32]byte, error) {
	r := wc.Repo
	now := time.Now()

	head, oldHeadID, err := r.HeadCommit()
	if err != nil {
		return nil, object.ZeroID, err
	}
	if head.Phase == object.PhasePublic {
		return nil, object.ZeroID, fmt.Errorf("cannot amend a public commit; use --force-rewrite if you are sure")
	}

	// Captured for the oplog's "before" column (undo support).
	before, err := r.CaptureRefState()
	if err != nil {
		return nil, object.ZeroID, err
	}

	if message == "" {
		message = head.Message
	}

	paths, cacheMap, dirty, err := wc.snapshotInput()
	if err != nil {
		return nil, object.ZeroID, err
	}

	tx, err := r.Store.Begin()
	if err != nil {
		return nil, object.ZeroID, err
	}

	amended, amendedID, err := wc.snapshotIntoTx(tx, head, paths, cacheMap, dirty, message, now)
	if err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	// Only mark obsolete when the amend actually produced a new commit.
	if oldHeadID != amendedID {
		obs := &object.ObsoleteMarker{
			Predecessor: oldHeadID,
			Successors:  [][32]byte{amendedID},
			Reason:      "amend",
			Timestamp:   now.Unix(),
		}
		if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
			r.Store.Rollback(tx)
			return nil, object.ZeroID, err
		}
	}

	after := buildRefState(amendedID, object.FormatChangeID(amended.ChangeID))
	op := store.Operation{
		Kind:      "amend",
		Timestamp: now.Unix(),
		Before:    before,
		After:     after,
		Metadata:  "'" + firstLine(amended.Message) + "'",
	}
	if _, err := r.Store.InsertOperation(tx, op); err != nil {
		r.Store.Rollback(tx)
		return nil, object.ZeroID, err
	}

	if err := r.Store.Commit(tx); err != nil {
		return nil, object.ZeroID, err
	}

	// Rebase descendants outside the transaction; best-effort.
	if oldHeadID != amendedID {
		if err := wc.autoRebaseDownstream(oldHeadID, amendedID, head.ChangeID, now); err != nil {
			fmt.Fprintf(os.Stderr, "arche: warning: downstream rebase failed: %v\n", err)
		}
	}

	return amended, amendedID, nil
}
+
// autoRebaseDownstream rewrites draft changes that descend from
// oldParentID so they sit on newParentID instead, after an amend.
//
// It scans every change for draft commits (skipping headChangeID, which
// the amend already rewrote), indexes them by their first parent, then
// walks the descendant chain breadth-first. Each draft is three-way
// merged — old first-parent tree as the base — onto its remapped parent,
// written in its own transaction together with an obsolete marker, and
// reported on stdout with any conflict count. Only first parents are
// followed or rewritten.
func (wc *WC) autoRebaseDownstream(oldParentID, newParentID [32]byte, headChangeID string, now time.Time) error {
	r := wc.Repo

	allChanges, err := r.Store.ListChanges()
	if err != nil {
		return err
	}

	type draftEntry struct {
		id       [32]byte
		changeID string
		commit   *object.Commit
	}

	// Index rebase candidates by their first parent commit.
	children := make(map[[32]byte][]draftEntry)
	for _, ch := range allChanges {
		if ch.CommitID == object.ZeroID {
			continue
		}
		c, err := r.ReadCommit(ch.CommitID)
		if err != nil || c == nil {
			continue
		}
		if c.Phase != object.PhaseDraft {
			continue
		}
		if c.ChangeID == headChangeID {
			continue
		}
		if len(c.Parents) == 0 {
			continue
		}
		d := draftEntry{id: ch.CommitID, changeID: ch.Name, commit: c}
		children[c.Parents[0]] = append(children[c.Parents[0]], d)
	}

	// Collect tasks in BFS order so parents are always rewritten before
	// their descendants.
	type rebaseTask struct {
		entry     draftEntry
		newParent [32]byte
	}
	var tasks []rebaseTask
	queue := []struct {
		oldID [32]byte
		newID [32]byte
	}{{oldParentID, newParentID}}

	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		for _, child := range children[cur.oldID] {
			tasks = append(tasks, rebaseTask{entry: child, newParent: cur.newID})
			// NOTE(review): the queued newID is a placeholder — the child's
			// rewritten ID isn't known yet. Tasks resolve their real parent
			// through `remapped` below, which is filled in the same BFS
			// order, so the placeholder is never actually consulted.
			queue = append(queue, struct{ oldID, newID [32]byte }{child.id, child.id})
		}
	}

	remapped := map[[32]byte][32]byte{oldParentID: newParentID}

	for _, task := range tasks {
		oldFirst := task.entry.commit.Parents[0]
		newParent, ok := remapped[oldFirst]
		if !ok {
			newParent = oldFirst
		}

		// Merge base: the tree of the old (pre-rewrite) parent, when readable.
		var baseTreeID [32]byte
		if pc, err2 := r.ReadCommit(oldFirst); err2 == nil {
			baseTreeID = pc.TreeID
		}
		newParentCommit, err := r.ReadCommit(newParent)
		if err != nil {
			return fmt.Errorf("read new parent for %s: %w", object.FormatChangeID(task.entry.changeID), err)
		}

		result, err := merge.Trees(r, baseTreeID, task.entry.commit.TreeID, newParentCommit.TreeID)
		if err != nil {
			return fmt.Errorf("merge for %s: %w", object.FormatChangeID(task.entry.changeID), err)
		}

		// Keep the draft's identity and message; only parent/tree/committer
		// change.
		newCommit := &object.Commit{
			TreeID:    result.TreeID,
			Parents:   [][32]byte{newParent},
			ChangeID:  task.entry.changeID,
			Author:    task.entry.commit.Author,
			Committer: object.Signature{Name: r.Cfg.User.Name, Email: r.Cfg.User.Email, Timestamp: now},
			Message:   task.entry.commit.Message,
			Phase:     task.entry.commit.Phase,
		}

		// Each rebase commits in its own transaction so earlier successes
		// survive a later failure.
		tx, err := r.Store.Begin()
		if err != nil {
			return err
		}
		newCommitID, err := repo.WriteCommitTx(r.Store, tx, newCommit)
		if err != nil {
			r.Store.Rollback(tx)
			return err
		}
		if err := r.Store.SetChangeCommit(tx, task.entry.changeID, newCommitID); err != nil {
			r.Store.Rollback(tx)
			return err
		}
		obs := &object.ObsoleteMarker{
			Predecessor: task.entry.id,
			Successors:  [][32]byte{newCommitID},
			Reason:      "amend",
			Timestamp:   now.Unix(),
		}
		if _, err := repo.WriteObsoleteTx(r.Store, tx, obs); err != nil {
			r.Store.Rollback(tx)
			return err
		}
		if err := r.Store.Commit(tx); err != nil {
			return err
		}

		remapped[task.entry.id] = newCommitID
		conflictNote := ""
		if len(result.Conflicts) > 0 {
			conflictNote = fmt.Sprintf(" (%d conflict(s))", len(result.Conflicts))
		}
		fmt.Printf("  auto-rebased %s%s\n", object.FormatChangeID(task.entry.changeID), conflictNote)
	}
	return nil
}

internal/wc/wc_test.go [A]
--- /dev/null
+++ b/internal/wc/wc_test.go
@@ -1,0 +1,206 @@
+package wc_test
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"arche/internal/repo"
+	"arche/internal/wc"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// TestMaterialize_RestoresFiles checks that Materialize rewinds a dirty
+// working copy back to a snapshotted tree: a modified file regains its
+// snapshotted contents and a file created after the snapshot is removed.
+func TestMaterialize_RestoresFiles(t *testing.T) {
+	rep := initRepo(t)
+	workcopy := wc.New(rep)
+
+	writeFile(t, rep, "keep.txt", "original")
+	writeFile(t, rep, "also.txt", "also original")
+
+	snapped, _, err := workcopy.Snap("baseline")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+
+	headCID, err := rep.HeadChangeID()
+	if err != nil {
+		t.Fatalf("HeadChangeID: %v", err)
+	}
+	baselineTree := snapped.TreeID
+
+	// Dirty the working copy: edit one tracked file, add an untracked one.
+	writeFile(t, rep, "keep.txt", "MODIFIED")
+	writeFile(t, rep, "extra.txt", "should vanish")
+
+	if err := workcopy.Materialize(baselineTree, headCID); err != nil {
+		t.Fatalf("Materialize: %v", err)
+	}
+
+	if got, exists := readFile(t, rep, "keep.txt"); !exists || got != "original" {
+		t.Errorf("keep.txt: want %q got (%q, exists=%v)", "original", got, exists)
+	}
+	if got2, exists2 := readFile(t, rep, "also.txt"); !exists2 || got2 != "also original" {
+		t.Errorf("also.txt: want %q got (%q, exists=%v)", "also original", got2, exists2)
+	}
+	if _, extraExists := readFile(t, rep, "extra.txt"); extraExists {
+		t.Error("extra.txt should have been removed by Materialize")
+	}
+}
+
+// TestMaterialize_AddsNewFiles checks that Materialize recreates a file
+// that exists in the target tree but is missing from the working copy.
+func TestMaterialize_AddsNewFiles(t *testing.T) {
+	r := initRepo(t)
+	w := wc.New(r)
+
+	writeFile(t, r, "new.txt", "new content")
+	snapped, _, err := w.Snap("add new.txt")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+
+	targetTree := snapped.TreeID
+	// Fail fast on lookup errors instead of silently discarding them
+	// (matches the error handling in the sibling Materialize tests).
+	headCID, err := r.HeadChangeID()
+	if err != nil {
+		t.Fatalf("HeadChangeID: %v", err)
+	}
+
+	if err := os.Remove(filepath.Join(r.Root, "new.txt")); err != nil {
+		t.Fatalf("Remove: %v", err)
+	}
+
+	// Sanity check: the file really is gone before Materialize runs.
+	_, existsBefore := readFile(t, r, "new.txt")
+	if existsBefore {
+		t.Fatal("pre-condition: new.txt should not exist")
+	}
+
+	if err := w.Materialize(targetTree, headCID); err != nil {
+		t.Fatalf("Materialize: %v", err)
+	}
+
+	got, exists := readFile(t, r, "new.txt")
+	if !exists || got != "new content" {
+		t.Errorf("new.txt: want %q got (%q, exists=%v)", "new content", got, exists)
+	}
+}
+
+// TestMaterialize_RecordsOperation checks that Materialize appends a "co"
+// (checkout) entry to the operation log.
+func TestMaterialize_RecordsOperation(t *testing.T) {
+	r := initRepo(t)
+	w := wc.New(r)
+
+	writeFile(t, r, "f.txt", "data")
+	snapped, _, err := w.Snap("snap")
+	if err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+
+	// Fail fast on store errors instead of silently discarding them; a
+	// broken store would otherwise surface as a confusing count mismatch.
+	headCID, err := r.HeadChangeID()
+	if err != nil {
+		t.Fatalf("HeadChangeID: %v", err)
+	}
+	opsBefore, err := r.Store.ListOperations(50)
+	if err != nil {
+		t.Fatalf("ListOperations: %v", err)
+	}
+
+	if err := w.Materialize(snapped.TreeID, headCID); err != nil {
+		t.Fatalf("Materialize: %v", err)
+	}
+
+	opsAfter, err := r.Store.ListOperations(50)
+	if err != nil {
+		t.Fatalf("ListOperations: %v", err)
+	}
+	if len(opsAfter) <= len(opsBefore) {
+		t.Errorf("expected a new operation after Materialize; before=%d after=%d",
+			len(opsBefore), len(opsAfter))
+	}
+
+	last, err := r.Store.GetLastOperation()
+	if err != nil {
+		t.Fatalf("GetLastOperation: %v", err)
+	}
+	if last == nil || last.Kind != "co" {
+		t.Errorf("last operation kind: want co, got %v", last)
+	}
+}
+
+func TestStatus_Modified(t *testing.T) {
+	r := initRepo(t)
+	w := wc.New(r)
+
+	writeFile(t, r, "mod.txt", "before")
+
+	if _, _, err := w.Snap("base"); err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+
+	writeFile(t, r, "mod.txt", "after")
+
+	statuses, err := w.Status()
+	if err != nil {
+		t.Fatalf("Status: %v", err)
+	}
+
+	var found bool
+	for _, s := range statuses {
+		if s.Path == "mod.txt" && s.Status == 'M' {
+			found = true
+		}
+	}
+	if !found {
+		t.Errorf("mod.txt should be Modified, got statuses: %v", statuses)
+	}
+}
+
+func TestStatus_Deleted(t *testing.T) {
+	r := initRepo(t)
+	w := wc.New(r)
+
+	writeFile(t, r, "del.txt", "bye")
+
+	if _, _, err := w.Snap("base"); err != nil {
+		t.Fatalf("Snap: %v", err)
+	}
+
+	if err := os.Remove(filepath.Join(r.Root, "del.txt")); err != nil {
+		t.Fatalf("Remove: %v", err)
+	}
+
+	statuses, err := w.Status()
+	if err != nil {
+		t.Fatalf("Status: %v", err)
+	}
+
+	var found bool
+	for _, s := range statuses {
+		if s.Path == "del.txt" && s.Status == 'D' {
+			found = true
+		}
+	}
+	if !found {
+		t.Errorf("del.txt should be Deleted, got statuses: %v", statuses)
+	}
+}
+
+// initRepo creates a fresh repository in a test-scoped temporary directory
+// and registers a cleanup that closes it when the test finishes.
+func initRepo(t *testing.T) *repo.Repo {
+	t.Helper()
+	rep, err := repo.Init(t.TempDir())
+	if err != nil {
+		t.Fatalf("repo.Init: %v", err)
+	}
+	t.Cleanup(func() { rep.Close() })
+	return rep
+}
+
+// writeFile writes content to name (relative to the repo root), creating
+// any missing parent directories; a failure aborts the test immediately.
+func writeFile(t *testing.T, r *repo.Repo, name, content string) {
+	t.Helper()
+	target := filepath.Join(r.Root, name)
+	if err := os.MkdirAll(filepath.Dir(target), 0o755); err != nil {
+		t.Fatalf("MkdirAll: %v", err)
+	}
+	if err := os.WriteFile(target, []byte(content), 0o644); err != nil {
+		t.Fatalf("WriteFile %s: %v", name, err)
+	}
+}
+
+// readFile reads name (relative to the repo root) and reports whether the
+// file exists; any error other than non-existence aborts the test.
+func readFile(t *testing.T, r *repo.Repo, name string) (string, bool) {
+	t.Helper()
+	data, err := os.ReadFile(filepath.Join(r.Root, name))
+	switch {
+	case os.IsNotExist(err):
+		return "", false
+	case err != nil:
+		t.Fatalf("readFile %s: %v", name, err)
+	}
+	return string(data), true
+}

internal/wiki/wiki.go [A]
--- /dev/null
+++ b/internal/wiki/wiki.go
@@ -1,0 +1,63 @@
+package wiki
+
+import (
+	"database/sql"
+	"errors"
+	"fmt"
+	"time"
+)
+
+// Store provides read/write access to wiki pages kept in the
+// wiki_pages table of the wrapped SQL database.
+type Store struct {
+	db *sql.DB
+}
+
+// Page is one wiki page row as read from / written to wiki_pages.
+type Page struct {
+	Title   string
+	Content string
+	Author  string
+	Updated int64 // last-modified time as a Unix timestamp in seconds (set by Set)
+}
+
+// New wraps db in a Store. The caller retains ownership of db; the
+// wiki_pages table is assumed to exist already — TODO confirm who creates it.
+func New(db *sql.DB) *Store { return &Store{db: db} }
+
+// Get returns the page with the given title. A missing page yields a
+// "wiki page not found" error; any other scan error is returned as-is.
+func (s *Store) Get(title string) (Page, error) {
+	row := s.db.QueryRow(`SELECT title, content, author, updated FROM wiki_pages WHERE title=?`, title)
+	var p Page
+	err := row.Scan(&p.Title, &p.Content, &p.Author, &p.Updated)
+	// errors.Is also matches a driver-wrapped ErrNoRows, unlike ==.
+	if errors.Is(err, sql.ErrNoRows) {
+		return Page{}, fmt.Errorf("wiki page not found: %s", title)
+	}
+	return p, err
+}
+
+func (s *Store) Set(title, content, author string) error {
+	now := time.Now().Unix()
+	_, err := s.db.Exec(`
+		INSERT INTO wiki_pages (title, content, author, updated) 
+		VALUES (?, ?, ?, ?)
+		ON CONFLICT(title) DO UPDATE SET 
+			content=excluded.content, 
+			author=excluded.author, 
+			updated=excluded.updated`,
+		title, content, author, now,
+	)
+	return err
+}
+
+// List returns every wiki page, ordered alphabetically by title.
+func (s *Store) List() ([]Page, error) {
+	rows, err := s.db.Query(`SELECT title, content, author, updated FROM wiki_pages ORDER BY title`)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var pages []Page
+	for rows.Next() {
+		var page Page
+		if err := rows.Scan(&page.Title, &page.Content, &page.Author, &page.Updated); err != nil {
+			return nil, err
+		}
+		pages = append(pages, page)
+	}
+	// rows.Err reports any iteration error that terminated the loop early.
+	return pages, rows.Err()
+}