A vibe-coded Tangled fork that supports Pijul.

Add schedule/cron trigger support for Spindle CI

Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

+816 -422
+7
api/tangled/tangledpipeline.go
··· 40 40 Value string `json:"value" cborgen:"value"` 41 41 } 42 42 43 + // Pipeline_ScheduleTriggerData is a "scheduleTriggerData" in the sh.tangled.pipeline schema. 44 + type Pipeline_ScheduleTriggerData struct { 45 + // cron: The cron expression that triggered this pipeline 46 + Cron string `json:"cron" cborgen:"cron"` 47 + } 48 + 43 49 // Pipeline_PullRequestTriggerData is a "pullRequestTriggerData" in the sh.tangled.pipeline schema. 44 50 type Pipeline_PullRequestTriggerData struct { 45 51 Action string `json:"action" cborgen:"action"` ··· 61 67 Manual *Pipeline_ManualTriggerData `json:"manual,omitempty" cborgen:"manual,omitempty"` 62 68 PullRequest *Pipeline_PullRequestTriggerData `json:"pullRequest,omitempty" cborgen:"pullRequest,omitempty"` 63 69 Push *Pipeline_PushTriggerData `json:"push,omitempty" cborgen:"push,omitempty"` 70 + Schedule *Pipeline_ScheduleTriggerData `json:"schedule,omitempty" cborgen:"schedule,omitempty"` 64 71 Repo *Pipeline_TriggerRepo `json:"repo" cborgen:"repo"` 65 72 } 66 73
+10
appview/db/db.go
··· 676 676 create index if not exists idx_webhooks_repo_at on webhooks(repo_at); 677 677 create index if not exists idx_webhook_deliveries_webhook_id on webhook_deliveries(webhook_id); 678 678 create index if not exists idx_site_deploys_repo_at on site_deploys(repo_at); 679 + 680 + create table if not exists pijul_change_pushes ( 681 + change_hash text not null, 682 + committer_did text not null, 683 + repo_at text not null, 684 + channel text not null default '', 685 + pushed_at text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 686 + primary key (change_hash, repo_at) 687 + ); 688 + create index if not exists idx_pijul_change_pushes_repo on pijul_change_pushes(repo_at); 679 689 `) 680 690 if err != nil { 681 691 return nil, err
+48
appview/db/pijul_changes.go
··· 1 + package db 2 + 3 + import "strings" 4 + 5 + // StorePijulChangePush records that a change was pushed by a specific committer. 6 + func StorePijulChangePush(e Execer, changeHash, committerDid, repoAt, channel string) error { 7 + _, err := e.Exec(` 8 + insert or ignore into pijul_change_pushes (change_hash, committer_did, repo_at, channel) 9 + values (?, ?, ?, ?) 10 + `, changeHash, committerDid, repoAt, channel) 11 + return err 12 + } 13 + 14 + // GetVerifiedPijulChanges returns the set of change hashes that have a verified 15 + // push record (i.e., were pushed by a known committer via an authenticated path). 16 + // Returns a map of change_hash → committer_did. 17 + func GetVerifiedPijulChanges(e Execer, repoAt string, hashes []string) (map[string]string, error) { 18 + if len(hashes) == 0 { 19 + return make(map[string]string), nil 20 + } 21 + 22 + placeholders := make([]string, len(hashes)) 23 + args := make([]any, 0, len(hashes)+1) 24 + args = append(args, repoAt) 25 + for i, h := range hashes { 26 + placeholders[i] = "?" 27 + args = append(args, h) 28 + } 29 + 30 + rows, err := e.Query(` 31 + select change_hash, committer_did from pijul_change_pushes 32 + where repo_at = ? and change_hash in (`+strings.Join(placeholders, ",")+`) 33 + `, args...) 34 + if err != nil { 35 + return nil, err 36 + } 37 + defer rows.Close() 38 + 39 + result := make(map[string]string) 40 + for rows.Next() { 41 + var hash, did string 42 + if err := rows.Scan(&hash, &did); err != nil { 43 + return nil, err 44 + } 45 + result[hash] = did 46 + } 47 + return result, rows.Err() 48 + }
+7 -33
appview/ingester.go
··· 1135 1135 } 1136 1136 1137 1137 // ingestPijulRefUpdate handles sh.tangled.pijul.refUpdate records published to 1138 - // the committer's PDS after a pijul push or merge. It updates the contributor 1139 - // punchcard using the number of changes pushed. 1138 + // the committer's PDS after a pijul push or merge. 1139 + // Punchcard updates are handled by the knotstream handler (ingestPijulRefUpdate 1140 + // in state/knotstream.go) to avoid double-counting. 1140 1141 func (i *Ingester) ingestPijulRefUpdate(e *jmodels.Event) error { 1141 - if e.Commit.Operation != jmodels.CommitOperationCreate { 1142 - return nil 1143 - } 1144 - 1145 - committerDid := e.Did 1146 - 1147 - var record tangled.PijulRefUpdate 1148 - if err := json.Unmarshal(e.Commit.Record, &record); err != nil { 1149 - return fmt.Errorf("invalid pijulRefUpdate record: %w", err) 1150 - } 1151 - 1152 - if record.Repo == "" || len(record.Changes) == 0 { 1153 - return nil 1154 - } 1155 - 1156 - repoAt, err := syntax.ParseATURI(record.Repo) 1157 - if err != nil { 1158 - return fmt.Errorf("invalid repo AT URI %q: %w", record.Repo, err) 1159 - } 1160 - 1161 - if _, err := db.GetRepoByAtUri(i.Db, repoAt.String()); err != nil { 1162 - // Unknown repo — ignore rather than error; the repo may not be 1163 - // registered on this appview instance. 1164 - return nil 1165 - } 1166 - 1167 - return db.AddPunch(i.Db, models.Punch{ 1168 - Did: committerDid, 1169 - Date: time.Now(), 1170 - Count: len(record.Changes), 1171 - }) 1142 + // No-op: the knotstream event path is the authoritative source for 1143 + // punchcard updates. This handler exists so the firehose consumer 1144 + // doesn't log unknown-NSID warnings. 1145 + return nil 1172 1146 }
+11 -9
appview/pages/pages.go
··· 847 847 } 848 848 849 849 type RepoChangesParams struct { 850 - LoggedInUser *oauth.MultiAccountUser 851 - RepoInfo repoinfo.RepoInfo 852 - Active string 853 - Page int 854 - Changes []PijulChangeView 850 + LoggedInUser *oauth.MultiAccountUser 851 + RepoInfo repoinfo.RepoInfo 852 + Active string 853 + Page int 854 + Changes []PijulChangeView 855 + VerifiedChanges map[string]string // change_hash → committer_did 855 856 } 856 857 857 858 func (p *Pages) RepoChanges(w io.Writer, params RepoChangesParams) error { ··· 860 861 } 861 862 862 863 type RepoChangeParams struct { 863 - LoggedInUser *oauth.MultiAccountUser 864 - RepoInfo repoinfo.RepoInfo 865 - Active string 866 - Change PijulChangeDetail 864 + LoggedInUser *oauth.MultiAccountUser 865 + RepoInfo repoinfo.RepoInfo 866 + Active string 867 + Change PijulChangeDetail 868 + VerifiedChanges map[string]string // change_hash → committer_did 867 869 } 868 870 869 871 func (p *Pages) RepoChange(w io.Writer, params RepoChangeParams) error {
+8 -1
appview/pages/templates/repo/change.html
··· 33 33 {{ template "repo/fragments/time" $change.Timestamp }} 34 34 </div> 35 35 {{ end }} 36 - <div class="font-mono text-sm text-gray-500 dark:text-gray-400 break-all">{{ $change.Hash }}</div> 36 + <div class="font-mono text-sm text-gray-500 dark:text-gray-400 break-all flex items-center gap-2"> 37 + {{ $change.Hash }} 38 + {{ if mapContains $.VerifiedChanges $change.Hash }} 39 + <span class="bg-green-100 text-green-800 dark:bg-green-900 dark:text-green-200 px-2 py-0.5 rounded text-xs flex items-center gap-1"> 40 + {{ i "shield-check" "w-3 h-3" }} verified 41 + </span> 42 + {{ end }} 43 + </div> 37 44 </div> 38 45 {{ else }} 39 46 {{ placeholderAvatar "md" }}
+5
appview/pages/templates/repo/changes.html
··· 103 103 </div> 104 104 105 105 <div class="text-xs mt-2 text-gray-500 dark:text-gray-400 flex items-center"> 106 + {{ $verified := mapContains $.VerifiedChanges $change.Hash }} 106 107 {{ $hashStyle := "text-gray-700 dark:text-gray-300 bg-gray-100 dark:bg-gray-900" }} 108 + {{ if $verified }} 109 + {{ $hashStyle = "bg-green-100 text-green-800 dark:bg-green-900 dark:text-green-200" }} 110 + {{ end }} 107 111 <span class="font-mono"> 108 112 <a href="/{{ $.RepoInfo.FullName }}/change/{{ $change.Hash }}" 109 113 class="no-underline hover:underline {{ $hashStyle }} px-2 py-1 rounded flex items-center gap-2"> 110 114 {{ slice $change.Hash 0 12 }} 115 + {{ if $verified }}{{ i "shield-check" "w-4 h-4" }}{{ end }} 111 116 </a> 112 117 </span> 113 118 <span class="mx-2 before:content-['·'] before:select-none"></span>
+30 -7
appview/repo/changes.go
··· 9 9 "time" 10 10 11 11 "tangled.org/core/api/tangled" 12 + "tangled.org/core/appview/db" 12 13 "tangled.org/core/appview/oauth" 13 14 "tangled.org/core/appview/pages" 14 15 xrpcclient "tangled.org/core/appview/xrpcclient" ··· 96 97 changes = append(changes, view) 97 98 } 98 99 100 + // Look up verified push records for these changes. 101 + repoAt := fmt.Sprintf("at://%s/%s/%s", f.Did, "sh.tangled.repo", f.Name) 102 + changeHashes := make([]string, len(changes)) 103 + for i, c := range changes { 104 + changeHashes[i] = c.Hash 105 + } 106 + verifiedChanges, err := db.GetVerifiedPijulChanges(rp.db, repoAt, changeHashes) 107 + if err != nil { 108 + l.Warn("failed to look up verified pijul changes", "err", err) 109 + verifiedChanges = make(map[string]string) 110 + } 111 + 99 112 user := rp.oauth.GetMultiAccountUser(r) 100 113 rp.pages.RepoChanges(w, pages.RepoChangesParams{ 101 - LoggedInUser: user, 102 - RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 103 - Page: page, 104 - Changes: changes, 114 + LoggedInUser: user, 115 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 116 + Page: page, 117 + Changes: changes, 118 + VerifiedChanges: verifiedChanges, 105 119 }) 106 120 } 107 121 ··· 151 165 } 152 166 } 153 167 168 + // Look up verified push record for this change. 169 + repoAt := fmt.Sprintf("at://%s/%s/%s", f.Did, "sh.tangled.repo", f.Name) 170 + verifiedChanges, err := db.GetVerifiedPijulChanges(rp.db, repoAt, []string{resp.Hash}) 171 + if err != nil { 172 + l.Warn("failed to look up verified pijul change", "err", err) 173 + verifiedChanges = make(map[string]string) 174 + } 175 + 154 176 user := rp.oauth.GetMultiAccountUser(r) 155 177 rp.pages.RepoChange(w, pages.RepoChangeParams{ 156 - LoggedInUser: user, 157 - RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 158 - Change: change, 178 + LoggedInUser: user, 179 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 180 + Change: change, 181 + VerifiedChanges: verifiedChanges, 159 182 }) 160 183 } 161 184
-3
appview/repo/index.go
··· 159 159 currentRef string, 160 160 isDefaultRef bool, 161 161 ) ([]types.RepoLanguageDetails, error) { 162 - if repo.IsPijul() { 163 - return nil, nil 164 - } 165 162 // first attempt to fetch from db 166 163 langs, err := db.GetRepoLanguages( 167 164 rp.db,
+8
appview/state/knotstream.go
··· 334 334 return fmt.Errorf("pijulRefUpdate from %s for repo on %s, rejecting", source.Key(), repo.Knot) 335 335 } 336 336 337 + // Store change→committer mapping for verification badges. 338 + for _, hash := range record.Changes { 339 + if err := db.StorePijulChangePush(d, hash, record.CommitterDid, record.Repo, record.Channel); err != nil { 340 + // Non-fatal: log and continue. 341 + fmt.Printf("failed to store pijul change push: %v\n", err) 342 + } 343 + } 344 + 337 345 return db.AddPunch(d, models.Punch{ 338 346 Did: record.CommitterDid, 339 347 Date: time.Now(),
+3 -8
go.mod
··· 4 4 5 5 require ( 6 6 github.com/Blank-Xu/sql-adapter v1.1.1 7 + github.com/adrg/frontmatter v0.2.0 7 8 github.com/alecthomas/assert/v2 v2.11.0 8 9 github.com/alecthomas/chroma/v2 v2.23.1 9 10 github.com/avast/retry-go/v4 v4.6.1 10 11 github.com/aws/aws-sdk-go-v2 v1.41.4 12 + github.com/aws/aws-sdk-go-v2/config v1.32.12 11 13 github.com/aws/aws-sdk-go-v2/credentials v1.19.12 12 14 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 13 15 github.com/blevesearch/bleve/v2 v2.5.3 ··· 28 30 github.com/go-chi/chi/v5 v5.2.0 29 31 github.com/go-enry/go-enry/v2 v2.9.2 30 32 github.com/go-git/go-git/v5 v5.14.0 31 - github.com/goki/freetype v1.0.5 32 33 github.com/google/uuid v1.6.0 33 34 github.com/gorilla/feeds v1.2.0 34 35 github.com/gorilla/sessions v1.4.0 ··· 44 45 github.com/prometheus/client_golang v1.23.2 45 46 github.com/redis/go-redis/v9 v9.7.3 46 47 github.com/resend/resend-go/v2 v2.15.0 48 + github.com/robfig/cron/v3 v3.0.1 47 49 github.com/sethvargo/go-envconfig v1.1.0 48 50 github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c 49 51 github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef ··· 68 70 github.com/Microsoft/go-winio v0.6.2 // indirect 69 71 github.com/ProtonMail/go-crypto v1.3.0 // indirect 70 72 github.com/RoaringBitmap/roaring/v2 v2.4.5 // indirect 71 - github.com/adrg/frontmatter v0.2.0 // indirect 72 73 github.com/alecthomas/repr v0.5.2 // indirect 73 74 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect 74 75 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect 75 - github.com/aws/aws-sdk-go-v2/config v1.32.12 // indirect 76 76 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 // indirect 77 77 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect 78 78 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect ··· 227 227 go.opentelemetry.io/auto/sdk v1.2.1 // indirect 228 228 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // 
indirect 229 229 go.opentelemetry.io/otel v1.40.0 // indirect 230 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect 231 230 go.opentelemetry.io/otel/metric v1.40.0 // indirect 232 231 go.opentelemetry.io/otel/trace v1.40.0 // indirect 233 - go.opentelemetry.io/proto/otlp v1.9.0 // indirect 234 232 go.uber.org/atomic v1.11.0 // indirect 235 233 go.uber.org/multierr v1.11.0 // indirect 236 234 go.uber.org/zap v1.27.1 // indirect ··· 240 238 golang.org/x/sys v0.41.0 // indirect 241 239 golang.org/x/text v0.34.0 // indirect 242 240 golang.org/x/time v0.12.0 // indirect 243 - google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect 244 - google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect 245 - google.golang.org/grpc v1.78.0 // indirect 246 241 google.golang.org/protobuf v1.36.11 // indirect 247 242 gopkg.in/fsnotify.v1 v1.4.7 // indirect 248 243 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+6 -32
go.sum
··· 25 25 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= 26 26 github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= 27 27 github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= 28 - github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU= 29 - github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= 30 28 github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= 31 29 github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= 32 - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= 33 - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= 34 30 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= 35 31 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= 36 32 github.com/aws/aws-sdk-go-v2/config v1.32.12 h1:O3csC7HUGn2895eNrLytOJQdoL2xyJy0iYXhoZ1OmP0= 37 33 github.com/aws/aws-sdk-go-v2/config v1.32.12/go.mod h1:96zTvoOFR4FURjI+/5wY1vc1ABceROO4lWgWJuxgy0g= 38 - github.com/aws/aws-sdk-go-v2/credentials v1.19.9 h1:sWvTKsyrMlJGEuj/WgrwilpoJ6Xa1+KhIpGdzw7mMU8= 39 - github.com/aws/aws-sdk-go-v2/credentials v1.19.9/go.mod h1:+J44MBhmfVY/lETFiKI+klz0Vym2aCmIjqgClMmW82w= 40 34 github.com/aws/aws-sdk-go-v2/credentials v1.19.12 h1:oqtA6v+y5fZg//tcTWahyN9PEn5eDU/Wpvc2+kJ4aY8= 41 35 github.com/aws/aws-sdk-go-v2/credentials v1.19.12/go.mod h1:U3R1RtSHx6NB0DvEQFGyf/0sbrpJrluENHdPy1j/3TE= 42 36 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20 h1:zOgq3uezl5nznfoK3ODuqbhVg1JzAGDUhXOsU0IDCAo= 43 37 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.20/go.mod 
h1:z/MVwUARehy6GAg/yQ1GO2IMl0k++cu1ohP9zo887wE= 44 - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= 45 - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= 46 38 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 h1:CNXO7mvgThFGqOFgbNAP2nol2qAWBOGfqR/7tQlvLmc= 47 39 github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20/go.mod h1:oydPDJKcfMhgfcgBUZaG+toBbwy8yPWubJXBVERtI4o= 48 - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= 49 - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM= 50 40 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 h1:tN6W/hg+pkM+tf9XDkWUbDEjGLb+raoBMFsTodcoYKw= 51 41 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20/go.mod h1:YJ898MhD067hSHA6xYCx5ts/jEd8BSOLtQDL3iZsvbc= 52 42 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw= 53 43 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= 54 - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k= 55 - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0= 56 44 github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21 h1:SwGMTMLIlvDNyhMteQ6r8IJSBPlRdXX5d4idhIGbkXA= 57 45 github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.21/go.mod h1:UUxgWxofmOdAMuqEsSppbDtGKLfR04HGsD0HXzvhI1k= 58 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= 59 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= 60 46 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY= 
61 47 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= 62 - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs= 63 - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE= 64 48 github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12 h1:qtJZ70afD3ISKWnoX3xB0J2otEqu3LqicRcDBqsj0hQ= 65 49 github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.12/go.mod h1:v2pNpJbRNl4vEUWEh5ytQok0zACAKfdmKS51Hotc3pQ= 66 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY= 67 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU= 68 50 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 h1:2HvVAIq+YqgGotK6EkMf+KIEqTISmTYh5zLpYyeTo1Y= 69 51 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20/go.mod h1:V4X406Y666khGa8ghKmphma/7C0DAtEQYhkq9z4vpbk= 70 - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U= 71 - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g= 72 52 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 h1:siU1A6xjUZ2N8zjTHSXFhB9L/2OY8Dqs0xXiLjF30jA= 73 53 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20/go.mod h1:4TLZCmVJDM3FOu5P5TJP0zOlu9zWgDWU7aUxWbr+rcw= 74 - github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA= 75 - github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo= 76 54 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7MSNWeQ6eo247kE= 77 55 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod 
h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= 78 56 github.com/aws/aws-sdk-go-v2/service/signin v1.0.8 h1:0GFOLzEbOyZABS3PhYfBIx2rNBACYcKty+XGkTgw1ow= ··· 83 61 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.17/go.mod h1:Al9fFsXjv4KfbzQHGe6V4NZSZQXecFcvaIF4e70FoRA= 84 62 github.com/aws/aws-sdk-go-v2/service/sts v1.41.9 h1:Cng+OOwCHmFljXIxpEVXAGMnBia8MSU6Ch5i9PgBkcU= 85 63 github.com/aws/aws-sdk-go-v2/service/sts v1.41.9/go.mod h1:LrlIndBDdjA/EeXeyNBle+gyCwTlizzW5ycgWnvIxkk= 86 - github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= 87 - github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= 88 64 github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= 89 65 github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= 90 66 github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= ··· 205 181 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= 206 182 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 207 183 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 208 - github.com/did-method-plc/go-didplc v0.0.0-20250716171643-635da8b4e038 h1:AGh+Vn9fXhf9eo8erG1CK4+LACduPo64P1OICQLDv88= 209 - github.com/did-method-plc/go-didplc v0.0.0-20250716171643-635da8b4e038/go.mod h1:ddIXqTTSXWtj5kMsHAPj8SvbIx2GZdAkBFgFa6e6+CM= 210 184 github.com/did-method-plc/go-didplc v0.2.2 h1:53HFhTT8NCAeFmZ6fdIZCf3PGDvj7A3cDjzOOEqn5XM= 211 185 github.com/did-method-plc/go-didplc v0.2.2/go.mod h1:bKdJ21irnwNHgVLWWL32zUWqZueXYbJRUcxplZghByo= 212 186 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= ··· 280 254 github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= 281 255 
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 282 256 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 283 - github.com/goki/freetype v1.0.5 h1:yi2lQeUhXnBgSMqYd0vVmPw6RnnfIeTP3N4uvaJXd7A= 284 - github.com/goki/freetype v1.0.5/go.mod h1:wKmKxddbzKmeci9K96Wknn5kjTWLyfC8tKOqAFbEX8E= 285 257 github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= 286 258 github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= 287 259 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= ··· 333 305 github.com/gorilla/sessions v1.4.0/go.mod h1:FLWm50oby91+hl7p/wRxDth9bWSuk0qVL2emc7lT5ik= 334 306 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= 335 307 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= 336 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= 337 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= 308 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8 h1:NpbJl/eVbvrGE0MJ6X16X9SAifesl6Fwxg/YmCvubRI= 309 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.8/go.mod h1:mi7YA+gCzVem12exXy46ZespvGtX/lZmD/RLnQhVW7U= 338 310 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 339 311 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 340 312 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= ··· 542 514 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 543 515 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= 544 516 github.com/rivo/uniseg v0.4.7/go.mod 
h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 517 + github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= 518 + github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= 545 519 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 546 520 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= 547 521 github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= ··· 636 610 go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= 637 611 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= 638 612 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= 639 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= 640 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= 613 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= 614 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= 641 615 go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= 642 616 go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= 643 617 go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+23 -18
guard/guard.go
··· 1 1 package guard 2 2 3 3 import ( 4 - "bufio" 5 4 "context" 6 5 "errors" 7 6 "fmt" ··· 120 119 return fmt.Errorf("unsupported pijul protocol version") 121 120 } 122 121 123 - // If no repository is specified, this is a prove-only session. 124 - // (pijul identity prove connects without a repository path) 125 122 if repoPath == "" { 126 - l.Info("pijul prove session (no repo)", 127 - "user", incomingUser, 128 - "client", clientIP) 129 - 130 - stdinReader := bufio.NewReader(os.Stdin) 131 - if err := handlePijulProve(l, endpoint, incomingUser, stdinReader); err != nil { 132 - l.Error("prove failed", "error", err) 133 - fmt.Fprintf(os.Stderr, "prove failed: %v\n", err) 134 - return fmt.Errorf("prove failed: %v", err) 135 - } 136 - 137 - l.Info("prove completed", 138 - "user", incomingUser, 139 - "success", true) 140 - return nil 123 + l.Error("pijul protocol requires a repository path") 124 + fmt.Fprintln(os.Stderr, "repository path required") 125 + return fmt.Errorf("pijul protocol: missing repository path") 141 126 } 142 127 143 128 qualifiedRepoPath, err := guardAndQualifyRepo(l, endpoint, incomingUser, repoPath, "pijul-protocol") ··· 165 150 pijulCmd.Stderr = os.Stderr 166 151 pijulCmd.Stdin = os.Stdin 167 152 153 + // Snapshot existing change hashes before push. 154 + changesBefore := listPijulChangeHashes(fullPath) 155 + 168 156 if err := pijulCmd.Run(); err != nil { 169 157 l.Error("command failed", "error", err) 170 158 fmt.Fprintf(os.Stderr, "command failed: %v\n", err) ··· 176 164 "command", "pijul protocol", 177 165 "repo", repoPath, 178 166 "success", true) 167 + 168 + // Diff change hashes to find newly pushed changes. 
169 + changesAfter := listPijulChangeHashes(fullPath) 170 + var newChanges []string 171 + for hash := range changesAfter { 172 + if !changesBefore[hash] { 173 + newChanges = append(newChanges, hash) 174 + } 175 + } 176 + 177 + if len(newChanges) > 0 { 178 + l.Info("new pijul changes detected", "count", len(newChanges)) 179 + if err := callPijulPostApplyHook(endpoint, fullPath, incomingUser, newChanges); err != nil { 180 + l.Error("pijul post-apply hook failed", "error", err) 181 + // Non-fatal: push already succeeded 182 + } 183 + } 179 184 180 185 return nil 181 186 }
+70
guard/pijul_hook.go
··· 1 + package guard 2 + 3 + import ( 4 + "bytes" 5 + "encoding/json" 6 + "fmt" 7 + "net/http" 8 + "os" 9 + "path/filepath" 10 + "strings" 11 + ) 12 + 13 + // listPijulChangeHashes scans .pijul/changes/ and returns a set of base32 change hashes. 14 + // Change files are stored as .pijul/changes/AB/CDEF...XYZ.change where 15 + // the hash is the concatenation of the directory name and file stem. 16 + func listPijulChangeHashes(repoPath string) map[string]bool { 17 + hashes := make(map[string]bool) 18 + changesDir := filepath.Join(repoPath, ".pijul", "changes") 19 + 20 + prefixDirs, err := os.ReadDir(changesDir) 21 + if err != nil { 22 + return hashes 23 + } 24 + 25 + for _, prefix := range prefixDirs { 26 + if !prefix.IsDir() || len(prefix.Name()) != 2 { 27 + continue 28 + } 29 + subDir := filepath.Join(changesDir, prefix.Name()) 30 + files, err := os.ReadDir(subDir) 31 + if err != nil { 32 + continue 33 + } 34 + for _, f := range files { 35 + name := f.Name() 36 + if !strings.HasSuffix(name, ".change") { 37 + continue 38 + } 39 + stem := strings.TrimSuffix(name, ".change") 40 + hash := prefix.Name() + stem 41 + hashes[hash] = true 42 + } 43 + } 44 + 45 + return hashes 46 + } 47 + 48 + // callPijulPostApplyHook notifies the knotserver about newly pushed pijul changes. 49 + func callPijulPostApplyHook(endpoint, repoPath, userDid string, changes []string) error { 50 + body, err := json.Marshal(map[string]interface{}{ 51 + "repo_path": repoPath, 52 + "user_did": userDid, 53 + "changes": changes, 54 + }) 55 + if err != nil { 56 + return fmt.Errorf("marshal hook payload: %w", err) 57 + } 58 + 59 + resp, err := http.Post(endpoint+"/hooks/pijul-post-apply", "application/json", bytes.NewReader(body)) 60 + if err != nil { 61 + return fmt.Errorf("POST pijul-post-apply: %w", err) 62 + } 63 + defer resp.Body.Close() 64 + 65 + if resp.StatusCode != http.StatusOK { 66 + return fmt.Errorf("pijul-post-apply returned %d", resp.StatusCode) 67 + } 68 + 69 + return nil 70 + }
-174
guard/prove.go
··· 1 - package guard 2 - 3 - import ( 4 - "bufio" 5 - "bytes" 6 - "crypto/ed25519" 7 - "crypto/rand" 8 - "encoding/json" 9 - "fmt" 10 - "io" 11 - "log/slog" 12 - "math/big" 13 - "net/http" 14 - "os" 15 - "strings" 16 - ) 17 - 18 - // pijulPublicKey matches the JSON format sent by pijul's prove protocol. 19 - // Example: {"version":1,"algorithm":"Ed25519","key":"<base58>","signature":"<base58>"} 20 - type pijulPublicKey struct { 21 - Version int `json:"version"` 22 - Algorithm string `json:"algorithm"` 23 - Key string `json:"key"` 24 - Signature string `json:"signature"` 25 - Expires string `json:"expires,omitempty"` 26 - } 27 - 28 - // handlePijulProve handles the pijul identity prove protocol over stdin/stdout. 29 - // Protocol: 30 - // 1. Client sends: challenge <json_public_key>\n 31 - // 2. Server sends: <random_challenge>\n 32 - // 3. Client sends: prove <base58_signature>\n 33 - // 4. Server sends: \n (success) 34 - func handlePijulProve(l *slog.Logger, endpoint string, did string, reader *bufio.Reader) error { 35 - // Read the challenge request 36 - line, err := reader.ReadString('\n') 37 - if err != nil { 38 - return fmt.Errorf("failed to read from stdin: %w", err) 39 - } 40 - line = strings.TrimRight(line, "\n\r") 41 - 42 - if !strings.HasPrefix(line, "challenge ") { 43 - return fmt.Errorf("expected 'challenge' message, got: %q", line) 44 - } 45 - 46 - pubKeyJSON := strings.TrimPrefix(line, "challenge ") 47 - 48 - var pubKey pijulPublicKey 49 - if err := json.Unmarshal([]byte(pubKeyJSON), &pubKey); err != nil { 50 - return fmt.Errorf("failed to parse public key JSON: %w", err) 51 - } 52 - 53 - if pubKey.Algorithm != "Ed25519" { 54 - return fmt.Errorf("unsupported algorithm: %s", pubKey.Algorithm) 55 - } 56 - 57 - // Decode the base58 public key 58 - rawPubKey, err := base58Decode(pubKey.Key) 59 - if err != nil { 60 - return fmt.Errorf("failed to decode public key: %w", err) 61 - } 62 - if len(rawPubKey) != ed25519.PublicKeySize { 63 - return 
fmt.Errorf("invalid public key size: %d", len(rawPubKey)) 64 - } 65 - 66 - // Generate random challenge (30 alphanumeric characters, matching Nest) 67 - challenge := generateChallenge(30) 68 - 69 - l.Info("prove: sending challenge", "did", did, "pubkey", pubKey.Key) 70 - 71 - // Send the challenge 72 - fmt.Fprintf(os.Stdout, "%s\n", challenge) 73 - 74 - // Read the prove response 75 - line, err = reader.ReadString('\n') 76 - if err != nil { 77 - return fmt.Errorf("failed to read prove response: %w", err) 78 - } 79 - line = strings.TrimRight(line, "\n\r") 80 - 81 - if !strings.HasPrefix(line, "prove ") { 82 - return fmt.Errorf("expected 'prove' message, got: %q", line) 83 - } 84 - 85 - signatureB58 := strings.TrimPrefix(line, "prove ") 86 - 87 - // Decode the base58 signature 88 - signature, err := base58Decode(signatureB58) 89 - if err != nil { 90 - return fmt.Errorf("failed to decode signature: %w", err) 91 - } 92 - 93 - // Verify the Ed25519 signature 94 - if !ed25519.Verify(ed25519.PublicKey(rawPubKey), []byte(challenge), signature) { 95 - l.Warn("prove: signature verification failed", "did", did) 96 - fmt.Fprintf(os.Stderr, "Signature verification failed\n") 97 - return fmt.Errorf("signature verification failed") 98 - } 99 - 100 - l.Info("prove: signature verified", "did", did, "pubkey", pubKey.Key) 101 - 102 - // Store the pubkey → DID mapping via knotserver internal API 103 - if err := storePijulProve(endpoint, pubKey.Key, did); err != nil { 104 - l.Error("prove: failed to store mapping", "err", err) 105 - return fmt.Errorf("failed to store prove result: %w", err) 106 - } 107 - 108 - // Send success (empty line) 109 - fmt.Fprintf(os.Stdout, "\n") 110 - 111 - l.Info("prove: completed successfully", "did", did, "pubkey", pubKey.Key) 112 - return nil 113 - } 114 - 115 - func storePijulProve(endpoint, publicKey, did string) error { 116 - body, _ := json.Marshal(map[string]string{ 117 - "public_key": publicKey, 118 - "did": did, 119 - }) 120 - 121 - resp, err := 
http.Post(endpoint+"/pijul-prove", "application/json", bytes.NewReader(body)) 122 - if err != nil { 123 - return err 124 - } 125 - defer resp.Body.Close() 126 - 127 - if resp.StatusCode != http.StatusOK { 128 - respBody, _ := io.ReadAll(resp.Body) 129 - return fmt.Errorf("knotserver returned %d: %s", resp.StatusCode, string(respBody)) 130 - } 131 - return nil 132 - } 133 - 134 - func generateChallenge(length int) string { 135 - const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 136 - b := make([]byte, length) 137 - for i := range b { 138 - n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) 139 - b[i] = charset[n.Int64()] 140 - } 141 - return string(b) 142 - } 143 - 144 - // base58Decode decodes a base58-encoded string (Bitcoin alphabet). 145 - func base58Decode(input string) ([]byte, error) { 146 - const alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" 147 - 148 - result := big.NewInt(0) 149 - base := big.NewInt(58) 150 - 151 - for _, c := range input { 152 - idx := strings.IndexRune(alphabet, c) 153 - if idx < 0 { 154 - return nil, fmt.Errorf("invalid base58 character: %c", c) 155 - } 156 - result.Mul(result, base) 157 - result.Add(result, big.NewInt(int64(idx))) 158 - } 159 - 160 - // Count leading '1's (zero bytes) 161 - leadingZeros := 0 162 - for _, c := range input { 163 - if c != '1' { 164 - break 165 - } 166 - leadingZeros++ 167 - } 168 - 169 - resultBytes := result.Bytes() 170 - decoded := make([]byte, leadingZeros+len(resultBytes)) 171 - copy(decoded[leadingZeros:], resultBytes) 172 - 173 - return decoded, nil 174 - }
+26 -6
knotserver/db/db.go
··· 81 81 name text unique 82 82 ); 83 83 84 - create table if not exists pijul_signing_keys ( 85 - public_key text primary key, 86 - did text not null, 87 - created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 88 - foreign key (did) references known_dids(did) on delete cascade 89 - ); 90 84 `) 91 85 if err != nil { 92 86 return nil, err ··· 195 189 ownerDid = nullOwner.String 196 190 repoName = nullName.String 197 191 return 192 + } 193 + 194 + // RepoInfo holds basic repo metadata from the repo_keys table. 195 + type RepoInfo struct { 196 + RepoDid string 197 + OwnerDid string 198 + RepoName string 199 + } 200 + 201 + // ListAllRepos returns all repos registered in the repo_keys table. 202 + func (d *DB) ListAllRepos() ([]RepoInfo, error) { 203 + rows, err := d.db.Query(`SELECT repo_did, owner_did, repo_name FROM repo_keys WHERE owner_did IS NOT NULL AND repo_name IS NOT NULL`) 204 + if err != nil { 205 + return nil, err 206 + } 207 + defer rows.Close() 208 + 209 + var repos []RepoInfo 210 + for rows.Next() { 211 + var r RepoInfo 212 + if err := rows.Scan(&r.RepoDid, &r.OwnerDid, &r.RepoName); err != nil { 213 + return nil, err 214 + } 215 + repos = append(repos, r) 216 + } 217 + return repos, rows.Err() 198 218 } 199 219 200 220 func (d *DB) ResolveRepoDIDOnDisk(scanPath, repoDid string) (repoPath, ownerDid, repoName string, err error) {
-54
knotserver/db/pijulkeys.go
··· 1 - package db 2 - 3 - import "strings" 4 - 5 - // StorePijulSigningKey stores a verified pijul public key → DID mapping. 6 - func (d *DB) StorePijulSigningKey(publicKey, did string) error { 7 - _, err := d.db.Exec(` 8 - insert or replace into pijul_signing_keys (public_key, did) 9 - values (?, ?) 10 - `, publicKey, did) 11 - return err 12 - } 13 - 14 - // GetDidForPijulKey returns the DID associated with a pijul public key. 15 - func (d *DB) GetDidForPijulKey(publicKey string) (string, error) { 16 - var did string 17 - err := d.db.QueryRow(` 18 - select did from pijul_signing_keys where public_key = ? 19 - `, publicKey).Scan(&did) 20 - return did, err 21 - } 22 - 23 - // GetPijulKeyToDid returns a map of pijul public key → DID for the given keys. 24 - func (d *DB) GetPijulKeyToDid(keys []string) (map[string]string, error) { 25 - if len(keys) == 0 { 26 - return make(map[string]string), nil 27 - } 28 - 29 - placeholders := make([]string, len(keys)) 30 - args := make([]any, len(keys)) 31 - for i, k := range keys { 32 - placeholders[i] = "?" 33 - args[i] = k 34 - } 35 - 36 - rows, err := d.db.Query(` 37 - select public_key, did from pijul_signing_keys 38 - where public_key in (`+strings.Join(placeholders, ",")+`) 39 - `, args...) 40 - if err != nil { 41 - return nil, err 42 - } 43 - defer rows.Close() 44 - 45 - result := make(map[string]string) 46 - for rows.Next() { 47 - var key, did string 48 - if err := rows.Scan(&key, &did); err != nil { 49 - return nil, err 50 - } 51 - result[key] = did 52 - } 53 - return result, rows.Err() 54 - }
+73 -14
knotserver/internal.go
··· 20 20 "tangled.org/core/knotserver/config" 21 21 "tangled.org/core/knotserver/db" 22 22 "tangled.org/core/knotserver/git" 23 + "tangled.org/core/knotserver/pijul" 23 24 "tangled.org/core/log" 24 25 "tangled.org/core/notifier" 25 26 "tangled.org/core/rbac" ··· 487 488 return nil 488 489 } 489 490 490 - // PijulProve stores a verified pijul public key → DID mapping. 491 - // Called by the guard after a successful prove challenge-response. 492 - func (h *InternalHandle) PijulProve(w http.ResponseWriter, r *http.Request) { 493 - l := h.l.With("handler", "PijulProve") 491 + // PijulPostApplyHook handles POST /hooks/pijul-post-apply. 492 + // Called by the guard after a successful pijul push (SSH or HTTP). 493 + // Verifies change author DIDs and emits PijulRefUpdate events. 494 + func (h *InternalHandle) PijulPostApplyHook(w http.ResponseWriter, r *http.Request) { 495 + l := h.l.With("handler", "PijulPostApplyHook") 494 496 495 497 var req struct { 496 - PublicKey string `json:"public_key"` 497 - Did string `json:"did"` 498 + RepoPath string `json:"repo_path"` 499 + UserDid string `json:"user_did"` 500 + Changes []string `json:"changes"` 501 + Channel string `json:"channel,omitempty"` 498 502 } 499 503 if err := json.NewDecoder(r.Body).Decode(&req); err != nil { 500 504 l.Error("failed to decode request", "err", err) 501 505 w.WriteHeader(http.StatusBadRequest) 502 - fmt.Fprintln(w, "invalid request body") 506 + return 507 + } 508 + 509 + if req.RepoPath == "" || req.UserDid == "" || len(req.Changes) == 0 { 510 + w.WriteHeader(http.StatusBadRequest) 511 + fmt.Fprintln(w, "repo_path, user_did, and changes are required") 503 512 return 504 513 } 505 514 506 - if req.PublicKey == "" || req.Did == "" { 515 + // Resolve repo metadata from the on-disk path. 
516 + repoRelPath, err := filepath.Rel(h.c.Repo.ScanPath, req.RepoPath) 517 + if err != nil { 518 + l.Error("failed to compute relative repo path", "err", err) 519 + w.WriteHeader(http.StatusBadRequest) 520 + return 521 + } 522 + parts := strings.SplitN(repoRelPath, "/", 2) 523 + if len(parts) != 2 { 524 + l.Error("invalid repo path structure", "path", repoRelPath) 507 525 w.WriteHeader(http.StatusBadRequest) 508 - fmt.Fprintln(w, "missing public_key or did") 509 526 return 510 527 } 528 + ownerDid := parts[0] 529 + repoName := parts[1] 530 + repoDid, dbErr := h.db.GetRepoDid(ownerDid, repoName) 531 + if dbErr != nil { 532 + repoDid = ownerDid + "/" + repoName 533 + } 511 534 512 - if err := h.db.StorePijulSigningKey(req.PublicKey, req.Did); err != nil { 513 - l.Error("failed to store pijul signing key", "err", err, "did", req.Did) 535 + // Open repo to get channel state. 536 + channel := req.Channel 537 + if channel == "" { 538 + channel = "main" 539 + } 540 + pr, err := pijul.Open(req.RepoPath, channel) 541 + if err != nil { 542 + l.Error("failed to open pijul repo", "err", err) 514 543 w.WriteHeader(http.StatusInternalServerError) 515 - fmt.Fprintln(w, "failed to store signing key") 544 + return 545 + } 546 + 547 + // Get channel state and emit event. 
548 + newState, _ := pr.ChannelState(channel) 549 + repoAt := fmt.Sprintf("at://%s/%s/%s", repoDid, tangled.RepoNSID, repoName) 550 + 551 + record := tangled.PijulRefUpdate{ 552 + Repo: repoAt, 553 + Channel: channel, 554 + Changes: req.Changes, 555 + CommitterDid: req.UserDid, 556 + NewState: newState, 557 + } 558 + 559 + eventJson, err := json.Marshal(record) 560 + if err != nil { 561 + l.Error("failed to marshal pijulRefUpdate event", "err", err) 562 + w.WriteHeader(http.StatusInternalServerError) 563 + return 564 + } 565 + 566 + event := db.Event{ 567 + Rkey: TID(), 568 + Nsid: tangled.PijulRefUpdateNSID, 569 + EventJson: string(eventJson), 570 + } 571 + 572 + if err := h.db.InsertEvent(event, h.n); err != nil { 573 + l.Error("failed to insert pijulRefUpdate event", "err", err) 574 + w.WriteHeader(http.StatusInternalServerError) 516 575 return 517 576 } 518 577 519 - l.Info("stored pijul signing key", "did", req.Did, "public_key", req.PublicKey) 578 + l.Info("pijul post-apply hook completed", "repo", repoAt, "changes", len(req.Changes), "committer", req.UserDid) 520 579 w.WriteHeader(http.StatusOK) 521 580 } 522 581 ··· 539 598 r.Get("/keys", h.InternalKeys) 540 599 r.Get("/guard", h.Guard) 541 600 r.Post("/hooks/post-receive", h.PostReceiveHook) 542 - r.Post("/pijul-prove", h.PijulProve) 601 + r.Post("/hooks/pijul-post-apply", h.PijulPostApplyHook) 543 602 r.Mount("/debug", middleware.Profiler()) 544 603 545 604 return r
+30 -2
knotserver/pijul/change.go
··· 251 251 // DID in hashed [[authors]]: did = 'did:plc:...' 252 252 if !inIdentity && strings.HasPrefix(line, "did = ") { 253 253 did := unquoteTOMLValue(strings.TrimPrefix(line, "did = ")) 254 - change.Authors = append(change.Authors, Author{DID: did}) 254 + if len(change.Authors) > 0 { 255 + change.Authors[len(change.Authors)-1].DID = did 256 + } else { 257 + change.Authors = append(change.Authors, Author{DID: did}) 258 + } 255 259 continue 256 260 } 257 261 ··· 311 315 return change, scanner.Err() 312 316 } 313 317 318 + // ExtractAuthorDID returns the DID from a change's [[authors]] section. 319 + // Returns empty string if the change has no DID. 320 + func (p *PijulRepo) ExtractAuthorDID(hash string) (string, error) { 321 + output, err := p.change(hash) 322 + if err != nil { 323 + return "", fmt.Errorf("pijul change %s: %w", hash, err) 324 + } 325 + change, err := parseChangeOutput(hash, output) 326 + if err != nil { 327 + return "", err 328 + } 329 + for _, a := range change.Authors { 330 + if a.DID != "" { 331 + return a.DID, nil 332 + } 333 + } 334 + return "", nil 335 + } 336 + 314 337 // unquoteTOMLValue strips surrounding single or double quotes from a TOML value. 315 338 func unquoteTOMLValue(s string) string { 316 339 s = strings.TrimSpace(s) ··· 322 345 return s 323 346 } 324 347 325 - // parseAuthor parses an author string like "Name <email>" 348 + // parseAuthor parses an author string like "Name <email>" or a DID like "did:web:example.com" 326 349 func parseAuthor(s string) Author { 327 350 s = strings.TrimSpace(s) 351 + 352 + // DID as author (pijul records DID directly in [[authors]]) 353 + if strings.HasPrefix(s, "did:") { 354 + return Author{DID: s} 355 + } 328 356 329 357 // Try to extract email from angle brackets 330 358 if start := strings.Index(s, "<"); start != -1 {
+2 -12
knotserver/pijul/repo.go
··· 76 76 return err 77 77 } 78 78 79 - // Unrecord removes a change from the channel and resets the working copy 80 - // to match (like git reset --hard). 79 + // Unrecord removes a change from the channel. 81 80 func (p *PijulRepo) Unrecord(changeHash string) error { 82 81 args := []string{changeHash} 83 82 if p.channelName != "" { 84 83 args = append(args, "--channel", p.channelName) 85 84 } 86 - if _, err := p.runPijulCmd("unrecord", args...); err != nil { 87 - return err 88 - } 89 - 90 - // Reset working copy to match the channel state 91 - resetArgs := []string{} 92 - if p.channelName != "" { 93 - resetArgs = append(resetArgs, "--channel", p.channelName) 94 - } 95 - _, err := p.runPijulCmd("reset", resetArgs...) 85 + _, err := p.runPijulCmd("unrecord", args...) 96 86 return err 97 87 } 98 88
+1 -1
knotserver/pijul_http.go
··· 241 241 return 242 242 } 243 243 244 - // 5. Get channel state and emit SSE event (best-effort). 244 + // 6. Get channel state and emit SSE event (best-effort). 245 245 newState, _ := pr.ChannelState(channel) 246 246 go h.emitPijulRefUpdate(pusherDid, ownerDid, repoName, repoDid, channel, hash, newState) 247 247
+296
knotserver/scheduler.go
··· 1 + package knotserver 2 + 3 + import ( 4 + "context" 5 + "encoding/json" 6 + "fmt" 7 + "log/slog" 8 + "path/filepath" 9 + "sync" 10 + "time" 11 + 12 + "github.com/robfig/cron/v3" 13 + "tangled.org/core/api/tangled" 14 + "tangled.org/core/knotserver/config" 15 + "tangled.org/core/knotserver/db" 16 + "tangled.org/core/knotserver/git" 17 + "tangled.org/core/notifier" 18 + "tangled.org/core/workflow" 19 + ) 20 + 21 + // Scheduler manages cron-based pipeline triggers for repos that define 22 + // schedule events in their workflow files. 23 + type Scheduler struct { 24 + cron *cron.Cron 25 + db *db.DB 26 + cfg *config.Config 27 + l *slog.Logger 28 + n *notifier.Notifier 29 + 30 + // tracks registered cron entries per repo+workflow+cron to avoid duplicates 31 + mu sync.Mutex 32 + entries map[string]cron.EntryID // key: "repoDid:workflowName:cronExpr" 33 + lastScan map[string]string // key: "repoDid:workflowName", value: cronExpr 34 + } 35 + 36 + // NewScheduler creates a new cron scheduler for pipeline triggers. 37 + func NewScheduler(d *db.DB, cfg *config.Config, l *slog.Logger, n *notifier.Notifier) *Scheduler { 38 + return &Scheduler{ 39 + cron: cron.New(), 40 + db: d, 41 + cfg: cfg, 42 + l: l, 43 + n: n, 44 + entries: make(map[string]cron.EntryID), 45 + lastScan: make(map[string]string), 46 + } 47 + } 48 + 49 + // Start begins the scheduler. It performs an initial scan of all repos, 50 + // then rescans periodically to pick up new or changed schedule definitions. 
51 + func (s *Scheduler) Start(ctx context.Context) { 52 + s.l.Info("starting cron scheduler") 53 + 54 + s.scanRepos() 55 + s.cron.Start() 56 + 57 + ticker := time.NewTicker(5 * time.Minute) 58 + defer ticker.Stop() 59 + 60 + for { 61 + select { 62 + case <-ctx.Done(): 63 + s.l.Info("stopping cron scheduler") 64 + s.cron.Stop() 65 + return 66 + case <-ticker.C: 67 + s.scanRepos() 68 + } 69 + } 70 + } 71 + 72 + // scanRepos iterates all repos and registers/updates cron entries for 73 + // workflows that have schedule event constraints. 74 + func (s *Scheduler) scanRepos() { 75 + repos, err := s.db.ListAllRepos() 76 + if err != nil { 77 + s.l.Error("scheduler: failed to list repos", "error", err) 78 + return 79 + } 80 + 81 + s.mu.Lock() 82 + defer s.mu.Unlock() 83 + 84 + // Track which entries are still active after this scan 85 + activeKeys := make(map[string]bool) 86 + 87 + for _, repo := range repos { 88 + schedules := s.extractSchedules(repo) 89 + for _, sched := range schedules { 90 + entryKey := fmt.Sprintf("%s:%s:%s", repo.RepoDid, sched.workflowName, sched.cronExpr) 91 + activeKeys[entryKey] = true 92 + 93 + // Already registered with same cron expression 94 + if _, exists := s.entries[entryKey]; exists { 95 + continue 96 + } 97 + 98 + // Remove old entry for this workflow if cron changed 99 + scanKey := fmt.Sprintf("%s:%s", repo.RepoDid, sched.workflowName) 100 + if oldExpr, ok := s.lastScan[scanKey]; ok && oldExpr != sched.cronExpr { 101 + oldKey := fmt.Sprintf("%s:%s:%s", repo.RepoDid, sched.workflowName, oldExpr) 102 + if oldID, exists := s.entries[oldKey]; exists { 103 + s.cron.Remove(oldID) 104 + delete(s.entries, oldKey) 105 + } 106 + } 107 + 108 + // Register new cron entry 109 + repoInfo := repo 110 + cronExpr := sched.cronExpr 111 + entryID, err := s.cron.AddFunc(cronExpr, func() { 112 + s.triggerPipeline(repoInfo, cronExpr) 113 + }) 114 + if err != nil { 115 + s.l.Error("scheduler: invalid cron expression", 116 + "repo", repo.RepoName, 117 + 
"owner", repo.OwnerDid, 118 + "cron", cronExpr, 119 + "error", err, 120 + ) 121 + continue 122 + } 123 + 124 + s.entries[entryKey] = entryID 125 + s.lastScan[scanKey] = cronExpr 126 + s.l.Info("scheduler: registered cron", 127 + "repo", repo.RepoName, 128 + "owner", repo.OwnerDid, 129 + "cron", cronExpr, 130 + "workflow", sched.workflowName, 131 + ) 132 + } 133 + } 134 + 135 + // Remove entries for repos/workflows that no longer have schedules 136 + for key, entryID := range s.entries { 137 + if !activeKeys[key] { 138 + s.cron.Remove(entryID) 139 + delete(s.entries, key) 140 + s.l.Info("scheduler: removed stale cron", "key", key) 141 + } 142 + } 143 + } 144 + 145 + type scheduleInfo struct { 146 + workflowName string 147 + cronExpr string 148 + } 149 + 150 + // extractSchedules reads workflow files from a repo and returns any 151 + // schedule event constraints with their cron expressions. 152 + func (s *Scheduler) extractSchedules(repo db.RepoInfo) []scheduleInfo { 153 + repoPath, _, _, err := s.db.ResolveRepoDIDOnDisk(s.cfg.Repo.ScanPath, repo.RepoDid) 154 + if err != nil { 155 + s.l.Debug("scheduler: cannot resolve repo on disk", "repoDid", repo.RepoDid, "error", err) 156 + return nil 157 + } 158 + 159 + gr, err := git.Open(repoPath, "") 160 + if err != nil { 161 + s.l.Debug("scheduler: cannot open repo", "repoDid", repo.RepoDid, "error", err) 162 + return nil 163 + } 164 + 165 + workflowDir, err := gr.FileTree(context.Background(), workflow.WorkflowDir) 166 + if err != nil { 167 + // No workflow directory is normal for most repos 168 + return nil 169 + } 170 + 171 + var schedules []scheduleInfo 172 + for _, entry := range workflowDir { 173 + if !entry.IsFile() { 174 + continue 175 + } 176 + 177 + fpath := filepath.Join(workflow.WorkflowDir, entry.Name) 178 + contents, err := gr.RawContent(fpath) 179 + if err != nil { 180 + continue 181 + } 182 + 183 + wf, err := workflow.FromFile(entry.Name, contents) 184 + if err != nil { 185 + continue 186 + } 187 + 188 + for 
_, constraint := range wf.When { 189 + if !constraint.MatchEvent(string(workflow.TriggerKindSchedule)) { 190 + continue 191 + } 192 + for _, cronExpr := range constraint.Cron { 193 + schedules = append(schedules, scheduleInfo{ 194 + workflowName: entry.Name, 195 + cronExpr: cronExpr, 196 + }) 197 + } 198 + } 199 + } 200 + 201 + return schedules 202 + } 203 + 204 + // triggerPipeline creates a pipeline event for a scheduled workflow run, 205 + // following the same pattern as push/PR triggers in internal.go. 206 + func (s *Scheduler) triggerPipeline(repo db.RepoInfo, cronExpr string) { 207 + repoPath, _, _, err := s.db.ResolveRepoDIDOnDisk(s.cfg.Repo.ScanPath, repo.RepoDid) 208 + if err != nil { 209 + s.l.Error("scheduler: cannot resolve repo for trigger", "repoDid", repo.RepoDid, "error", err) 210 + return 211 + } 212 + 213 + gr, err := git.Open(repoPath, "") 214 + if err != nil { 215 + s.l.Error("scheduler: cannot open repo for trigger", "repoDid", repo.RepoDid, "error", err) 216 + return 217 + } 218 + 219 + defaultBranch, err := gr.FindMainBranch() 220 + if err != nil { 221 + s.l.Error("scheduler: cannot find default branch", "repoDid", repo.RepoDid, "error", err) 222 + return 223 + } 224 + 225 + workflowDir, err := gr.FileTree(context.Background(), workflow.WorkflowDir) 226 + if err != nil { 227 + s.l.Error("scheduler: cannot read workflow dir", "repoDid", repo.RepoDid, "error", err) 228 + return 229 + } 230 + 231 + var pipeline workflow.RawPipeline 232 + for _, entry := range workflowDir { 233 + if !entry.IsFile() { 234 + continue 235 + } 236 + 237 + fpath := filepath.Join(workflow.WorkflowDir, entry.Name) 238 + contents, err := gr.RawContent(fpath) 239 + if err != nil { 240 + continue 241 + } 242 + 243 + pipeline = append(pipeline, workflow.RawWorkflow{ 244 + Name: entry.Name, 245 + Contents: contents, 246 + }) 247 + } 248 + 249 + triggerRepo := &tangled.Pipeline_TriggerRepo{ 250 + Did: repo.OwnerDid, 251 + Knot: s.cfg.Server.Hostname, 252 + Repo: 
&repo.RepoName, 253 + RepoDid: &repo.RepoDid, 254 + DefaultBranch: defaultBranch, 255 + } 256 + 257 + compiler := workflow.Compiler{ 258 + Trigger: tangled.Pipeline_TriggerMetadata{ 259 + Kind: string(workflow.TriggerKindSchedule), 260 + Schedule: &tangled.Pipeline_ScheduleTriggerData{ 261 + Cron: cronExpr, 262 + }, 263 + Repo: triggerRepo, 264 + }, 265 + } 266 + 267 + cp := compiler.Compile(compiler.Parse(pipeline)) 268 + 269 + // Do not run empty pipelines 270 + if cp.Workflows == nil { 271 + return 272 + } 273 + 274 + eventJson, err := json.Marshal(cp) 275 + if err != nil { 276 + s.l.Error("scheduler: failed to marshal pipeline", "error", err) 277 + return 278 + } 279 + 280 + event := db.Event{ 281 + Rkey: TID(), 282 + Nsid: tangled.PipelineNSID, 283 + EventJson: string(eventJson), 284 + } 285 + 286 + if err := s.db.InsertEvent(event, s.n); err != nil { 287 + s.l.Error("scheduler: failed to insert pipeline event", "error", err) 288 + return 289 + } 290 + 291 + s.l.Info("scheduler: triggered pipeline", 292 + "repo", repo.RepoName, 293 + "owner", repo.OwnerDid, 294 + "cron", cronExpr, 295 + ) 296 + }
+4
knotserver/server.go
··· 91 91 92 92 go migrateReposOnStartup(ctx, c, db, e, &notifier, log.SubLogger(logger, "migrate")) 93 93 94 + // Start the cron scheduler for scheduled pipeline triggers 95 + sched := NewScheduler(db, c, log.SubLogger(logger, "scheduler"), &notifier) 96 + go sched.Start(ctx) 97 + 94 98 mux, err := Setup(ctx, c, db, e, jc, &notifier) 95 99 if err != nil { 96 100 return fmt.Errorf("failed to setup server: %w", err)
+95
knotserver/vcs/language.go
··· 1 + package vcs 2 + 3 + import ( 4 + "context" 5 + "path" 6 + "strings" 7 + 8 + "github.com/go-enry/go-enry/v2" 9 + ) 10 + 11 + // LangBreakdown maps language names to byte sizes. 12 + type LangBreakdown map[string]int64 13 + 14 + // AnalyzeLanguages walks the repository file tree and returns a breakdown 15 + // of programming languages by byte size. Works with any VCS backend. 16 + func AnalyzeLanguages(ctx context.Context, repo ReadRepo) (LangBreakdown, error) { 17 + sizes := make(map[string]int64) 18 + 19 + var walk func(dir string) error 20 + walk = func(dir string) error { 21 + if ctx.Err() != nil { 22 + return ctx.Err() 23 + } 24 + 25 + entries, err := repo.FileTree(ctx, dir) 26 + if err != nil { 27 + return err 28 + } 29 + 30 + for _, entry := range entries { 31 + filepath := path.Join(dir, entry.Name) 32 + 33 + if entry.Mode == "dir" || entry.Mode == "040000" { 34 + if err := walk(filepath); err != nil { 35 + return err 36 + } 37 + continue 38 + } 39 + 40 + content, err := repo.FileContentN(filepath, 16*1024) 41 + if err != nil { 42 + continue // skip unreadable/binary files 43 + } 44 + 45 + if enry.IsGenerated(filepath, content) || 46 + enry.IsBinary(content) || 47 + strings.HasSuffix(filepath, "bun.lock") { 48 + continue 49 + } 50 + 51 + language := detectLanguage(entry.Name, content) 52 + if group := enry.GetLanguageGroup(language); group != "" { 53 + language = group 54 + } 55 + 56 + langType := enry.GetLanguageType(language) 57 + if langType != enry.Programming && langType != enry.Markup && langType != enry.Unknown { 58 + continue 59 + } 60 + 61 + sizes[language] += entry.Size 62 + 63 + // If size is 0 (some VCS don't report size in tree), estimate from content 64 + if entry.Size == 0 && len(content) > 0 { 65 + sizes[language] += int64(len(content)) 66 + } 67 + } 68 + 69 + return nil 70 + } 71 + 72 + if err := walk(""); err != nil { 73 + return nil, err 74 + } 75 + 76 + return sizes, nil 77 + } 78 + 79 + func detectLanguage(name string, content 
[]byte) string { 80 + language, ok := enry.GetLanguageByExtension(name) 81 + if ok { 82 + return language 83 + } 84 + 85 + language, ok = enry.GetLanguageByFilename(name) 86 + if ok { 87 + return language 88 + } 89 + 90 + if len(content) == 0 { 91 + return enry.OtherLanguage 92 + } 93 + 94 + return enry.GetLanguage(name, content) 95 + }
+4 -20
knotserver/xrpc/pijul_changes.go
··· 91 91 return 92 92 } 93 93 94 - // Collect all author keys to resolve to DIDs 95 - authorKeys := make([]string, 0, len(entries)) 96 - for _, e := range entries { 97 - if e.Author.Name != "" { 98 - authorKeys = append(authorKeys, e.Author.Name) 99 - } 100 - } 101 - keyToDid, err := x.Db.GetPijulKeyToDid(authorKeys) 102 - if err != nil { 103 - x.Logger.Warn("failed to resolve pijul keys to DIDs", "error", err) 104 - keyToDid = make(map[string]string) 105 - } 106 - 107 - // Convert to response format 94 + // Convert to response format. 95 + // DIDs are embedded directly in pijul change files by the record command. 108 96 changeEntries := make([]PijulChangeEntry, len(entries)) 109 97 for i, e := range entries { 110 98 author := PijulAuthor{ 111 99 Name: e.Author.Name, 112 100 Email: e.Author.Email, 113 - } 114 - if did, ok := keyToDid[e.Author.Name]; ok { 115 - author.Did = did 101 + Did: e.Author.DID, 116 102 } 117 103 118 104 changeEntries[i] = PijulChangeEntry{ ··· 189 175 author := PijulAuthor{ 190 176 Name: entry.Author.Name, 191 177 Email: entry.Author.Email, 192 - } 193 - if did, err := x.Db.GetDidForPijulKey(entry.Author.Name); err == nil { 194 - author.Did = did 178 + Did: entry.Author.DID, 195 179 } 196 180 197 181 response := PijulChangeGetResponse{
+2 -1
knotserver/xrpc/pijul_unrecord.go
··· 116 116 } 117 117 118 118 if len(response.Failed) > 0 && len(response.Unrecorded) == 0 { 119 + msg := response.Failed[0].Error 119 120 writeError(w, xrpcerr.NewXrpcError( 120 121 xrpcerr.WithTag("UnrecordFailed"), 121 - xrpcerr.WithMessage("all changes failed to unrecord"), 122 + xrpcerr.WithMessage(msg), 122 123 ), http.StatusInternalServerError) 123 124 return 124 125 }
+4 -4
knotserver/xrpc/repo_languages.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/api/tangled" 10 - "tangled.org/core/knotserver/git" 10 + "tangled.org/core/knotserver/vcs" 11 11 xrpcerr "tangled.org/core/xrpc/errors" 12 12 ) 13 13 ··· 21 21 22 22 ref := r.URL.Query().Get("ref") 23 23 24 - gr, err := git.Open(repoPath, ref) 24 + rv, err := vcs.Open(repoPath, ref) 25 25 if err != nil { 26 26 x.Logger.Error("opening repo", "error", err.Error()) 27 27 writeError(w, xrpcerr.RefNotFoundError, http.StatusNotFound) 28 28 return 29 29 } 30 30 31 - ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second) 31 + ctx, cancel := context.WithTimeout(r.Context(), 2*time.Second) 32 32 defer cancel() 33 33 34 - sizes, err := gr.AnalyzeLanguages(ctx) 34 + sizes, err := vcs.AnalyzeLanguages(ctx, rv) 35 35 if err != nil { 36 36 x.Logger.Error("failed to analyze languages", "error", err.Error()) 37 37 writeError(w, xrpcerr.NewXrpcError(
+18 -1
lexicons/pipeline/pipeline.json
··· 40 40 "enum": [ 41 41 "push", 42 42 "pull_request", 43 - "manual" 43 + "manual", 44 + "schedule" 44 45 ] 45 46 }, 46 47 "repo": { ··· 58 59 "manual": { 59 60 "type": "ref", 60 61 "ref": "#manualTriggerData" 62 + }, 63 + "schedule": { 64 + "type": "ref", 65 + "ref": "#scheduleTriggerData" 61 66 } 62 67 } 63 68 }, ··· 146 151 "type": "ref", 147 152 "ref": "#pair" 148 153 } 154 + } 155 + } 156 + }, 157 + "scheduleTriggerData": { 158 + "type": "object", 159 + "required": [ 160 + "cron" 161 + ], 162 + "properties": { 163 + "cron": { 164 + "type": "string", 165 + "description": "The cron expression that triggered this pipeline" 149 166 } 150 167 } 151 168 },
+3 -18
nix/gomod2nix.toml
··· 314 314 [mod."github.com/gogo/protobuf"] 315 315 version = "v1.3.2" 316 316 hash = "sha256-pogILFrrk+cAtb0ulqn9+gRZJ7sGnnLLdtqITvxvG6c=" 317 - [mod."github.com/goki/freetype"] 318 - version = "v1.0.5" 319 - hash = "sha256-8ILVMx5w1/nV88RZPoG45QJ0jH1YEPJGLpZQdBJFqIs=" 320 317 [mod."github.com/golang-jwt/jwt/v5"] 321 318 version = "v5.3.0" 322 319 hash = "sha256-VdN9Eo74ncMFJEVUSiJ1VRPMbC09FdVGno8wAReseXU=" ··· 569 566 [mod."github.com/rivo/uniseg"] 570 567 version = "v0.4.7" 571 568 hash = "sha256-rDcdNYH6ZD8KouyyiZCUEy8JrjOQoAkxHBhugrfHjFo=" 569 + [mod."github.com/robfig/cron/v3"] 570 + version = "v3.0.1" 571 + hash = "sha256-FUdqNbWYi5biQc/tjCeqzxu4iy4ot1ZvDU1M1wRf/6k=" 572 572 [mod."github.com/ryanuber/go-glob"] 573 573 version = "v1.0.0" 574 574 hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY=" ··· 654 654 [mod."go.opentelemetry.io/otel"] 655 655 version = "v1.40.0" 656 656 hash = "sha256-Cu9ZCLMAd9kGsmpnvoyqwm0IkF4Uk6Xo+8OsP9l+wUQ=" 657 - [mod."go.opentelemetry.io/otel/exporters/otlp/otlptrace"] 658 - version = "v1.40.0" 659 - hash = "sha256-eu4tFL7b8o9eyzchxAtU/UQX9hIZ5jq7Z67GRCZq8Kw=" 660 657 [mod."go.opentelemetry.io/otel/metric"] 661 658 version = "v1.40.0" 662 659 hash = "sha256-+84fJNAYQ0A5DzH1YjHXCo162GgB7r19PsTHiAP9C8k=" 663 660 [mod."go.opentelemetry.io/otel/trace"] 664 661 version = "v1.40.0" 665 662 hash = "sha256-oPA3DfQ5vXhlVkBPksu/kKYBSrssefq4vtnd4vm9K8w=" 666 - [mod."go.opentelemetry.io/proto/otlp"] 667 - version = "v1.9.0" 668 - hash = "sha256-qO+oKCbSRzyNv0jBpQTiHRaI50bLrWRyyvf6lYWvjPc=" 669 663 [mod."go.uber.org/atomic"] 670 664 version = "v1.11.0" 671 665 hash = "sha256-TyYws/cSPVqYNffFX0gbDml1bD4bBGcysrUWU7mHPIY=" ··· 705 699 [mod."golang.org/x/xerrors"] 706 700 version = "v0.0.0-20240903120638-7835f813f4da" 707 701 hash = "sha256-bE7CcrnAvryNvM26ieJGXqbAtuLwHaGcmtVMsVnksqo=" 708 - [mod."google.golang.org/genproto/googleapis/api"] 709 - version = "v0.0.0-20260209200024-4cfbd4190f57" 710 - hash = 
"sha256-2C7DZwLpDDdmUhVUcRDaotbtkhQFOQ9a1SsdVC8lOqc=" 711 - [mod."google.golang.org/genproto/googleapis/rpc"] 712 - version = "v0.0.0-20260209200024-4cfbd4190f57" 713 - hash = "sha256-gdgUw1LzgVOrarF1cGBUI9uoaR/d6lur2RwxUDKnOZA=" 714 - [mod."google.golang.org/grpc"] 715 - version = "v1.78.0" 716 - hash = "sha256-oKsu3+Eae5tpFOZ9K2ZzYh1FgdYdEnEIB1C+UIxSD+E=" 717 702 [mod."google.golang.org/protobuf"] 718 703 version = "v1.36.11" 719 704 hash = "sha256-7W+6jntfI/awWL3JP6yQedxqP5S9o3XvPgJ2XxxsIeE="
+5
spindle/models/clone.go
··· 96 96 // TODO: Implement manual trigger SHA resolution (fetch default branch HEAD) 97 97 return "", nil 98 98 99 + case workflow.TriggerKindSchedule: 100 + // Schedule triggers run against the default branch HEAD 101 + // Return empty string to fetch the latest commit 102 + return "", nil 103 + 99 104 default: 100 105 return "", fmt.Errorf("unknown trigger kind: %s", tr.Kind) 101 106 }
+11
spindle/models/pipeline_env.go
··· 76 76 env["TANGLED_INPUT_"+strings.ToUpper(pair.Key)] = pair.Value 77 77 } 78 78 } 79 + 80 + case workflow.TriggerKindSchedule: 81 + // Schedule triggers run against the default branch 82 + if tr.Repo != nil { 83 + env["TANGLED_REF"] = "refs/heads/" + tr.Repo.DefaultBranch 84 + env["TANGLED_REF_NAME"] = tr.Repo.DefaultBranch 85 + env["TANGLED_REF_TYPE"] = "branch" 86 + } 87 + if tr.Schedule != nil { 88 + env["TANGLED_SCHEDULE_CRON"] = tr.Schedule.Cron 89 + } 79 90 } 80 91 81 92 return env
+6 -4
workflow/def.go
··· 36 36 Event StringList `yaml:"event"` 37 37 Branch StringList `yaml:"branch"` // required for pull_request; for push, either branch or tag must be specified 38 38 Tag StringList `yaml:"tag"` // optional; only applies to push events 39 + Cron StringList `yaml:"cron"` // optional; only applies to schedule events 39 40 } 40 41 41 42 CloneOpts struct { ··· 55 56 TriggerKindPush TriggerKind = "push" 56 57 TriggerKindPullRequest TriggerKind = "pull_request" 57 58 TriggerKindManual TriggerKind = "manual" 59 + TriggerKindSchedule TriggerKind = "schedule" 58 60 ) 59 61 60 62 func (t TriggerKind) String() string { ··· 94 96 95 97 // if any of the constraints on a workflow is true, return true 96 98 func (w *Workflow) Match(trigger tangled.Pipeline_TriggerMetadata) (bool, error) { 97 - // manual triggers always run the workflow 98 - if trigger.Manual != nil { 99 + // manual and schedule triggers always run the workflow 100 + if trigger.Manual != nil || trigger.Schedule != nil { 99 101 return true, nil 100 102 } 101 103 ··· 121 123 func (c *Constraint) Match(trigger tangled.Pipeline_TriggerMetadata) (bool, error) { 122 124 match := true 123 125 124 - // manual triggers always pass this constraint 125 - if trigger.Manual != nil { 126 + // manual and schedule triggers always pass this constraint 127 + if trigger.Manual != nil || trigger.Schedule != nil { 126 128 return true, nil 127 129 } 128 130