A vibe-coded tangled fork which supports Pijul.

wip: atprotate pulls

Signed-off-by: oppiliappan <me@oppi.li>

+1663 -716
+276 -128
api/tangled/cbor_gen.go
··· 7983 7983 fieldCount-- 7984 7984 } 7985 7985 7986 - if t.Mentions == nil { 7986 + if t.DependentOn == nil { 7987 7987 fieldCount-- 7988 7988 } 7989 7989 7990 - if t.Patch == nil { 7990 + if t.Mentions == nil { 7991 7991 fieldCount-- 7992 7992 } 7993 7993 7994 7994 if t.References == nil { 7995 + fieldCount-- 7996 + } 7997 + 7998 + if t.Rounds == nil { 7995 7999 fieldCount-- 7996 8000 } 7997 8001 ··· 8054 8058 return err 8055 8059 } 8056 8060 8057 - // t.Patch (string) (string) 8058 - if t.Patch != nil { 8059 - 8060 - if len("patch") > 1000000 { 8061 - return xerrors.Errorf("Value in field \"patch\" was too long") 8062 - } 8063 - 8064 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil { 8065 - return err 8066 - } 8067 - if _, err := cw.WriteString(string("patch")); err != nil { 8068 - return err 8069 - } 8070 - 8071 - if t.Patch == nil { 8072 - if _, err := cw.Write(cbg.CborNull); err != nil { 8073 - return err 8074 - } 8075 - } else { 8076 - if len(*t.Patch) > 1000000 { 8077 - return xerrors.Errorf("Value in field t.Patch was too long") 8078 - } 8079 - 8080 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil { 8081 - return err 8082 - } 8083 - if _, err := cw.WriteString(string(*t.Patch)); err != nil { 8084 - return err 8085 - } 8086 - } 8087 - } 8088 - 8089 8061 // t.Title (string) (string) 8090 8062 if len("title") > 1000000 { 8091 8063 return xerrors.Errorf("Value in field \"title\" was too long") ··· 8109 8081 return err 8110 8082 } 8111 8083 8084 + // t.Rounds ([]*tangled.RepoPull_Round) (slice) 8085 + if t.Rounds != nil { 8086 + 8087 + if len("rounds") > 1000000 { 8088 + return xerrors.Errorf("Value in field \"rounds\" was too long") 8089 + } 8090 + 8091 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("rounds"))); err != nil { 8092 + return err 8093 + } 8094 + if _, err := cw.WriteString(string("rounds")); err != nil { 8095 + return err 8096 + } 8097 + 8098 + if 
len(t.Rounds) > 8192 { 8099 + return xerrors.Errorf("Slice value in field t.Rounds was too long") 8100 + } 8101 + 8102 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Rounds))); err != nil { 8103 + return err 8104 + } 8105 + for _, v := range t.Rounds { 8106 + if err := v.MarshalCBOR(cw); err != nil { 8107 + return err 8108 + } 8109 + 8110 + } 8111 + } 8112 + 8112 8113 // t.Source (tangled.RepoPull_Source) (struct) 8113 8114 if t.Source != nil { 8114 8115 ··· 8203 8204 return err 8204 8205 } 8205 8206 8206 - // t.PatchBlob (util.LexBlob) (struct) 8207 - if len("patchBlob") > 1000000 { 8208 - return xerrors.Errorf("Value in field \"patchBlob\" was too long") 8209 - } 8210 - 8211 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil { 8212 - return err 8213 - } 8214 - if _, err := cw.WriteString(string("patchBlob")); err != nil { 8215 - return err 8216 - } 8217 - 8218 - if err := t.PatchBlob.MarshalCBOR(cw); err != nil { 8219 - return err 8220 - } 8221 - 8222 8207 // t.References ([]string) (slice) 8223 8208 if t.References != nil { 8224 8209 ··· 8254 8239 8255 8240 } 8256 8241 } 8242 + 8243 + // t.DependentOn (string) (string) 8244 + if t.DependentOn != nil { 8245 + 8246 + if len("dependentOn") > 1000000 { 8247 + return xerrors.Errorf("Value in field \"dependentOn\" was too long") 8248 + } 8249 + 8250 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("dependentOn"))); err != nil { 8251 + return err 8252 + } 8253 + if _, err := cw.WriteString(string("dependentOn")); err != nil { 8254 + return err 8255 + } 8256 + 8257 + if t.DependentOn == nil { 8258 + if _, err := cw.Write(cbg.CborNull); err != nil { 8259 + return err 8260 + } 8261 + } else { 8262 + if len(*t.DependentOn) > 1000000 { 8263 + return xerrors.Errorf("Value in field t.DependentOn was too long") 8264 + } 8265 + 8266 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.DependentOn))); err != nil { 8267 + return err 8268 + 
} 8269 + if _, err := cw.WriteString(string(*t.DependentOn)); err != nil { 8270 + return err 8271 + } 8272 + } 8273 + } 8257 8274 return nil 8258 8275 } 8259 8276 ··· 8282 8299 8283 8300 n := extra 8284 8301 8285 - nameBuf := make([]byte, 10) 8302 + nameBuf := make([]byte, 11) 8286 8303 for i := uint64(0); i < n; i++ { 8287 8304 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000) 8288 8305 if err != nil { ··· 8330 8347 8331 8348 t.LexiconTypeID = string(sval) 8332 8349 } 8333 - // t.Patch (string) (string) 8334 - case "patch": 8350 + // t.Title (string) (string) 8351 + case "title": 8335 8352 8336 8353 { 8337 - b, err := cr.ReadByte() 8354 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8338 8355 if err != nil { 8339 8356 return err 8340 8357 } 8341 - if b != cbg.CborNull[0] { 8342 - if err := cr.UnreadByte(); err != nil { 8343 - return err 8344 - } 8358 + 8359 + t.Title = string(sval) 8360 + } 8361 + // t.Rounds ([]*tangled.RepoPull_Round) (slice) 8362 + case "rounds": 8363 + 8364 + maj, extra, err = cr.ReadHeader() 8365 + if err != nil { 8366 + return err 8367 + } 8368 + 8369 + if extra > 8192 { 8370 + return fmt.Errorf("t.Rounds: array too large (%d)", extra) 8371 + } 8345 8372 8346 - sval, err := cbg.ReadStringWithMax(cr, 1000000) 8347 - if err != nil { 8348 - return err 8349 - } 8373 + if maj != cbg.MajArray { 8374 + return fmt.Errorf("expected cbor array") 8375 + } 8350 8376 8351 - t.Patch = (*string)(&sval) 8352 - } 8377 + if extra > 0 { 8378 + t.Rounds = make([]*RepoPull_Round, extra) 8353 8379 } 8354 - // t.Title (string) (string) 8355 - case "title": 8380 + 8381 + for i := 0; i < int(extra); i++ { 8382 + { 8383 + var maj byte 8384 + var extra uint64 8385 + var err error 8386 + _ = maj 8387 + _ = extra 8388 + _ = err 8389 + 8390 + { 8391 + 8392 + b, err := cr.ReadByte() 8393 + if err != nil { 8394 + return err 8395 + } 8396 + if b != cbg.CborNull[0] { 8397 + if err := cr.UnreadByte(); err != nil { 8398 + return err 8399 + } 8400 + 
t.Rounds[i] = new(RepoPull_Round) 8401 + if err := t.Rounds[i].UnmarshalCBOR(cr); err != nil { 8402 + return xerrors.Errorf("unmarshaling t.Rounds[i] pointer: %w", err) 8403 + } 8404 + } 8405 + 8406 + } 8356 8407 8357 - { 8358 - sval, err := cbg.ReadStringWithMax(cr, 1000000) 8359 - if err != nil { 8360 - return err 8361 8408 } 8362 - 8363 - t.Title = string(sval) 8364 8409 } 8365 8410 // t.Source (tangled.RepoPull_Source) (struct) 8366 8411 case "source": ··· 8453 8498 8454 8499 t.CreatedAt = string(sval) 8455 8500 } 8456 - // t.PatchBlob (util.LexBlob) (struct) 8457 - case "patchBlob": 8458 - 8459 - { 8460 - 8461 - b, err := cr.ReadByte() 8462 - if err != nil { 8463 - return err 8464 - } 8465 - if b != cbg.CborNull[0] { 8466 - if err := cr.UnreadByte(); err != nil { 8467 - return err 8468 - } 8469 - t.PatchBlob = new(util.LexBlob) 8470 - if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil { 8471 - return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err) 8472 - } 8473 - } 8474 - 8475 - } 8476 8501 // t.References ([]string) (slice) 8477 8502 case "references": 8478 8503 ··· 8511 8536 t.References[i] = string(sval) 8512 8537 } 8513 8538 8539 + } 8540 + } 8541 + // t.DependentOn (string) (string) 8542 + case "dependentOn": 8543 + 8544 + { 8545 + b, err := cr.ReadByte() 8546 + if err != nil { 8547 + return err 8548 + } 8549 + if b != cbg.CborNull[0] { 8550 + if err := cr.UnreadByte(); err != nil { 8551 + return err 8552 + } 8553 + 8554 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8555 + if err != nil { 8556 + return err 8557 + } 8558 + 8559 + t.DependentOn = (*string)(&sval) 8514 8560 } 8515 8561 } 8516 8562 ··· 8890 8936 } 8891 8937 8892 8938 cw := cbg.NewCborWriter(w) 8893 - fieldCount := 3 8939 + fieldCount := 2 8894 8940 8895 8941 if t.Repo == nil { 8896 8942 fieldCount-- 8897 8943 } 8898 8944 8899 8945 if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 8900 - return err 8901 - } 8902 - 8903 - // t.Sha 
(string) (string) 8904 - if len("sha") > 1000000 { 8905 - return xerrors.Errorf("Value in field \"sha\" was too long") 8906 - } 8907 - 8908 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sha"))); err != nil { 8909 - return err 8910 - } 8911 - if _, err := cw.WriteString(string("sha")); err != nil { 8912 - return err 8913 - } 8914 - 8915 - if len(t.Sha) > 1000000 { 8916 - return xerrors.Errorf("Value in field t.Sha was too long") 8917 - } 8918 - 8919 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sha))); err != nil { 8920 - return err 8921 - } 8922 - if _, err := cw.WriteString(string(t.Sha)); err != nil { 8923 8946 return err 8924 8947 } 8925 8948 ··· 9021 9044 } 9022 9045 9023 9046 switch string(nameBuf[:nameLen]) { 9024 - // t.Sha (string) (string) 9025 - case "sha": 9026 - 9027 - { 9028 - sval, err := cbg.ReadStringWithMax(cr, 1000000) 9029 - if err != nil { 9030 - return err 9031 - } 9032 - 9033 - t.Sha = string(sval) 9034 - } 9035 - // t.Repo (string) (string) 9047 + // t.Repo (string) (string) 9036 9048 case "repo": 9037 9049 9038 9050 { ··· 9063 9075 } 9064 9076 9065 9077 t.Branch = string(sval) 9078 + } 9079 + 9080 + default: 9081 + // Field doesn't exist on this type, so ignore it 9082 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 9083 + return err 9084 + } 9085 + } 9086 + } 9087 + 9088 + return nil 9089 + } 9090 + func (t *RepoPull_Round) MarshalCBOR(w io.Writer) error { 9091 + if t == nil { 9092 + _, err := w.Write(cbg.CborNull) 9093 + return err 9094 + } 9095 + 9096 + cw := cbg.NewCborWriter(w) 9097 + 9098 + if _, err := cw.Write([]byte{162}); err != nil { 9099 + return err 9100 + } 9101 + 9102 + // t.CreatedAt (string) (string) 9103 + if len("createdAt") > 1000000 { 9104 + return xerrors.Errorf("Value in field \"createdAt\" was too long") 9105 + } 9106 + 9107 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil { 9108 + return err 9109 + } 9110 + if _, 
err := cw.WriteString(string("createdAt")); err != nil { 9111 + return err 9112 + } 9113 + 9114 + if len(t.CreatedAt) > 1000000 { 9115 + return xerrors.Errorf("Value in field t.CreatedAt was too long") 9116 + } 9117 + 9118 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil { 9119 + return err 9120 + } 9121 + if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 9122 + return err 9123 + } 9124 + 9125 + // t.PatchBlob (util.LexBlob) (struct) 9126 + if len("patchBlob") > 1000000 { 9127 + return xerrors.Errorf("Value in field \"patchBlob\" was too long") 9128 + } 9129 + 9130 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil { 9131 + return err 9132 + } 9133 + if _, err := cw.WriteString(string("patchBlob")); err != nil { 9134 + return err 9135 + } 9136 + 9137 + if err := t.PatchBlob.MarshalCBOR(cw); err != nil { 9138 + return err 9139 + } 9140 + return nil 9141 + } 9142 + 9143 + func (t *RepoPull_Round) UnmarshalCBOR(r io.Reader) (err error) { 9144 + *t = RepoPull_Round{} 9145 + 9146 + cr := cbg.NewCborReader(r) 9147 + 9148 + maj, extra, err := cr.ReadHeader() 9149 + if err != nil { 9150 + return err 9151 + } 9152 + defer func() { 9153 + if err == io.EOF { 9154 + err = io.ErrUnexpectedEOF 9155 + } 9156 + }() 9157 + 9158 + if maj != cbg.MajMap { 9159 + return fmt.Errorf("cbor input should be of type map") 9160 + } 9161 + 9162 + if extra > cbg.MaxLength { 9163 + return fmt.Errorf("RepoPull_Round: map struct too large (%d)", extra) 9164 + } 9165 + 9166 + n := extra 9167 + 9168 + nameBuf := make([]byte, 9) 9169 + for i := uint64(0); i < n; i++ { 9170 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000) 9171 + if err != nil { 9172 + return err 9173 + } 9174 + 9175 + if !ok { 9176 + // Field doesn't exist on this type, so ignore it 9177 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 9178 + return err 9179 + } 9180 + continue 9181 + } 9182 + 9183 + 
switch string(nameBuf[:nameLen]) { 9184 + // t.CreatedAt (string) (string) 9185 + case "createdAt": 9186 + 9187 + { 9188 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 9189 + if err != nil { 9190 + return err 9191 + } 9192 + 9193 + t.CreatedAt = string(sval) 9194 + } 9195 + // t.PatchBlob (util.LexBlob) (struct) 9196 + case "patchBlob": 9197 + 9198 + { 9199 + 9200 + b, err := cr.ReadByte() 9201 + if err != nil { 9202 + return err 9203 + } 9204 + if b != cbg.CborNull[0] { 9205 + if err := cr.UnreadByte(); err != nil { 9206 + return err 9207 + } 9208 + t.PatchBlob = new(util.LexBlob) 9209 + if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil { 9210 + return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err) 9211 + } 9212 + } 9213 + 9066 9214 } 9067 9215 9068 9216 default:
+18 -13
api/tangled/repopull.go
··· 17 17 } // 18 18 // RECORDTYPE: RepoPull 19 19 type RepoPull struct { 20 - LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"` 21 - Body *string `json:"body,omitempty" cborgen:"body,omitempty"` 22 - CreatedAt string `json:"createdAt" cborgen:"createdAt"` 23 - Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"` 24 - // patch: (deprecated) use patchBlob instead 25 - Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"` 26 - // patchBlob: patch content 27 - PatchBlob *util.LexBlob `json:"patchBlob" cborgen:"patchBlob"` 28 - References []string `json:"references,omitempty" cborgen:"references,omitempty"` 29 - Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"` 30 - Target *RepoPull_Target `json:"target" cborgen:"target"` 31 - Title string `json:"title" cborgen:"title"` 20 + LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"` 21 + Body *string `json:"body,omitempty" cborgen:"body,omitempty"` 22 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 23 + DependentOn *string `json:"dependentOn,omitempty" cborgen:"dependentOn,omitempty"` 24 + Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"` 25 + References []string `json:"references,omitempty" cborgen:"references,omitempty"` 26 + Rounds []*RepoPull_Round `json:"rounds,omitempty" cborgen:"rounds,omitempty"` 27 + Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"` 28 + Target *RepoPull_Target `json:"target" cborgen:"target"` 29 + Title string `json:"title" cborgen:"title"` 30 + } 31 + 32 + // RepoPull_Round is a "round" in the sh.tangled.repo.pull schema. 33 + // 34 + // revisions of this pull request, newer rounds are appended to this array. appviews may reject records do not treat this field as append-only. the blob format is gzipped text-based git-format-patches. 
35 + type RepoPull_Round struct { 36 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 37 + PatchBlob *util.LexBlob `json:"patchBlob" cborgen:"patchBlob"` 32 38 } 33 39 34 40 // RepoPull_Source is a "source" in the sh.tangled.repo.pull schema. 35 41 type RepoPull_Source struct { 36 42 Branch string `json:"branch" cborgen:"branch"` 37 43 Repo *string `json:"repo,omitempty" cborgen:"repo,omitempty"` 38 - Sha string `json:"sha" cborgen:"sha"` 39 44 } 40 45 41 46 // RepoPull_Target is a "target" in the sh.tangled.repo.pull schema.
+47
appview/db/db.go
··· 1255 1255 return err 1256 1256 }) 1257 1257 1258 + orm.RunMigration(conn, logger, "add-blob-data-to-pull-submissions", func(tx *sql.Tx) error { 1259 + _, err := tx.Exec(` 1260 + alter table pull_submissions add column patch_blob_ref text; 1261 + alter table pull_submissions add column patch_blob_mime text; 1262 + alter table pull_submissions add column patch_blob_size integer; 1263 + `) 1264 + return err 1265 + }) 1266 + 1267 + orm.RunMigration(conn, logger, "remove-stack-id-from-pull-submissions", func(tx *sql.Tx) error { 1268 + _, err := tx.Exec(` 1269 + alter table pulls drop column stack_id; 1270 + `) 1271 + return err 1272 + }) 1273 + 1274 + orm.RunMigration(conn, logger, "replace-parent-change-id-with-aturi", func(tx *sql.Tx) error { 1275 + // add new column 1276 + _, err := tx.Exec(` 1277 + alter table pulls add column dependent_on text; 1278 + `) 1279 + if err != nil { 1280 + return err 1281 + } 1282 + 1283 + // populate dependent_on with at_uri of the parent 1284 + _, err = tx.Exec(` 1285 + update pulls 1286 + set dependent_on = ( 1287 + select at_uri 1288 + from pulls as parent 1289 + where parent.change_id = pulls.parent_change_id 1290 + ) 1291 + where parent_change_id is not null; 1292 + `) 1293 + if err != nil { 1294 + return err 1295 + } 1296 + 1297 + // drop old column 1298 + _, err = tx.Exec(` 1299 + alter table pulls drop column parent_change_id; 1300 + `) 1301 + 1302 + return err 1303 + }) 1304 + 1258 1305 return &DB{ 1259 1306 db, 1260 1307 logger,
+486 -130
appview/db/pulls.go
··· 12 12 "time" 13 13 14 14 "github.com/bluesky-social/indigo/atproto/syntax" 15 + lexutil "github.com/bluesky-social/indigo/lex/util" 16 + "github.com/ipfs/go-cid" 15 17 "tangled.org/core/appview/models" 16 18 "tangled.org/core/appview/pagination" 17 19 "tangled.org/core/orm" 20 + "tangled.org/core/sets" 18 21 ) 19 22 20 - func NewPull(tx *sql.Tx, pull *models.Pull) error { 23 + func comparePullSource(existing, new *models.PullSource) bool { 24 + if existing == nil && new == nil { 25 + return true 26 + } 27 + if existing == nil || new == nil { 28 + return false 29 + } 30 + if existing.Branch != new.Branch { 31 + return false 32 + } 33 + if existing.RepoAt == nil && new.RepoAt == nil { 34 + return true 35 + } 36 + if existing.RepoAt == nil || new.RepoAt == nil { 37 + return false 38 + } 39 + return *existing.RepoAt == *new.RepoAt 40 + } 41 + 42 + func compareSubmissions(existing, new []*models.PullSubmission) bool { 43 + if len(existing) != len(new) { 44 + return false 45 + } 46 + for i := range existing { 47 + if existing[i].Blob.Ref.String() != new[i].Blob.Ref.String() { 48 + return false 49 + } 50 + if existing[i].Blob.MimeType != new[i].Blob.MimeType { 51 + return false 52 + } 53 + if existing[i].Blob.Size != new[i].Blob.Size { 54 + return false 55 + } 56 + } 57 + return true 58 + } 59 + 60 + func PutPull(tx *sql.Tx, pull *models.Pull) error { 61 + // ensure sequence exists 62 + _, err := tx.Exec(` 63 + insert or ignore into repo_pull_seqs (repo_at, next_pull_id) 64 + values (?, 1) 65 + `, pull.RepoAt) 66 + if err != nil { 67 + return err 68 + } 69 + 70 + pulls, err := GetPulls( 71 + tx, 72 + orm.FilterEq("owner_did", pull.OwnerDid), 73 + orm.FilterEq("rkey", pull.Rkey), 74 + ) 75 + switch { 76 + case err != nil: 77 + return err 78 + case len(pulls) == 0: 79 + return createNewPull(tx, pull) 80 + case len(pulls) != 1: // should be unreachable 81 + return fmt.Errorf("invalid number of pulls returned: %d", len(pulls)) 82 + default: 83 + existingPull := pulls[0] 
84 + if existingPull.State == models.PullMerged { 85 + return nil 86 + } 87 + 88 + dependentOnEqual := (existingPull.DependentOn == nil && pull.DependentOn == nil) || 89 + (existingPull.DependentOn != nil && pull.DependentOn != nil && *existingPull.DependentOn == *pull.DependentOn) 90 + 91 + pullSourceEqual := comparePullSource(existingPull.PullSource, pull.PullSource) 92 + submissionsEqual := compareSubmissions(existingPull.Submissions, pull.Submissions) 93 + 94 + if existingPull.Title == pull.Title && 95 + existingPull.Body == pull.Body && 96 + existingPull.TargetBranch == pull.TargetBranch && 97 + existingPull.RepoAt == pull.RepoAt && 98 + dependentOnEqual && 99 + pullSourceEqual && 100 + submissionsEqual { 101 + return nil 102 + } 103 + 104 + isLonger := len(existingPull.Submissions) < len(pull.Submissions) 105 + if isLonger { 106 + isAppendOnly := compareSubmissions(existingPull.Submissions, pull.Submissions[:len(existingPull.Submissions)]) 107 + if !isAppendOnly { 108 + return fmt.Errorf("the new pull does not treat submissions as append-only") 109 + } 110 + } else if !submissionsEqual { 111 + return fmt.Errorf("the new pull does not treat submissions as append-only") 112 + } 113 + 114 + pull.ID = existingPull.ID 115 + pull.PullId = existingPull.PullId 116 + return updatePull(tx, pull, existingPull) 117 + } 118 + } 119 + 120 + func createNewPull(tx *sql.Tx, pull *models.Pull) error { 21 121 _, err := tx.Exec(` 22 122 insert or ignore into repo_pull_seqs (repo_at, next_pull_id) 23 123 values (?, 1) ··· 49 149 } 50 150 } 51 151 52 - var stackId, changeId, parentChangeId *string 53 - if pull.StackId != "" { 54 - stackId = &pull.StackId 55 - } 56 - if pull.ChangeId != "" { 57 - changeId = &pull.ChangeId 58 - } 59 - if pull.ParentChangeId != "" { 60 - parentChangeId = &pull.ParentChangeId 61 - } 152 + // var stackId, changeId, parentChangeId *string 153 + // if pull.StackId != "" { 154 + // stackId = &pull.StackId 155 + // } 156 + // if pull.ChangeId != "" { 157 + 
// changeId = &pull.ChangeId 158 + // } 159 + // if pull.ParentChangeId != "" { 160 + // parentChangeId = &pull.ParentChangeId 161 + // } 62 162 63 163 result, err := tx.Exec( 64 164 ` 65 165 insert into pulls ( 66 - repo_at, owner_did, pull_id, title, target_branch, body, rkey, state, source_branch, source_repo_at, stack_id, change_id, parent_change_id 166 + repo_at, 167 + owner_did, 168 + pull_id, 169 + title, 170 + target_branch, 171 + body, 172 + rkey, 173 + state, 174 + dependent_on, 175 + source_branch, 176 + source_repo_at 67 177 ) 68 - values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, 178 + values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, 69 179 pull.RepoAt, 70 180 pull.OwnerDid, 71 181 pull.PullId, ··· 74 184 pull.Body, 75 185 pull.Rkey, 76 186 pull.State, 187 + pull.DependentOn, 77 188 sourceBranch, 78 189 sourceRepoAt, 79 - stackId, 80 - changeId, 81 - parentChangeId, 82 190 ) 83 191 if err != nil { 84 192 return err ··· 91 199 } 92 200 pull.ID = int(id) 93 201 94 - _, err = tx.Exec(` 95 - insert into pull_submissions (pull_at, round_number, patch, combined, source_rev) 96 - values (?, ?, ?, ?, ?) 97 - `, pull.AtUri(), 0, pull.Submissions[0].Patch, pull.Submissions[0].Combined, pull.Submissions[0].SourceRev) 98 - if err != nil { 99 - return err 202 + for i, s := range pull.Submissions { 203 + _, err = tx.Exec(` 204 + insert into pull_submissions ( 205 + pull_at, 206 + round_number, 207 + patch, 208 + combined, 209 + source_rev, 210 + patch_blob_ref, 211 + patch_blob_mime, 212 + patch_blob_size 213 + ) 214 + values (?, ?, ?, ?, ?, ?, ?, ?) 
215 + `, 216 + pull.AtUri(), 217 + i, 218 + s.Patch, 219 + s.Combined, 220 + s.SourceRev, 221 + s.Blob.Ref.String(), 222 + s.Blob.MimeType, 223 + s.Blob.Size, 224 + ) 225 + if err != nil { 226 + return err 227 + } 100 228 } 101 229 102 230 if err := putReferences(tx, pull.AtUri(), pull.References); err != nil { ··· 106 234 return nil 107 235 } 108 236 109 - func GetPullAt(e Execer, repoAt syntax.ATURI, pullId int) (syntax.ATURI, error) { 110 - pull, err := GetPull(e, repoAt, pullId) 237 + func updatePull(tx *sql.Tx, pull *models.Pull, existingPull *models.Pull) error { 238 + var sourceBranch, sourceRepoAt *string 239 + if pull.PullSource != nil { 240 + sourceBranch = &pull.PullSource.Branch 241 + if pull.PullSource.RepoAt != nil { 242 + x := pull.PullSource.RepoAt.String() 243 + sourceRepoAt = &x 244 + } 245 + } 246 + 247 + _, err := tx.Exec(` 248 + update pulls set 249 + title = ?, 250 + body = ?, 251 + target_branch = ?, 252 + dependent_on = ?, 253 + source_branch = ?, 254 + source_repo_at = ? 255 + where owner_did = ? and rkey = ? 256 + `, pull.Title, pull.Body, pull.TargetBranch, pull.DependentOn, sourceBranch, sourceRepoAt, pull.OwnerDid, pull.Rkey) 111 257 if err != nil { 112 - return "", err 258 + return err 113 259 } 114 - return pull.AtUri(), err 260 + 261 + // insert new submissions (append-only) 262 + for i := len(existingPull.Submissions); i < len(pull.Submissions); i++ { 263 + s := pull.Submissions[i] 264 + _, err = tx.Exec(` 265 + insert into pull_submissions ( 266 + pull_at, 267 + round_number, 268 + patch, 269 + combined, 270 + source_rev, 271 + patch_blob_ref, 272 + patch_blob_mime, 273 + patch_blob_size 274 + ) 275 + values (?, ?, ?, ?, ?, ?, ?, ?) 
276 + `, 277 + pull.AtUri(), 278 + i, 279 + s.Patch, 280 + s.Combined, 281 + s.SourceRev, 282 + s.Blob.Ref.String(), 283 + s.Blob.MimeType, 284 + s.Blob.Size, 285 + ) 286 + if err != nil { 287 + return err 288 + } 289 + } 290 + 291 + if err := putReferences(tx, pull.AtUri(), pull.References); err != nil { 292 + return fmt.Errorf("put reference_links: %w", err) 293 + } 294 + return nil 115 295 } 116 296 297 + // func NewPull(tx *sql.Tx, pull *models.Pull) error { 298 + // _, err := tx.Exec(` 299 + // insert or ignore into repo_pull_seqs (repo_at, next_pull_id) 300 + // values (?, 1) 301 + // `, pull.RepoAt) 302 + // if err != nil { 303 + // return err 304 + // } 305 + // 306 + // var nextId int 307 + // err = tx.QueryRow(` 308 + // update repo_pull_seqs 309 + // set next_pull_id = next_pull_id + 1 310 + // where repo_at = ? 311 + // returning next_pull_id - 1 312 + // `, pull.RepoAt).Scan(&nextId) 313 + // if err != nil { 314 + // return err 315 + // } 316 + // 317 + // pull.PullId = nextId 318 + // pull.State = models.PullOpen 319 + // 320 + // var sourceBranch, sourceRepoAt *string 321 + // if pull.PullSource != nil { 322 + // sourceBranch = &pull.PullSource.Branch 323 + // if pull.PullSource.RepoAt != nil { 324 + // x := pull.PullSource.RepoAt.String() 325 + // sourceRepoAt = &x 326 + // } 327 + // } 328 + // 329 + // // var stackId, changeId, parentChangeId *string 330 + // // if pull.StackId != "" { 331 + // // stackId = &pull.StackId 332 + // // } 333 + // // if pull.ChangeId != "" { 334 + // // changeId = &pull.ChangeId 335 + // // } 336 + // // if pull.ParentChangeId != "" { 337 + // // parentChangeId = &pull.ParentChangeId 338 + // // } 339 + // 340 + // result, err := tx.Exec( 341 + // ` 342 + // insert into pulls ( 343 + // repo_at, 344 + // owner_did, 345 + // pull_id, 346 + // title, 347 + // target_branch, 348 + // body, 349 + // rkey, 350 + // state, 351 + // dependent_on, 352 + // source_branch, 353 + // source_repo_at 354 + // ) 355 + // values (?, 
?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, 356 + // pull.RepoAt, 357 + // pull.OwnerDid, 358 + // pull.PullId, 359 + // pull.Title, 360 + // pull.TargetBranch, 361 + // pull.Body, 362 + // pull.Rkey, 363 + // pull.State, 364 + // pull.DependentOn, 365 + // sourceBranch, 366 + // sourceRepoAt, 367 + // ) 368 + // if err != nil { 369 + // return err 370 + // } 371 + // 372 + // // Set the database primary key ID 373 + // id, err := result.LastInsertId() 374 + // if err != nil { 375 + // return err 376 + // } 377 + // pull.ID = int(id) 378 + // 379 + // _, err = tx.Exec(` 380 + // insert into pull_submissions ( 381 + // pull_at, 382 + // round_number, 383 + // patch, 384 + // combined, 385 + // source_rev, 386 + // patch_blob_ref, 387 + // patch_blob_mime, 388 + // patch_blob_size 389 + // ) 390 + // values (?, ?, ?, ?, ?, ?, ?, ?) 391 + // `, 392 + // pull.AtUri(), 393 + // 0, 394 + // pull.Submissions[0].Patch, 395 + // pull.Submissions[0].Combined, 396 + // pull.Submissions[0].SourceRev, 397 + // pull.Submissions[0].Blob.Ref.String(), 398 + // pull.Submissions[0].Blob.MimeType, 399 + // pull.Submissions[0].Blob.Size, 400 + // ) 401 + // if err != nil { 402 + // return err 403 + // } 404 + // 405 + // if err := putReferences(tx, pull.AtUri(), pull.References); err != nil { 406 + // return fmt.Errorf("put reference_links: %w", err) 407 + // } 408 + // 409 + // return nil 410 + // } 411 + 117 412 func NextPullId(e Execer, repoAt syntax.ATURI) (int, error) { 118 413 var pullId int 119 414 err := e.QueryRow(`select next_pull_id from repo_pull_seqs where repo_at = ?`, repoAt).Scan(&pullId) ··· 157 452 rkey, 158 453 source_branch, 159 454 source_repo_at, 160 - stack_id, 161 - change_id, 162 - parent_change_id 455 + dependent_on 163 456 from 164 457 pulls 165 458 %s ··· 177 470 for rows.Next() { 178 471 var pull models.Pull 179 472 var createdAt string 180 - var sourceBranch, sourceRepoAt, stackId, changeId, parentChangeId sql.NullString 473 + var sourceBranch, sourceRepoAt, 
dependentOn sql.NullString 181 474 err := rows.Scan( 182 475 &pull.ID, 183 476 &pull.OwnerDid, ··· 191 484 &pull.Rkey, 192 485 &sourceBranch, 193 486 &sourceRepoAt, 194 - &stackId, 195 - &changeId, 196 - &parentChangeId, 487 + &dependentOn, 197 488 ) 198 489 if err != nil { 199 490 return nil, err ··· 218 509 } 219 510 } 220 511 221 - if stackId.Valid { 222 - pull.StackId = stackId.String 223 - } 224 - if changeId.Valid { 225 - pull.ChangeId = changeId.String 226 - } 227 - if parentChangeId.Valid { 228 - pull.ParentChangeId = parentChangeId.String 512 + if dependentOn.Valid { 513 + x := syntax.ATURI(dependentOn.String) 514 + pull.DependentOn = &x 229 515 } 230 516 231 517 pulls[pull.AtUri()] = &pull ··· 305 591 return GetPullsPaginated(e, pagination.Page{}, filters...) 306 592 } 307 593 308 - func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) { 309 - pulls, err := GetPullsPaginated(e, pagination.Page{Limit: 1}, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId)) 594 + func GetPull(e Execer, filters ...orm.Filter) (*models.Pull, error) { 595 + pulls, err := GetPullsPaginated(e, pagination.Page{Limit: 1}, filters...) 
310 596 if err != nil { 311 597 return nil, err 312 598 } ··· 339 625 patch, 340 626 combined, 341 627 created, 342 - source_rev 628 + source_rev, 629 + patch_blob_ref, 630 + patch_blob_mime, 631 + patch_blob_size 343 632 from 344 633 pull_submissions 345 634 %s ··· 358 647 for rows.Next() { 359 648 var submission models.PullSubmission 360 649 var submissionCreatedStr string 361 - var submissionSourceRev, submissionCombined sql.NullString 650 + var submissionSourceRev, submissionCombined sql.Null[string] 651 + var patchBlobRef, patchBlobMime sql.Null[string] 652 + var patchBlobSize sql.Null[int64] 362 653 err := rows.Scan( 363 654 &submission.ID, 364 655 &submission.PullAt, ··· 367 658 &submissionCombined, 368 659 &submissionCreatedStr, 369 660 &submissionSourceRev, 661 + &patchBlobRef, 662 + &patchBlobMime, 663 + &patchBlobSize, 370 664 ) 371 665 if err != nil { 372 666 return nil, err ··· 377 671 } 378 672 379 673 if submissionSourceRev.Valid { 380 - submission.SourceRev = submissionSourceRev.String 674 + submission.SourceRev = submissionSourceRev.V 381 675 } 382 676 383 677 if submissionCombined.Valid { 384 - submission.Combined = submissionCombined.String 678 + submission.Combined = submissionCombined.V 679 + } 680 + 681 + if patchBlobRef.Valid { 682 + submission.Blob.Ref = lexutil.LexLink(cid.MustParse(patchBlobRef.V)) 683 + } 684 + 685 + if patchBlobMime.Valid { 686 + submission.Blob.MimeType = patchBlobMime.V 687 + } 688 + 689 + if patchBlobSize.Valid { 690 + submission.Blob.Size = patchBlobSize.V 385 691 } 386 692 387 693 submissionMap[submission.ID] = &submission ··· 612 918 return i, nil 613 919 } 614 920 615 - func SetPullState(e Execer, repoAt syntax.ATURI, pullId int, pullState models.PullState) error { 616 - _, err := e.Exec( 617 - `update pulls set state = ? where repo_at = ? and pull_id = ? and (state <> ? 
or state <> ?)`, 618 - pullState, 619 - repoAt, 620 - pullId, 621 - models.PullDeleted, // only update state of non-deleted pulls 622 - models.PullMerged, // only update state of non-merged pulls 623 - ) 624 - return err 625 - } 626 - 627 - func ClosePull(e Execer, repoAt syntax.ATURI, pullId int) error { 628 - err := SetPullState(e, repoAt, pullId, models.PullClosed) 629 - return err 630 - } 631 - 632 - func ReopenPull(e Execer, repoAt syntax.ATURI, pullId int) error { 633 - err := SetPullState(e, repoAt, pullId, models.PullOpen) 634 - return err 635 - } 636 - 637 - func MergePull(e Execer, repoAt syntax.ATURI, pullId int) error { 638 - err := SetPullState(e, repoAt, pullId, models.PullMerged) 639 - return err 640 - } 641 - 642 - func DeletePull(e Execer, repoAt syntax.ATURI, pullId int) error { 643 - err := SetPullState(e, repoAt, pullId, models.PullDeleted) 644 - return err 645 - } 646 - 647 - func ResubmitPull(e Execer, pullAt syntax.ATURI, newRoundNumber int, newPatch string, combinedPatch string, newSourceRev string) error { 648 - _, err := e.Exec(` 649 - insert into pull_submissions (pull_at, round_number, patch, combined, source_rev) 650 - values (?, ?, ?, ?, ?) 651 - `, pullAt, newRoundNumber, newPatch, combinedPatch, newSourceRev) 652 - 653 - return err 654 - } 655 - 656 - func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error { 921 + // use with transaction 922 + func SetPullsState(e Execer, pullState models.PullState, filters ...orm.Filter) error { 657 923 var conditions []string 658 924 var args []any 659 925 660 - args = append(args, parentChangeId) 661 - 926 + args = append(args, pullState) 662 927 for _, filter := range filters { 663 928 conditions = append(conditions, filter.Condition()) 664 929 args = append(args, filter.Arg()...) 
665 930 } 931 + args = append(args, models.PullAbandoned) // only update state of non-deleted pulls 932 + args = append(args, models.PullMerged) // only update state of non-merged pulls 666 933 667 934 whereClause := "" 668 935 if conditions != nil { 669 936 whereClause = " where " + strings.Join(conditions, " and ") 670 937 } 671 938 672 - query := fmt.Sprintf("update pulls set parent_change_id = ? %s", whereClause) 939 + query := fmt.Sprintf("update pulls set state = ? %s and state <> ? and state <> ?", whereClause) 940 + 673 941 _, err := e.Exec(query, args...) 942 + return err 943 + } 944 + 945 + func ClosePulls(e Execer, filters ...orm.Filter) error { 946 + return SetPullsState(e, models.PullClosed, filters...) 947 + } 948 + 949 + func ReopenPulls(e Execer, filters ...orm.Filter) error { 950 + return SetPullsState(e, models.PullOpen, filters...) 951 + } 952 + 953 + func MergePulls(e Execer, filters ...orm.Filter) error { 954 + return SetPullsState(e, models.PullMerged, filters...) 955 + } 956 + 957 + func AbandonPulls(e Execer, filters ...orm.Filter) error { 958 + return SetPullsState(e, models.PullAbandoned, filters...) 959 + } 960 + 961 + func ResubmitPull( 962 + e Execer, 963 + pullAt syntax.ATURI, 964 + newRoundNumber int, 965 + newPatch string, 966 + combinedPatch string, 967 + newSourceRev string, 968 + blob *lexutil.LexBlob, 969 + ) error { 970 + _, err := e.Exec(` 971 + insert into pull_submissions ( 972 + pull_at, 973 + round_number, 974 + patch, 975 + combined, 976 + source_rev, 977 + patch_blob_ref, 978 + patch_blob_mime, 979 + patch_blob_size 980 + ) 981 + values (?, ?, ?, ?, ?, ?, ?, ?) 982 + `, pullAt, newRoundNumber, newPatch, combinedPatch, newSourceRev, blob.Ref.String(), blob.MimeType, blob.Size) 674 983 675 984 return err 676 985 } 677 986 678 - // Only used when stacking to update contents in the event of a rebase (the interdiff should be empty). 
679 - // otherwise submissions are immutable 680 - func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error { 987 + func SetDependentOn(e Execer, dependentOn syntax.ATURI, filters ...orm.Filter) error { 681 988 var conditions []string 682 989 var args []any 683 990 684 - args = append(args, sourceRev) 685 - args = append(args, newPatch) 991 + args = append(args, dependentOn) 686 992 687 993 for _, filter := range filters { 688 994 conditions = append(conditions, filter.Condition()) ··· 694 1000 whereClause = " where " + strings.Join(conditions, " and ") 695 1001 } 696 1002 697 - query := fmt.Sprintf("update pull_submissions set source_rev = ?, patch = ? %s", whereClause) 1003 + query := fmt.Sprintf("update pulls set dependent_on = ? %s", whereClause) 698 1004 _, err := e.Exec(query, args...) 699 1005 700 1006 return err ··· 712 1018 models.PullOpen, 713 1019 models.PullMerged, 714 1020 models.PullClosed, 715 - models.PullDeleted, 1021 + models.PullAbandoned, 716 1022 repoAt, 717 1023 ) 718 1024 ··· 724 1030 return count, nil 725 1031 } 726 1032 727 - // change-id parent-change-id 1033 + // change-id dependent_on 728 1034 // 729 - // 4 w ,-------- z (TOP) 730 - // 3 z <----',------- y 731 - // 2 y <-----',------ x 1035 + // 4 w ,-------- at_uri(z) (TOP) 1036 + // 3 z <----',------- at_uri(y) 1037 + // 2 y <-----',------ at_uri(x) 732 1038 // 1 x <------' nil (BOT) 733 1039 // 734 - // `w` is parent of none, so it is the top of the stack 735 - func GetStack(e Execer, stackId string) (models.Stack, error) { 736 - unorderedPulls, err := GetPulls( 737 - e, 738 - orm.FilterEq("stack_id", stackId), 739 - orm.FilterNotEq("state", models.PullDeleted), 740 - ) 1040 + // `w` has no dependents, so it is the top of the stack 1041 + // 1042 + // this unfortunately does a db query for *each* pull of the stack, 1043 + // ideally this would be a recursive query, but in the interest of implementation simplicity, 1044 + // we took the less performant route 
1045 + // 1046 + // TODO: make this less bad 1047 + func GetStack(e Execer, atUri syntax.ATURI) (models.Stack, error) { 1048 + // first get the pull for the given at-uri 1049 + pull, err := GetPull(e, orm.FilterEq("at_uri", atUri)) 741 1050 if err != nil { 742 1051 return nil, err 743 1052 } 744 - // map of parent-change-id to pull 745 - changeIdMap := make(map[string]*models.Pull, len(unorderedPulls)) 746 - parentMap := make(map[string]*models.Pull, len(unorderedPulls)) 747 - for _, p := range unorderedPulls { 748 - changeIdMap[p.ChangeId] = p 749 - if p.ParentChangeId != "" { 750 - parentMap[p.ParentChangeId] = p 1053 + 1054 + // Collect all pulls in the stack by traversing up and down 1055 + allPulls := []*models.Pull{pull} 1056 + visited := sets.New[syntax.ATURI]() 1057 + 1058 + // Traverse up to find all dependents 1059 + current := pull 1060 + for { 1061 + dependent, err := GetPull(e, 1062 + orm.FilterEq("dependent_on", current.AtUri()), 1063 + orm.FilterNotEq("state", models.PullAbandoned), 1064 + ) 1065 + if err != nil || dependent == nil { 1066 + break 751 1067 } 1068 + if visited.Contains(dependent.AtUri()) { 1069 + return allPulls, fmt.Errorf("circular dependency detected in stack") 1070 + } 1071 + allPulls = append(allPulls, dependent) 1072 + visited.Insert(dependent.AtUri()) 1073 + current = dependent 752 1074 } 753 1075 754 - // the top of the stack is the pull that is not a parent of any pull 1076 + // Traverse down to find all dependencies 1077 + current = pull 1078 + for current.DependentOn != nil { 1079 + dependency, err := GetPull( 1080 + e, 1081 + orm.FilterEq("at_uri", current.DependentOn), 1082 + orm.FilterNotEq("state", models.PullAbandoned), 1083 + ) 1084 + 1085 + if err != nil { 1086 + return allPulls, fmt.Errorf("failed to find parent pull request, stack is malformed, missing PR: %s", current.DependentOn) 1087 + } 1088 + if visited.Contains(dependency.AtUri()) { 1089 + return allPulls, fmt.Errorf("circular dependency detected in stack") 
1090 + } 1091 + allPulls = append(allPulls, dependency) 1092 + visited.Insert(dependency.AtUri()) 1093 + current = dependency 1094 + } 1095 + 1096 + // sort the list: find the top and build ordered list 1097 + atUriMap := make(map[syntax.ATURI]*models.Pull, len(allPulls)) 1098 + dependentMap := make(map[syntax.ATURI]*models.Pull, len(allPulls)) 1099 + 1100 + for _, p := range allPulls { 1101 + atUriMap[p.AtUri()] = p 1102 + if p.DependentOn != nil { 1103 + dependentMap[*p.DependentOn] = p 1104 + } 1105 + } 1106 + 1107 + // the top of the stack is the pull that no other pull depends on 755 1108 var topPull *models.Pull 756 - for _, maybeTop := range unorderedPulls { 757 - if _, ok := parentMap[maybeTop.ChangeId]; !ok { 1109 + for _, maybeTop := range allPulls { 1110 + if _, ok := dependentMap[maybeTop.AtUri()]; !ok { 758 1111 topPull = maybeTop 759 1112 break 760 1113 } ··· 763 1116 pulls := []*models.Pull{} 764 1117 for { 765 1118 pulls = append(pulls, topPull) 766 - if topPull.ParentChangeId != "" { 767 - if next, ok := changeIdMap[topPull.ParentChangeId]; ok { 1119 + if topPull.DependentOn != nil { 1120 + if next, ok := atUriMap[*topPull.DependentOn]; ok { 768 1121 topPull = next 769 1122 } else { 770 - return nil, fmt.Errorf("failed to find parent pull request, stack is malformed") 1123 + return pulls, fmt.Errorf("failed to find parent pull request, stack is malformed") 771 1124 } 772 1125 } else { 773 1126 break ··· 777 1130 return pulls, nil 778 1131 } 779 1132 780 - func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) { 781 - pulls, err := GetPulls( 782 - e, 783 - orm.FilterEq("stack_id", stackId), 784 - orm.FilterEq("state", models.PullDeleted), 785 - ) 1133 + func GetAbandonedPulls(e Execer, atUri syntax.ATURI) ([]*models.Pull, error) { 1134 + stack, err := GetStack(e, atUri) 786 1135 if err != nil { 787 1136 return nil, err 788 1137 } 789 1138 790 - return pulls, nil 1139 + var abandoned []*models.Pull 1140 + for _, p := range stack { 
1141 + if p.State == models.PullAbandoned { 1142 + abandoned = append(abandoned, p) 1143 + } 1144 + } 1145 + 1146 + return abandoned, nil 791 1147 }
+1 -1
appview/db/repos.go
··· 261 261 models.PullOpen, 262 262 models.PullMerged, 263 263 models.PullClosed, 264 - models.PullDeleted, 264 + models.PullAbandoned, 265 265 }, args...) 266 266 rows, err = e.Query( 267 267 pullCountQuery,
+143
appview/ingester.go
··· 4 4 "context" 5 5 "encoding/json" 6 6 "fmt" 7 + "io" 7 8 "log/slog" 8 9 "maps" 10 + "net/http" 11 + "net/url" 9 12 "slices" 13 + "sync" 10 14 11 15 "time" 12 16 ··· 14 18 jmodels "github.com/bluesky-social/jetstream/pkg/models" 15 19 "github.com/go-git/go-git/v5/plumbing" 16 20 "github.com/ipfs/go-cid" 21 + "golang.org/x/sync/errgroup" 17 22 "tangled.org/core/api/tangled" 18 23 "tangled.org/core/appview/config" 19 24 "tangled.org/core/appview/db" ··· 79 84 err = i.ingestString(e) 80 85 case tangled.RepoIssueNSID: 81 86 err = i.ingestIssue(ctx, e) 87 + case tangled.RepoPullNSID: 88 + err = i.ingestPull(ctx, e) 82 89 case tangled.RepoIssueCommentNSID: 83 90 err = i.ingestIssueComment(e) 84 91 case tangled.LabelDefinitionNSID: ··· 862 869 ); err != nil { 863 870 l.Error("failed to delete", "err", err) 864 871 return fmt.Errorf("failed to delete issue record: %w", err) 872 + } 873 + if err := tx.Commit(); err != nil { 874 + l.Error("failed to commit txn", "err", err) 875 + return err 876 + } 877 + 878 + return nil 879 + } 880 + 881 + return nil 882 + } 883 + 884 + func (i *Ingester) ingestPull(ctx context.Context, e *jmodels.Event) error { 885 + did := e.Did 886 + rkey := e.Commit.RKey 887 + 888 + var err error 889 + 890 + l := i.Logger.With("handler", "ingestPull", "nsid", e.Commit.Collection, "did", did, "rkey", rkey) 891 + l.Info("ingesting record") 892 + 893 + ddb, ok := i.Db.Execer.(*db.DB) 894 + if !ok { 895 + return fmt.Errorf("failed to index pull record, invalid db cast") 896 + } 897 + 898 + switch e.Commit.Operation { 899 + case jmodels.CommitOperationCreate, jmodels.CommitOperationUpdate: 900 + raw := json.RawMessage(e.Commit.Record) 901 + record := tangled.RepoPull{} 902 + err = json.Unmarshal(raw, &record) 903 + if err != nil { 904 + l.Error("invalid record", "err", err) 905 + return err 906 + } 907 + 908 + ownerId, err := i.IdResolver.ResolveIdent(ctx, did) 909 + if err != nil { 910 + l.Error("failed to resolve did") 911 + return err 912 + } 913 + 914 
+ // go through and fetch all blobs in parallel 915 + readers := make([]*io.ReadCloser, len(record.Rounds)) 916 + var mu sync.Mutex 917 + 918 + g, gctx := errgroup.WithContext(ctx) 919 + 920 + for idx, b := range record.Rounds { 921 + g.Go(func() error { 922 + ownerPds := ownerId.PDSEndpoint() 923 + url, _ := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", ownerPds)) 924 + q := url.Query() 925 + q.Set("cid", b.PatchBlob.Ref.String()) 926 + q.Set("did", did) 927 + url.RawQuery = q.Encode() 928 + 929 + req, err := http.NewRequestWithContext(gctx, http.MethodGet, url.String(), nil) 930 + if err != nil { 931 + l.Error("failed to create request") 932 + return err 933 + } 934 + req.Header.Set("Content-Type", "application/json") 935 + 936 + resp, err := http.DefaultClient.Do(req) 937 + if err != nil { 938 + l.Error("failed to make request") 939 + return err 940 + } 941 + 942 + mu.Lock() 943 + readers[idx] = &resp.Body 944 + mu.Unlock() 945 + 946 + return nil 947 + }) 948 + } 949 + 950 + if err := g.Wait(); err != nil { 951 + for _, r := range readers { 952 + if r != nil && *r != nil { 953 + (*r).Close() 954 + } 955 + } 956 + return err 957 + } 958 + 959 + defer func() { 960 + for _, r := range readers { 961 + if r != nil && *r != nil { 962 + (*r).Close() 963 + } 964 + } 965 + }() 966 + 967 + pull := models.PullFromRecord(did, rkey, record, readers) 968 + // if err := i.Validator.ValidateIssue(&issue); err != nil { 969 + // return fmt.Errorf("failed to validate issue: %w", err) 970 + // } 971 + 972 + tx, err := ddb.BeginTx(ctx, nil) 973 + if err != nil { 974 + l.Error("failed to begin transaction", "err", err) 975 + return err 976 + } 977 + defer tx.Rollback() 978 + 979 + err = db.PutPull(tx, &pull) 980 + if err != nil { 981 + l.Error("failed to create pull", "err", err) 982 + return err 983 + } 984 + 985 + err = tx.Commit() 986 + if err != nil { 987 + l.Error("failed to commit txn", "err", err) 988 + return err 989 + } 990 + 991 + return nil 992 + 993 + case 
jmodels.CommitOperationDelete: 994 + tx, err := ddb.BeginTx(ctx, nil) 995 + if err != nil { 996 + l.Error("failed to begin transaction", "err", err) 997 + return err 998 + } 999 + defer tx.Rollback() 1000 + 1001 + if err := db.AbandonPulls( 1002 + tx, 1003 + orm.FilterEq("owner_did", did), 1004 + orm.FilterEq("rkey", rkey), 1005 + ); err != nil { 1006 + l.Error("failed to abandon", "err", err) 1007 + return fmt.Errorf("failed to abandon pull record: %w", err) 865 1008 } 866 1009 if err := tx.Commit(); err != nil { 867 1010 l.Error("failed to commit txn", "err", err)
+6 -15
appview/middleware/middleware.go
··· 252 252 			return
253 253 		}
254 254 
255     -		pr, err := db.GetPull(mw.db, f.RepoAt(), prIdInt)
    255 +		pr, err := db.GetPull(mw.db, orm.FilterEq("repo_at", f.RepoAt()), orm.FilterEq("pull_id", prIdInt))
256 256 		if err != nil {
257 257 			log.Println("failed to get pull and comments", err)
258 258 			mw.pages.Error404(w)
259 259 			return
260 260 		}
261 261 
262 262 		ctx := context.WithValue(r.Context(), "pull", pr)
263 263 
264     -		if pr.IsStacked() {
265     -			stack, err := db.GetStack(mw.db, pr.StackId)
266     -			if err != nil {
267     -				log.Println("failed to get stack", err)
268     -				return
269     -			}
270     -			abandonedPulls, err := db.GetAbandonedPulls(mw.db, pr.StackId)
271     -			if err != nil {
272     -				log.Println("failed to get abandoned pulls", err)
273     -				return
274     -			}
    264 +		stack, err := db.GetStack(mw.db, pr.AtUri())
    265 +		if err != nil {
    266 +			log.Println("failed to get stack", err)
    267 +		} else {
275     -
276     -			ctx = context.WithValue(ctx, "stack", stack)
277     -			ctx = context.WithValue(ctx, "abandonedPulls", abandonedPulls)
278     -		}
    268 +			ctx = context.WithValue(ctx, "stack", stack)
    269 +		}
279 270 
280 271 		next.ServeHTTP(w, r.WithContext(ctx))
281 272 	})
+196 -18
appview/models/pull.go
··· 1 1 package models 2 2 3 3 import ( 4 + "bytes" 5 + "compress/gzip" 4 6 "fmt" 7 + "io" 5 8 "log" 6 9 "slices" 7 10 "strings" 8 11 "time" 9 12 10 - "github.com/bluesky-social/indigo/atproto/syntax" 11 13 "tangled.org/core/api/tangled" 12 14 "tangled.org/core/patchutil" 13 15 "tangled.org/core/types" 16 + 17 + "github.com/bluesky-social/indigo/atproto/syntax" 18 + lexutil "github.com/bluesky-social/indigo/lex/util" 14 19 ) 15 20 16 21 type PullState int ··· 19 24 PullClosed PullState = iota 20 25 PullOpen 21 26 PullMerged 22 - PullDeleted 27 + PullAbandoned 23 28 ) 24 29 25 30 func (p PullState) String() string { ··· 30 35 return "merged" 31 36 case PullClosed: 32 37 return "closed" 33 - case PullDeleted: 34 - return "deleted" 38 + case PullAbandoned: 39 + return "abandoned" 35 40 default: 36 41 return "closed" 37 42 } ··· 46 51 func (p PullState) IsClosed() bool { 47 52 return p == PullClosed 48 53 } 49 - func (p PullState) IsDeleted() bool { 50 - return p == PullDeleted 54 + func (p PullState) IsAbandoned() bool { 55 + return p == PullAbandoned 51 56 } 52 57 53 58 type Pull struct { ··· 70 75 References []syntax.ATURI 71 76 72 77 // stacking 73 - StackId string // nullable string 74 - ChangeId string // nullable string 75 - ParentChangeId string // nullable string 78 + DependentOn *syntax.ATURI 79 + // StackId string // nullable string 80 + // ChangeId string // nullable string 81 + // ParentChangeId string // nullable string 76 82 77 83 // meta 78 84 Created time.Time ··· 89 95 if p.PullSource != nil { 90 96 source = &tangled.RepoPull_Source{} 91 97 source.Branch = p.PullSource.Branch 92 - source.Sha = p.LatestSha() 93 98 if p.PullSource.RepoAt != nil { 94 99 s := p.PullSource.RepoAt.String() 95 100 source.Repo = &s ··· 104 109 references[i] = string(uri) 105 110 } 106 111 112 + rounds := make([]*tangled.RepoPull_Round, len(p.Submissions)) 113 + for i, submission := range p.Submissions { 114 + rounds[i] = submission.AsRecord() 115 + } 116 + 117 + var 
dependentOn *string 118 + if p.DependentOn != nil { 119 + x := p.DependentOn.String() 120 + dependentOn = &x 121 + } 122 + 107 123 record := tangled.RepoPull{ 108 124 Title: p.Title, 109 125 Body: &p.Body, ··· 114 130 Repo: p.RepoAt.String(), 115 131 Branch: p.TargetBranch, 116 132 }, 117 - Source: source, 133 + Rounds: rounds, 134 + Source: source, 135 + DependentOn: dependentOn, 118 136 } 119 137 return record 120 138 } 121 139 140 + func PullFromRecord(did, rkey string, record tangled.RepoPull, blobs []*io.ReadCloser) Pull { 141 + created, err := time.Parse(time.RFC3339, record.CreatedAt) 142 + if err != nil { 143 + created = time.Now() 144 + } 145 + 146 + body := "" 147 + if record.Body != nil { 148 + body = *record.Body 149 + } 150 + 151 + var mentions []syntax.DID 152 + for _, m := range record.Mentions { 153 + if did, err := syntax.ParseDID(m); err == nil { 154 + mentions = append(mentions, did) 155 + } 156 + } 157 + 158 + var targetRepoAt syntax.ATURI 159 + var targetBranch string 160 + if record.Target != nil { 161 + if uri, err := syntax.ParseATURI(record.Target.Repo); err == nil { 162 + targetRepoAt = uri 163 + } 164 + targetBranch = record.Target.Branch 165 + } 166 + 167 + var pullSource *PullSource 168 + if record.Source != nil { 169 + pullSource = &PullSource{ 170 + Branch: record.Source.Branch, 171 + } 172 + 173 + if record.Source.Repo != nil { 174 + if uri, err := syntax.ParseATURI(*record.Source.Repo); err == nil { 175 + pullSource.RepoAt = &uri 176 + } 177 + } 178 + } 179 + 180 + var dependentOn *syntax.ATURI 181 + if record.DependentOn != nil { 182 + if uri, err := syntax.ParseATURI(*record.DependentOn); err == nil { 183 + dependentOn = &uri 184 + } 185 + } 186 + 187 + var submissions []*PullSubmission 188 + for i, s := range record.Rounds { 189 + var blob *io.ReadCloser 190 + if i < len(blobs) { 191 + blob = blobs[i] 192 + } 193 + submission, err := PullSubmissionFromRecord(did, rkey, i, s, blob) 194 + // TODO: log or bubble error here 195 + if 
err != nil { 196 + submissions = append(submissions, nil) 197 + } else { 198 + submissions = append(submissions, submission) 199 + } 200 + } 201 + 202 + return Pull{ 203 + RepoAt: targetRepoAt, 204 + OwnerDid: did, 205 + Rkey: rkey, 206 + Title: record.Title, 207 + Body: body, 208 + TargetBranch: targetBranch, 209 + PullSource: pullSource, 210 + State: PullOpen, 211 + Submissions: submissions, 212 + Created: created, 213 + DependentOn: dependentOn, 214 + } 215 + } 216 + 217 + func PullSubmissionFromRecord(did, rkey string, roundNumber int, round *tangled.RepoPull_Round, blob *io.ReadCloser) (*PullSubmission, error) { 218 + created, err := time.Parse(time.RFC3339, round.CreatedAt) 219 + if err != nil { 220 + created = time.Now() 221 + } 222 + 223 + var patch, sourceRev string 224 + if blob != nil { 225 + p, err := extractGzip(*blob) 226 + if err != nil { 227 + return nil, fmt.Errorf("failed to extract gzip: %w", err) 228 + } 229 + patch = p 230 + if patchutil.IsFormatPatch(p) { 231 + patches, err := patchutil.ExtractPatches(p) 232 + if err != nil { 233 + return nil, fmt.Errorf("failed to extract patches: %w", err) 234 + } 235 + 236 + for _, part := range patches { 237 + sourceRev = part.SHA 238 + } 239 + } 240 + } 241 + 242 + log.Println("source sha", sourceRev) 243 + 244 + return &PullSubmission{ 245 + PullAt: syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", did, tangled.RepoPullNSID, rkey)), 246 + RoundNumber: roundNumber, 247 + Blob: *round.PatchBlob, 248 + Created: created, 249 + Patch: patch, 250 + SourceRev: sourceRev, 251 + }, nil 252 + } 253 + 122 254 type PullSource struct { 123 255 Branch string 124 256 RepoAt *syntax.ATURI ··· 136 268 137 269 // content 138 270 RoundNumber int 271 + Blob lexutil.LexBlob 139 272 Patch string 140 273 Combined string 141 274 Comments []PullComment ··· 225 358 return false 226 359 } 227 360 228 - func (p *Pull) IsStacked() bool { 229 - return p.StackId != "" 230 - } 231 - 232 361 func (p *Pull) Participants() []string { 233 362 
participantSet := make(map[string]struct{}) 234 363 participants := []string{} ··· 265 394 return patches 266 395 } 267 396 397 + // empty if invalid, not otherwise 398 + func (s PullSubmission) ChangeId() string { 399 + patches := s.AsFormatPatch() 400 + if len(patches) != 1 { 401 + return "" 402 + } 403 + 404 + c, err := patches[0].ChangeId() 405 + if err != nil { 406 + return "" 407 + } 408 + 409 + return c 410 + } 411 + 268 412 func (s *PullSubmission) Participants() []string { 269 413 participantSet := make(map[string]struct{}) 270 414 participants := []string{} ··· 293 437 return s.Combined 294 438 } 295 439 440 + func (s *PullSubmission) GetBlob() *lexutil.LexBlob { 441 + if !s.Blob.Ref.Defined() { 442 + return nil 443 + } 444 + 445 + return &s.Blob 446 + } 447 + 448 + func (s *PullSubmission) AsRecord() *tangled.RepoPull_Round { 449 + return &tangled.RepoPull_Round{ 450 + CreatedAt: s.Created.Format(time.RFC3339), 451 + PatchBlob: s.GetBlob(), 452 + } 453 + } 454 + 296 455 type Stack []*Pull 297 456 298 457 // position of this pull in the stack 299 458 func (stack Stack) Position(pull *Pull) int { 300 459 return slices.IndexFunc(stack, func(p *Pull) bool { 301 - return p.ChangeId == pull.ChangeId 460 + return p.AtUri() == pull.AtUri() 302 461 }) 303 462 } 304 463 ··· 372 531 break 373 532 } 374 533 375 - // skip over deleted PRs 376 - if p.State != PullDeleted { 534 + // skip over abandoned PRs 535 + if p.State != PullAbandoned { 377 536 mergeable = append(mergeable, p) 378 537 } 379 538 } ··· 385 544 Repo *Repo 386 545 Branch string 387 546 } 547 + 548 + func extractGzip(blob io.Reader) (string, error) { 549 + var b bytes.Buffer 550 + r, err := gzip.NewReader(blob) 551 + if err != nil { 552 + return "", err 553 + } 554 + defer r.Close() 555 + 556 + const maxSize = 15 * 1024 * 1024 557 + limitedReader := io.LimitReader(r, maxSize) 558 + 559 + _, err = io.Copy(&b, limitedReader) 560 + if err != nil { 561 + return "", err 562 + } 563 + 564 + return 
b.String(), nil 565 + }
+2 -2
appview/notify/db/db.go
··· 282 282 l := log.FromContext(ctx) 283 283 284 284 pull, err := db.GetPull(n.db, 285 - syntax.ATURI(comment.RepoAt), 286 - comment.PullId, 285 + orm.FilterEq("repo_at", syntax.ATURI(comment.RepoAt)), 286 + orm.FilterEq("pull_id", comment.PullId), 287 287 ) 288 288 if err != nil { 289 289 l.Error("failed to get pulls", "err", err)
-1
appview/pages/pages.go
··· 1180 1180 Active string 1181 1181 Pull *models.Pull 1182 1182 Stack models.Stack 1183 - AbandonedPulls []*models.Pull 1184 1183 Backlinks []models.RichReferenceLink 1185 1184 BranchDeleteStatus *models.BranchDeleteStatus 1186 1185 MergeCheck types.MergeCheckResponse
+1 -1
appview/pages/templates/repo/pulls/fragments/pullActions.html
··· 6 6 {{ $totalPulls := sub 0 1 }} 7 7 {{ $below := sub 0 1 }} 8 8 {{ $stackCount := "" }} 9 - {{ if .Pull.IsStacked }} 9 + {{ if (gt (len .Stack) 1) }} 10 10 {{ $totalPulls = len $stack }} 11 11 {{ $below = $stack.Below .Pull }} 12 12 {{ $mergeable := len $below.Mergeable }}
+1 -17
appview/pages/templates/repo/pulls/fragments/pullHeader.html
··· 6 6 </h1> 7 7 </header> 8 8 9 - {{ $bgColor := "bg-gray-800 dark:bg-gray-700" }} 10 - {{ $icon := "ban" }} 11 - 12 - {{ if .Pull.State.IsOpen }} 13 - {{ $bgColor = "bg-green-600 dark:bg-green-700" }} 14 - {{ $icon = "git-pull-request" }} 15 - {{ else if .Pull.State.IsMerged }} 16 - {{ $bgColor = "bg-purple-600 dark:bg-purple-700" }} 17 - {{ $icon = "git-merge" }} 18 - {{ end }} 19 - 20 9 <section> 21 10 <div class="flex items-center gap-2"> 22 - <span 23 - class="inline-flex items-center rounded px-2 py-[5px] {{ $bgColor }} text-sm" 24 - > 25 - {{ i $icon "w-3 h-3 mr-1.5 text-white" }} 26 - <span class="text-white">{{ .Pull.State.String }}</span> 27 - </span> 11 + {{ template "repo/pulls/fragments/pullState" .Pull.State }} 28 12 <span class="text-gray-500 dark:text-gray-400 text-sm flex flex-wrap items-center gap-1"> 29 13 opened by 30 14 {{ template "user/fragments/picHandleLink" .Pull.OwnerDid }}
-18
appview/pages/templates/repo/pulls/fragments/pullStack.html
··· 14 14 </summary> 15 15 {{ block "pullList" (list .Stack $) }} {{ end }} 16 16 </details> 17 - 18 - {{ if gt (len .AbandonedPulls) 0 }} 19 - <details class="mt-4 bg-white dark:bg-gray-800 group" open> 20 - <summary class="p-2 text-sm font-bold list-none cursor-pointer hover:text-gray-500 hover:dark:text-gray-400"> 21 - <span class="flex items-center gap-2"> 22 - <span class="group-open:hidden"> 23 - {{ i "chevrons-up-down" "w-4 h-4" }} 24 - </span> 25 - <span class="hidden group-open:flex"> 26 - {{ i "chevrons-down-up" "w-4 h-4" }} 27 - </span> 28 - ABANDONED PULLS 29 - <span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 text-sm ml-1">{{ len .AbandonedPulls }}</span> 30 - </span> 31 - </summary> 32 - {{ block "pullList" (list .AbandonedPulls $) }} {{ end }} 33 - </details> 34 - {{ end }} 35 17 {{ end }} 36 18 37 19 {{ define "pullList" }}
+22
appview/pages/templates/repo/pulls/fragments/pullState.html
··· 1 + {{ define "repo/pulls/fragments/pullState" }} 2 + {{ $bgColor := "bg-gray-800 dark:bg-gray-700" }} 3 + {{ $icon := "ban" }} 4 + 5 + {{ if .IsOpen }} 6 + {{ $bgColor = "bg-green-600 dark:bg-green-700" }} 7 + {{ $icon = "git-pull-request" }} 8 + {{ else if .IsMerged }} 9 + {{ $bgColor = "bg-purple-600 dark:bg-purple-700" }} 10 + {{ $icon = "git-merge" }} 11 + {{ else if .IsAbandoned }} 12 + {{ $bgColor = "bg-red-600 dark:bg-red-700" }} 13 + {{ $icon = "git-pull-request-closed" }} 14 + {{ end }} 15 + 16 + <span class="inline-flex items-center rounded px-2 py-[5px] {{ $bgColor }} text-sm"> 17 + {{ i $icon "w-3 h-3 mr-1.5 text-white" }} 18 + <span class="text-white">{{ .String }}</span> 19 + </span> 20 + {{ end }} 21 + 22 +
+1 -1
appview/pages/templates/repo/pulls/fragments/summarizedPullState.html
··· 8 8 {{ else if .IsMerged }} 9 9 {{ $fgColor = "text-purple-600 dark:text-purple-500" }} 10 10 {{ $icon = "git-merge" }} 11 - {{ else if .IsDeleted }} 11 + {{ else if .IsAbandoned }} 12 12 {{ $fgColor = "text-red-600 dark:text-red-500" }} 13 13 {{ $icon = "git-pull-request-closed" }} 14 14 {{ end }}
+2 -2
appview/pages/templates/repo/pulls/pull.html
··· 104 104 105 105 {{ define "repoContent" }} 106 106 {{ template "repo/pulls/fragments/pullHeader" . }} 107 - {{ if .Pull.IsStacked }} 107 + {{ if (gt (len .Stack) 1) }} 108 108 <div class="mt-8"> 109 109 {{ template "repo/pulls/fragments/pullStack" . }} 110 110 </div> ··· 461 461 > 462 462 </div> 463 463 </div> 464 - {{ else if .Pull.State.IsDeleted }} 464 + {{ else if .Pull.State.IsAbandoned }} 465 465 <div class="bg-red-50 dark:bg-red-900 border border-red-500 rounded drop-shadow-sm px-6 py-2 relative"> 466 466 <div class="flex items-center gap-2 text-red-500 dark:text-red-300"> 467 467 {{ i "git-pull-request-closed" "w-4 h-4" }}
+1 -39
appview/pages/templates/repo/pulls/pulls.html
··· 79 79 </a> 80 80 </div> 81 81 <div class="text-sm text-gray-500 dark:text-gray-400 flex flex-wrap items-center gap-1"> 82 - {{ $bgColor := "bg-gray-800 dark:bg-gray-700" }} 83 - {{ $icon := "ban" }} 84 - 85 - {{ if .State.IsOpen }} 86 - {{ $bgColor = "bg-green-600 dark:bg-green-700" }} 87 - {{ $icon = "git-pull-request" }} 88 - {{ else if .State.IsMerged }} 89 - {{ $bgColor = "bg-purple-600 dark:bg-purple-700" }} 90 - {{ $icon = "git-merge" }} 91 - {{ end }} 92 - 93 - 94 - <span 95 - class="inline-flex items-center rounded px-2 py-[5px] {{ $bgColor }} text-sm" 96 - > 97 - {{ i $icon "w-3 h-3 mr-1.5 text-white" }} 98 - <span class="text-white">{{ .State.String }}</span> 99 - </span> 100 - 82 + {{ template "repo/pulls/fragments/pullState" .State }} 101 83 <span class="ml-1"> 102 84 {{ template "user/fragments/picHandleLink" .OwnerDid }} 103 85 </span> ··· 132 114 {{ end }} 133 115 </div> 134 116 </div> 135 - {{ if .StackId }} 136 - {{ $otherPulls := index $.Stacks .StackId }} 137 - {{ if gt (len $otherPulls) 0 }} 138 - <details class="bg-white dark:bg-gray-800 group"> 139 - <summary class="pb-4 px-6 text-xs list-none cursor-pointer hover:text-gray-500 hover:dark:text-gray-400"> 140 - {{ $s := "s" }} 141 - {{ if eq (len $otherPulls) 1 }} 142 - {{ $s = "" }} 143 - {{ end }} 144 - <div class="group-open:hidden flex items-center gap-2"> 145 - {{ i "chevrons-up-down" "w-4 h-4" }} expand {{ len $otherPulls }} pull{{$s}} in this stack 146 - </div> 147 - <div class="hidden group-open:flex items-center gap-2"> 148 - {{ i "chevrons-down-up" "w-4 h-4" }} hide {{ len $otherPulls }} pull{{$s}} in this stack 149 - </div> 150 - </summary> 151 - {{ block "stackedPullList" (list $otherPulls $) }} {{ end }} 152 - </details> 153 - {{ end }} 154 - {{ end }} 155 117 </div> 156 118 {{ end }} 157 119 </div>
+265 -254
appview/pulls/pulls.go
··· 47 47 lexutil "github.com/bluesky-social/indigo/lex/util" 48 48 indigoxrpc "github.com/bluesky-social/indigo/xrpc" 49 49 "github.com/go-chi/chi/v5" 50 - "github.com/google/uuid" 51 50 ) 52 51 53 52 const ApplicationGzip = "application/gzip" ··· 190 189 191 190 // can be nil if this pull is not stacked 192 191 stack, _ := r.Context().Value("stack").(models.Stack) 193 - abandonedPulls, _ := r.Context().Value("abandonedPulls").([]*models.Pull) 194 192 195 193 mergeCheckResponse := s.mergeCheck(r, f, pull, stack) 196 194 branchDeleteStatus := s.branchDeleteStatus(r, f, pull) ··· 206 204 shas = append(shas, s.SourceRev) 207 205 } 208 206 for _, p := range stack { 209 - shas = append(shas, p.LatestSha()) 210 - } 211 - for _, p := range abandonedPulls { 212 207 shas = append(shas, p.LatestSha()) 213 208 } 214 209 ··· 277 272 diff = patchutil.Interdiff(previousPatch, currentPatch) 278 273 } 279 274 280 - s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{ 275 + fmt.Println(s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{ 281 276 LoggedInUser: user, 282 277 RepoInfo: s.repoResolver.GetRepoInfo(r, user), 283 278 Pull: pull, 284 279 Stack: stack, 285 - AbandonedPulls: abandonedPulls, 286 280 Backlinks: backlinks, 287 281 BranchDeleteStatus: branchDeleteStatus, 288 282 MergeCheck: mergeCheckResponse, ··· 297 291 UserReacted: userReactions, 298 292 299 293 LabelDefs: defs, 300 - }) 294 + })) 301 295 } 302 296 303 297 func (s *Pulls) RepoSinglePull(w http.ResponseWriter, r *http.Request) { ··· 326 320 Host: host, 327 321 } 328 322 329 - patch := pull.LatestPatch() 330 - if pull.IsStacked() { 331 - // combine patches of substack 332 - subStack := stack.Below(pull) 333 - // collect the portion of the stack that is mergeable 334 - mergeable := subStack.Mergeable() 335 - // combine each patch 336 - patch = mergeable.CombinedPatch() 337 - } 323 + // combine patches of substack 324 + subStack := stack.Below(pull) 325 + // collect the portion of the stack that is mergeable 
326 + mergeable := subStack.Mergeable() 327 + // combine each patch 328 + patch := mergeable.CombinedPatch() 338 329 339 330 resp, xe := tangled.RepoMergeCheck( 340 331 r.Context(), ··· 432 423 } 433 424 434 425 func (s *Pulls) resubmitCheck(r *http.Request, repo *models.Repo, pull *models.Pull, stack models.Stack) pages.ResubmitResult { 435 - if pull.State == models.PullMerged || pull.State == models.PullDeleted || pull.PullSource == nil { 426 + if pull.State == models.PullMerged || pull.State == models.PullAbandoned || pull.PullSource == nil { 436 427 return pages.Unknown 437 428 } 438 429 ··· 478 469 479 470 targetBranch := branchResp 480 471 481 - latestSourceRev := pull.LatestSha() 482 - 483 - if pull.IsStacked() && stack != nil { 484 - top := stack[0] 485 - latestSourceRev = top.LatestSha() 486 - } 472 + top := stack[0] 473 + latestSourceRev := top.LatestSha() 487 474 488 475 if latestSourceRev != targetBranch.Hash { 489 476 return pages.ShouldResubmit ··· 646 633 countOpts := searchOpts 647 634 countOpts.Page = pagination.Page{Limit: 1} 648 635 for _, ps := range []models.PullState{models.PullOpen, models.PullMerged, models.PullClosed} { 649 - ps := ps 650 636 countOpts.State = &ps 651 637 countRes, err := s.indexer.Search(r.Context(), countOpts) 652 638 if err != nil { ··· 710 696 // we want to group all stacked PRs into just one list 711 697 stacks := make(map[string]models.Stack) 712 698 var shas []string 713 - n := 0 699 + // n := 0 714 700 for _, p := range pulls { 715 701 // store the sha for later 716 702 shas = append(shas, p.LatestSha()) 717 703 // this PR is stacked 718 - if p.StackId != "" { 719 - // we have already seen this PR stack 720 - if _, seen := stacks[p.StackId]; seen { 721 - stacks[p.StackId] = append(stacks[p.StackId], p) 722 - // skip this PR 723 - } else { 724 - stacks[p.StackId] = nil 725 - pulls[n] = p 726 - n++ 727 - } 728 - } else { 729 - pulls[n] = p 730 - n++ 731 - } 704 + // if p.StackId != "" { 705 + // // we have already 
seen this PR stack 706 + // if _, seen := stacks[p.StackId]; seen { 707 + // stacks[p.StackId] = append(stacks[p.StackId], p) 708 + // // skip this PR 709 + // } else { 710 + // stacks[p.StackId] = nil 711 + // pulls[n] = p 712 + // n++ 713 + // } 714 + // } else { 715 + // pulls[n] = p 716 + // n++ 717 + // } 732 718 } 733 - pulls = pulls[:n] 719 + // pulls = pulls[:n] 734 720 735 721 ps, err := db.GetPipelineStatuses( 736 722 s.db, ··· 770 756 filterState = state.String() 771 757 } 772 758 773 - s.pages.RepoPulls(w, pages.RepoPullsParams{ 759 + fmt.Println(s.pages.RepoPulls(w, pages.RepoPullsParams{ 774 760 LoggedInUser: s.oauth.GetMultiAccountUser(r), 775 761 RepoInfo: repoInfo, 776 762 Pulls: pulls, ··· 781 767 Pipelines: m, 782 768 Page: page, 783 769 PullCount: totalPulls, 784 - }) 770 + })) 785 771 } 786 772 787 773 func (s *Pulls) PullComment(w http.ResponseWriter, r *http.Request) { ··· 1114 1100 } 1115 1101 recordPullSource := &tangled.RepoPull_Source{ 1116 1102 Branch: sourceBranch, 1117 - Sha: comparison.Rev2, 1118 1103 } 1119 1104 1120 1105 s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) ··· 1229 1214 recordPullSource := &tangled.RepoPull_Source{ 1230 1215 Branch: sourceBranch, 1231 1216 Repo: &forkAtUriStr, 1232 - Sha: sourceRev, 1233 1217 } 1234 1218 1235 1219 s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) ··· 1302 1286 mentions, references := s.mentionsResolver.Resolve(r.Context(), body) 1303 1287 1304 1288 rkey := tid.TID() 1289 + 1290 + blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(patch), ApplicationGzip) 1291 + if err != nil { 1292 + log.Println("failed to upload patch", err) 1293 + s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1294 + return 1295 + } 1296 + 1297 + record := tangled.RepoPull{ 1298 + Title: title, 1299 + Body: &body, 1300 + Target: &tangled.RepoPull_Target{ 1301 + Repo: string(repo.RepoAt()), 1302 + Branch: targetBranch, 1303 + }, 1304 + Source: recordPullSource, 1305 + CreatedAt: time.Now().Format(time.RFC3339), 1306 + Rounds: []*tangled.RepoPull_Round{ 1307 + { 1308 + CreatedAt: time.Now().Format(time.RFC3339), 1309 + PatchBlob: blob.Blob, 1310 + }, 1311 + }, 1312 + } 1305 1313 initialSubmission := models.PullSubmission{ 1306 1314 Patch: patch, 1307 1315 Combined: combined, 1308 1316 SourceRev: sourceRev, 1317 + Blob: *blob.Blob, 1309 1318 } 1310 1319 pull := &models.Pull{ 1311 1320 Title: title, ··· 1320 1329 &initialSubmission, 1321 1330 }, 1322 1331 PullSource: pullSource, 1332 + State: models.PullOpen, 1323 1333 } 1324 - err = db.NewPull(tx, pull) 1334 + 1335 + _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1336 + Collection: tangled.RepoPullNSID, 1337 + Repo: user.Active.Did, 1338 + Rkey: rkey, 1339 + Record: &lexutil.LexiconTypeDecoder{ 1340 + Val: &record, 1341 + }, 1342 + }) 1325 1343 if err != nil { 1326 1344 log.Println("failed to create pull request", err) 1327 - s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1328 - return 1329 - } 1330 - pullId, err := db.NextPullId(tx, repo.RepoAt()) 1331 - if err != nil { 1332 - log.Println("failed to get pull id", err) 1333 1345 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1334 1346 return 1335 1347 } 1336 1348 1337 - blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(patch), ApplicationGzip) 1349 + err = db.PutPull(tx, pull) 1338 1350 if err != nil { 1339 - log.Println("failed to upload patch", err) 1351 + log.Println("failed to create pull request", err) 1340 1352 s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1341 1353 return 1342 1354 } 1343 - 1344 - _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1345 - Collection: tangled.RepoPullNSID, 1346 - Repo: user.Active.Did, 1347 - Rkey: rkey, 1348 - Record: &lexutil.LexiconTypeDecoder{ 1349 - Val: &tangled.RepoPull{ 1350 - Title: title, 1351 - Target: &tangled.RepoPull_Target{ 1352 - Repo: string(repo.RepoAt()), 1353 - Branch: targetBranch, 1354 - }, 1355 - PatchBlob: blob.Blob, 1356 - Source: recordPullSource, 1357 - CreatedAt: time.Now().Format(time.RFC3339), 1358 - }, 1359 - }, 1360 - }) 1355 + pullId, err := db.NextPullId(tx, repo.RepoAt()) 1361 1356 if err != nil { 1362 - log.Println("failed to create pull request", err) 1357 + log.Println("failed to get pull id", err) 1363 1358 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1364 1359 return 1365 1360 } ··· 1390 1385 1391 1386 // must be branch or fork based 1392 1387 if sourceRev == "" { 1393 - log.Println("stacked PR from patch-based pull") 1388 + s.logger.Error("stacked PR from patch-based pull") 1394 1389 s.pages.Notice(w, "pull", "Stacking is only supported on branch and fork based pull-requests.") 1395 1390 return 1396 1391 } 1397 1392 1398 1393 formatPatches, err := patchutil.ExtractPatches(patch) 1399 1394 if err != nil { 1400 - log.Println("failed to extract patches", err) 1395 + s.logger.Error("failed to extract patches", "err", err) 1401 1396 s.pages.Notice(w, "pull", fmt.Sprintf("Failed to extract patches: %v", err)) 1402 1397 return 1403 1398 } 1404 1399 1405 1400 // must have atleast 1 patch to begin with 1406 1401 if len(formatPatches) == 0 { 1407 - log.Println("empty patches") 1402 + s.logger.Error("empty patches") 1408 1403 s.pages.Notice(w, "pull", "No patches found in the generated format-patch.") 1409 1404 return 1410 1405 } 1411 1406 1412 - // build a stack out of this patch 1413 - stackId := uuid.New() 1414 - stack, err := s.newStack(r.Context(), repo, user, 
targetBranch, patch, pullSource, stackId.String()) 1407 + client, err := s.oauth.AuthorizedClient(r) 1415 1408 if err != nil { 1416 - log.Println("failed to create stack", err) 1417 - s.pages.Notice(w, "pull", fmt.Sprintf("Failed to create stack: %v", err)) 1409 + s.logger.Error("failed to get authorized client", "err", err) 1410 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1418 1411 return 1419 1412 } 1420 1413 1421 - client, err := s.oauth.AuthorizedClient(r) 1414 + // first upload all blobs 1415 + blobs := make([]*lexutil.LexBlob, len(formatPatches)) 1416 + for i, p := range formatPatches { 1417 + blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(p.Raw), ApplicationGzip) 1418 + if err != nil { 1419 + s.logger.Error("failed to upload patch blob", "err", err) 1420 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1421 + return 1422 + } 1423 + s.logger.Info("uploaded blob", "idx", i+1, "total", len(formatPatches)) 1424 + blobs[i] = blob.Blob 1425 + } 1426 + 1427 + // build a stack out of this patch 1428 + stack, err := s.newStack(r.Context(), repo, user, targetBranch, pullSource, formatPatches, blobs) 1422 1429 if err != nil { 1423 - log.Println("failed to get authorized client", err) 1424 - s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1430 + s.logger.Error("failed to create stack", "err", err) 1431 + s.pages.Notice(w, "pull", fmt.Sprintf("Failed to create stack: %v", err)) 1425 1432 return 1426 1433 } 1427 1434 1428 1435 // apply all record creations at once 1429 1436 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 1430 1437 for _, p := range stack { 1431 - blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(p.LatestPatch()), ApplicationGzip) 1432 - if err != nil { 1433 - log.Println("failed to upload patch blob", err) 1434 - s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1435 - return 1436 - } 1437 - 1438 1438 record := p.AsRecord() 1439 - record.PatchBlob = blob.Blob 1440 1439 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 1441 1440 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 1442 1441 Collection: tangled.RepoPullNSID, ··· 1452 1451 Writes: writes, 1453 1452 }) 1454 1453 if err != nil { 1455 - log.Println("failed to create stacked pull request", err) 1454 + s.logger.Error("failed to create stacked pull request", "err", err) 1456 1455 s.pages.Notice(w, "pull", "Failed to create stacked pull request. Try again later.") 1457 1456 return 1458 1457 } ··· 1460 1459 // create all pulls at once 1461 1460 tx, err := s.db.BeginTx(r.Context(), nil) 1462 1461 if err != nil { 1463 - log.Println("failed to start tx") 1462 + s.logger.Error("failed to start tx") 1464 1463 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1465 1464 return 1466 1465 } 1467 1466 defer tx.Rollback() 1468 1467 1469 1468 for _, p := range stack { 1470 - err = db.NewPull(tx, p) 1469 + err = db.PutPull(tx, p) 1471 1470 if err != nil { 1472 - log.Println("failed to create pull request", err) 1471 + s.logger.Error("failed to create pull request", "err", err) 1473 1472 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1474 1473 return 1475 1474 } ··· 1477 1476 } 1478 1477 1479 1478 if err = tx.Commit(); err != nil { 1480 - log.Println("failed to create pull request", err) 1479 + s.logger.Error("failed to create pull request", "err", err) 1481 1480 s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1482 1481 return 1483 1482 } ··· 1496 1495 func (s *Pulls) ValidatePatch(w http.ResponseWriter, r *http.Request) { 1497 1496 _, err := s.repoResolver.Resolve(r) 1498 1497 if err != nil { 1499 - log.Println("failed to get repo and knot", err) 1498 + s.logger.Error("failed to get repo and knot", "err", err) 1500 1499 return 1501 1500 } 1502 1501 ··· 1921 1920 combined string, 1922 1921 sourceRev string, 1923 1922 ) { 1924 - if pull.IsStacked() { 1923 + stack := r.Context().Value("stack").(models.Stack) 1924 + if stack != nil { 1925 1925 log.Println("resubmitting stacked PR") 1926 - s.resubmitStackedPullHelper(w, r, repo, user, pull, patch, pull.StackId) 1926 + s.resubmitStackedPullHelper(w, r, repo, user, pull, patch) 1927 1927 return 1928 1928 } 1929 1929 ··· 1945 1945 } 1946 1946 } 1947 1947 1948 - tx, err := s.db.BeginTx(r.Context(), nil) 1949 - if err != nil { 1950 - log.Println("failed to start tx") 1951 - s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 1952 - return 1953 - } 1954 - defer tx.Rollback() 1955 - 1956 1948 pullAt := pull.AtUri() 1957 1949 newRoundNumber := len(pull.Submissions) 1958 1950 newPatch := patch 1959 1951 newSourceRev := sourceRev 1960 1952 combinedPatch := combined 1961 - err = db.ResubmitPull(tx, pullAt, newRoundNumber, newPatch, combinedPatch, newSourceRev) 1962 - if err != nil { 1963 - log.Println("failed to create pull request", err) 1964 - s.pages.Notice(w, "resubmit-error", "Failed to create pull request. 
Try again later.") 1965 - return 1966 - } 1953 + 1967 1954 client, err := s.oauth.AuthorizedClient(r) 1968 1955 if err != nil { 1969 1956 log.Println("failed to authorize client") ··· 1985 1972 return 1986 1973 } 1987 1974 record := pull.AsRecord() 1988 - record.PatchBlob = blob.Blob 1975 + record.Rounds = append(record.Rounds, &tangled.RepoPull_Round{ 1976 + CreatedAt: time.Now().Format(time.RFC3339), 1977 + PatchBlob: blob.Blob, 1978 + }) 1989 1979 record.CreatedAt = time.Now().Format(time.RFC3339) 1990 1980 1991 - if record.Source != nil { 1992 - record.Source.Sha = newSourceRev 1993 - } 1994 - 1995 1981 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1996 1982 Collection: tangled.RepoPullNSID, 1997 1983 Repo: user.Active.Did, ··· 2007 1993 return 2008 1994 } 2009 1995 2010 - if err = tx.Commit(); err != nil { 2011 - log.Println("failed to commit transaction", err) 2012 - s.pages.Notice(w, "resubmit-error", "Failed to resubmit pull.") 1996 + err = db.ResubmitPull(s.db, pullAt, newRoundNumber, newPatch, combinedPatch, newSourceRev, blob.Blob) 1997 + if err != nil { 1998 + log.Println("failed to create pull request", err) 1999 + s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 2013 2000 return 2014 2001 } 2015 2002 ··· 2024 2011 user *oauth.MultiAccountUser, 2025 2012 pull *models.Pull, 2026 2013 patch string, 2027 - stackId string, 2028 2014 ) { 2029 2015 targetBranch := pull.TargetBranch 2030 2016 2031 2017 origStack, _ := r.Context().Value("stack").(models.Stack) 2032 - newStack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pull.PullSource, stackId) 2018 + 2019 + formatPatches, err := patchutil.ExtractPatches(patch) 2020 + if err != nil { 2021 + s.logger.Error("Failed to extract patches", "err", err) 2022 + s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. 
Failed to parse patches.") 2023 + return 2024 + } 2025 + 2026 + // must have atleast 1 patch to begin with 2027 + if len(formatPatches) == 0 { 2028 + s.logger.Error("No patches found in the generated format-patch.") 2029 + s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request: No patches found in the generated patch.") 2030 + return 2031 + } 2032 + 2033 + client, err := s.oauth.AuthorizedClient(r) 2034 + if err != nil { 2035 + s.logger.Error("failed to get authorized client", "err", err) 2036 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 2037 + return 2038 + } 2039 + 2040 + // first upload all blobs 2041 + blobs := make([]*lexutil.LexBlob, len(formatPatches)) 2042 + for i, p := range formatPatches { 2043 + blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(p.Raw), ApplicationGzip) 2044 + if err != nil { 2045 + s.logger.Error("failed to upload patch blob", "err", err) 2046 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 2047 + return 2048 + } 2049 + s.logger.Info("uploaded blob", "idx", i+1, "total", len(formatPatches)) 2050 + blobs[i] = blob.Blob 2051 + } 2052 + 2053 + newStack, err := s.newStack(r.Context(), repo, user, targetBranch, pull.PullSource, formatPatches, blobs) 2033 2054 if err != nil { 2034 2055 log.Println("failed to create resubmitted stack", err) 2035 2056 s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. 
Try again later.") ··· 2040 2061 origById := make(map[string]*models.Pull) 2041 2062 newById := make(map[string]*models.Pull) 2042 2063 for _, p := range origStack { 2043 - origById[p.ChangeId] = p 2064 + origById[p.LatestSubmission().ChangeId()] = p 2044 2065 } 2045 2066 for _, p := range newStack { 2046 - newById[p.ChangeId] = p 2067 + newById[p.LatestSubmission().ChangeId()] = p 2047 2068 } 2048 2069 2049 2070 // commits that got deleted: corresponding pull is closed ··· 2055 2076 2056 2077 // pulls in orignal stack but not in new one 2057 2078 for _, op := range origStack { 2058 - if _, ok := newById[op.ChangeId]; !ok { 2059 - deletions[op.ChangeId] = op 2079 + if _, ok := newById[op.LatestSubmission().ChangeId()]; !ok { 2080 + deletions[op.LatestSubmission().ChangeId()] = op 2060 2081 } 2061 2082 } 2062 2083 2063 2084 // pulls in new stack but not in original one 2064 2085 for _, np := range newStack { 2065 - if _, ok := origById[np.ChangeId]; !ok { 2066 - additions[np.ChangeId] = np 2086 + if _, ok := origById[np.LatestSubmission().ChangeId()]; !ok { 2087 + additions[np.LatestSubmission().ChangeId()] = np 2067 2088 } 2068 2089 } 2069 2090 2070 2091 // NOTE: this loop can be written in any of above blocks, 2071 2092 // but is written separately in the interest of simpler code 2072 2093 for _, np := range newStack { 2073 - if op, ok := origById[np.ChangeId]; ok { 2094 + if op, ok := origById[np.LatestSubmission().ChangeId()]; ok { 2095 + // pull exists in both stacks 2096 + updated[op.LatestSubmission().ChangeId()] = struct{}{} 2097 + } 2098 + } 2099 + 2100 + // NOTE: we can go through the newStack and update dependent relations and 2101 + // rkeys now that we know which ones have been updated 2102 + // update dependentOn relations for the entire stack 2103 + var parentAt *syntax.ATURI 2104 + for _, np := range newStack { 2105 + if op, ok := origById[np.LatestSubmission().ChangeId()]; ok { 2074 2106 // pull exists in both stacks 2075 - updated[op.ChangeId] = 
struct{}{} 2107 + np.Rkey = op.Rkey 2076 2108 } 2109 + np.DependentOn = parentAt 2110 + x := np.AtUri() 2111 + parentAt = &x 2077 2112 } 2078 2113 2079 2114 tx, err := s.db.Begin() ··· 2084 2119 } 2085 2120 defer tx.Rollback() 2086 2121 2087 - client, err := s.oauth.AuthorizedClient(r) 2088 - if err != nil { 2089 - log.Println("failed to authorize client") 2090 - s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 2091 - return 2092 - } 2093 - 2094 2122 // pds updates to make 2095 2123 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 2096 2124 ··· 2101 2129 continue 2102 2130 } 2103 2131 2104 - err := db.DeletePull(tx, p.RepoAt, p.PullId) 2132 + err := db.AbandonPulls(tx, orm.FilterEq("repo_at", p.RepoAt), orm.FilterEq("at_uri", p.AtUri())) 2105 2133 if err != nil { 2106 2134 log.Println("failed to delete pull", err, p.PullId) 2107 2135 s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") ··· 2117 2145 2118 2146 // new pulls are created 2119 2147 for _, p := range additions { 2120 - err := db.NewPull(tx, p) 2148 + blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(p.LatestPatch()), ApplicationGzip) 2121 2149 if err != nil { 2122 - log.Println("failed to create pull", err, p.PullId) 2123 - s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2150 + log.Println("failed to upload patch blob", err) 2151 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.") 2124 2152 return 2125 2153 } 2154 + p.Submissions[0].Blob = *blob.Blob 2126 2155 2127 - blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(patch), ApplicationGzip) 2128 - if err != nil { 2129 - log.Println("failed to upload patch blob", err) 2130 - s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. 
Try again later.") 2156 + if err = db.PutPull(tx, p); err != nil { 2157 + log.Println("failed to create pull", err, p.PullId) 2158 + s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2131 2159 return 2132 2160 } 2161 + 2133 2162 record := p.AsRecord() 2134 - record.PatchBlob = blob.Blob 2163 + record.Rounds = []*tangled.RepoPull_Round{ 2164 + { 2165 + CreatedAt: time.Now().Format(time.RFC3339), 2166 + PatchBlob: blob.Blob, 2167 + }, 2168 + } 2135 2169 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2136 2170 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 2137 2171 Collection: tangled.RepoPullNSID, ··· 2154 2188 } 2155 2189 2156 2190 // resubmit the new pull 2191 + np.Rkey = op.Rkey 2157 2192 pullAt := op.AtUri() 2158 2193 newRoundNumber := len(op.Submissions) 2159 2194 newPatch := np.LatestPatch() 2160 2195 combinedPatch := np.LatestSubmission().Combined 2161 2196 newSourceRev := np.LatestSha() 2162 - err := db.ResubmitPull(tx, pullAt, newRoundNumber, newPatch, combinedPatch, newSourceRev) 2197 + 2198 + blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(newPatch), ApplicationGzip) 2163 2199 if err != nil { 2164 - log.Println("failed to update pull", err, op.PullId) 2165 - s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2200 + log.Println("failed to upload patch blob", err) 2201 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.") 2166 2202 return 2167 2203 } 2168 2204 2169 - blob, err := xrpc.RepoUploadBlob(r.Context(), client, gz(patch), ApplicationGzip) 2205 + err = db.ResubmitPull(tx, pullAt, newRoundNumber, newPatch, combinedPatch, newSourceRev, blob.Blob) 2170 2206 if err != nil { 2171 - log.Println("failed to upload patch blob", err) 2172 - s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. 
Try again later.") 2207 + log.Println("failed to update pull", err, op.PullId) 2208 + s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2173 2209 return 2174 2210 } 2211 + 2175 2212 record := np.AsRecord() 2176 - record.PatchBlob = blob.Blob 2213 + record.Rounds = op.AsRecord().Rounds 2214 + record.Rounds = append(record.Rounds, &tangled.RepoPull_Round{ 2215 + CreatedAt: time.Now().Format(time.RFC3339), 2216 + PatchBlob: blob.Blob, 2217 + }) 2177 2218 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2178 2219 RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{ 2179 2220 Collection: tangled.RepoPullNSID, ··· 2185 2226 }) 2186 2227 } 2187 2228 2188 - // update parent-change-id relations for the entire stack 2189 - for _, p := range newStack { 2190 - err := db.SetPullParentChangeId( 2191 - tx, 2192 - p.ParentChangeId, 2193 - // these should be enough filters to be unique per-stack 2194 - orm.FilterEq("repo_at", p.RepoAt.String()), 2195 - orm.FilterEq("owner_did", p.OwnerDid), 2196 - orm.FilterEq("change_id", p.ChangeId), 2197 - ) 2198 - 2199 - if err != nil { 2200 - log.Println("failed to update pull", err, p.PullId) 2201 - s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2202 - return 2203 - } 2204 - } 2205 - 2206 - err = tx.Commit() 2207 - if err != nil { 2208 - log.Println("failed to resubmit pull", err) 2209 - s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2210 - return 2211 - } 2212 - 2213 2229 _, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{ 2214 2230 Repo: user.Active.Did, 2215 2231 Writes: writes, ··· 2217 2233 if err != nil { 2218 2234 log.Println("failed to create stacked pull request", err) 2219 2235 s.pages.Notice(w, "pull", "Failed to create stacked pull request. 
Try again later.") 2236 + return 2237 + } 2238 + 2239 + err = tx.Commit() 2240 + if err != nil { 2241 + log.Println("failed to resubmit pull", err) 2242 + s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2220 2243 return 2221 2244 } 2222 2245 ··· 2240 2263 return 2241 2264 } 2242 2265 2243 - var pullsToMerge models.Stack 2244 - pullsToMerge = append(pullsToMerge, pull) 2245 - if pull.IsStacked() { 2246 - stack, ok := r.Context().Value("stack").(models.Stack) 2247 - if !ok { 2248 - log.Println("failed to get stack") 2249 - s.pages.Notice(w, "pull-merge-error", "Failed to merge patch. Try again later.") 2250 - return 2251 - } 2266 + stack, ok := r.Context().Value("stack").(models.Stack) 2267 + if !ok { 2268 + log.Println("failed to get stack") 2269 + s.pages.Notice(w, "pull-merge-error", "Failed to merge patch. Try again later.") 2270 + return 2271 + } 2252 2272 2253 - // combine patches of substack 2254 - subStack := stack.StrictlyBelow(pull) 2255 - // collect the portion of the stack that is mergeable 2256 - mergeable := subStack.Mergeable() 2257 - // add to total patch 2258 - pullsToMerge = append(pullsToMerge, mergeable...) 2259 - } 2273 + // combine patches of substack 2274 + subStack := stack.Below(pull) 2275 + // collect the portion of the stack that is mergeable 2276 + pullsToMerge := subStack.Mergeable() 2260 2277 2261 2278 patch := pullsToMerge.CombinedPatch() 2262 2279 ··· 2316 2333 } 2317 2334 defer tx.Rollback() 2318 2335 2336 + var atUris []syntax.ATURI 2319 2337 for _, p := range pullsToMerge { 2320 - err := db.MergePull(tx, f.RepoAt(), p.PullId) 2321 - if err != nil { 2322 - log.Printf("failed to update pull request status in database: %s", err) 2323 - s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. 
Try again later.") 2324 - return 2325 - } 2338 + atUris = append(atUris, p.AtUri()) 2326 2339 p.State = models.PullMerged 2340 + } 2341 + err = db.MergePulls(tx, orm.FilterEq("repo_at", f.RepoAt()), orm.FilterIn("at_uri", atUris)) 2342 + if err != nil { 2343 + log.Printf("failed to update pull request status in database: %s", err) 2344 + s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.") 2345 + return 2327 2346 } 2328 2347 2329 2348 err = tx.Commit() ··· 2380 2399 } 2381 2400 defer tx.Rollback() 2382 2401 2383 - var pullsToClose []*models.Pull 2384 - pullsToClose = append(pullsToClose, pull) 2385 - 2386 - // if this PR is stacked, then we want to close all PRs below this one on the stack 2387 - if pull.IsStacked() { 2388 - stack := r.Context().Value("stack").(models.Stack) 2389 - subStack := stack.StrictlyBelow(pull) 2390 - pullsToClose = append(pullsToClose, subStack...) 2391 - } 2392 - 2402 + // if this PR is stacked, then we want to close all PRs above this one on the stack 2403 + stack := r.Context().Value("stack").(models.Stack) 2404 + pullsToClose := stack.Above(pull) 2405 + var atUris []syntax.ATURI 2393 2406 for _, p := range pullsToClose { 2394 - // Close the pull in the database 2395 - err = db.ClosePull(tx, f.RepoAt(), p.PullId) 2396 - if err != nil { 2397 - log.Println("failed to close pull", err) 2398 - s.pages.Notice(w, "pull-close", "Failed to close pull.") 2399 - return 2400 - } 2407 + atUris = append(atUris, p.AtUri()) 2401 2408 p.State = models.PullClosed 2409 + } 2410 + err = db.ClosePulls( 2411 + tx, 2412 + orm.FilterEq("repo_at", f.RepoAt()), 2413 + orm.FilterIn("at_uri", atUris), 2414 + ) 2415 + if err != nil { 2416 + log.Println("failed to close pulls", err) 2417 + s.pages.Notice(w, "pull-close", "Failed to close pull.") 2402 2418 } 2403 2419 2404 2420 // Commit the transaction ··· 2454 2470 } 2455 2471 defer tx.Rollback() 2456 2472 2457 - var pullsToReopen []*models.Pull 2458 - pullsToReopen = 
append(pullsToReopen, pull) 2459 - 2460 2473 // if this PR is stacked, then we want to reopen all PRs above this one on the stack 2461 - if pull.IsStacked() { 2462 - stack := r.Context().Value("stack").(models.Stack) 2463 - subStack := stack.StrictlyAbove(pull) 2464 - pullsToReopen = append(pullsToReopen, subStack...) 2465 - } 2466 - 2474 + stack := r.Context().Value("stack").(models.Stack) 2475 + pullsToReopen := stack.Below(pull) 2476 + var atUris []syntax.ATURI 2467 2477 for _, p := range pullsToReopen { 2468 - // Close the pull in the database 2469 - err = db.ReopenPull(tx, f.RepoAt(), p.PullId) 2470 - if err != nil { 2471 - log.Println("failed to close pull", err) 2472 - s.pages.Notice(w, "pull-close", "Failed to close pull.") 2473 - return 2474 - } 2478 + atUris = append(atUris, p.AtUri()) 2475 2479 p.State = models.PullOpen 2480 + } 2481 + err = db.ReopenPulls( 2482 + tx, 2483 + orm.FilterEq("repo_at", f.RepoAt()), 2484 + orm.FilterIn("at_uri", atUris), 2485 + ) 2486 + if err != nil { 2487 + log.Println("failed to reopen pulls", err) 2488 + s.pages.Notice(w, "pull-close", "Failed to reopen pull.") 2476 2489 } 2477 2490 2478 2491 // Commit the transaction ··· 2490 2503 s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2491 2504 } 2492 2505 2493 - func (s *Pulls) newStack(ctx context.Context, repo *models.Repo, user *oauth.MultiAccountUser, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) { 2494 - formatPatches, err := patchutil.ExtractPatches(patch) 2495 - if err != nil { 2496 - return nil, fmt.Errorf("Failed to extract patches: %v", err) 2497 - } 2498 - 2499 - // must have atleast 1 patch to begin with 2500 - if len(formatPatches) == 0 { 2501 - return nil, fmt.Errorf("No patches found in the generated format-patch.") 2502 - } 2503 - 2504 - // the stack is identified by a UUID 2506 + func (s *Pulls) newStack( 2507 + ctx context.Context, 2508 + repo *models.Repo, 2509 + user 
*oauth.MultiAccountUser, 2510 + targetBranch string, 2511 + pullSource *models.PullSource, 2512 + formatPatches []types.FormatPatch, 2513 + blobs []*lexutil.LexBlob, 2514 + ) (models.Stack, error) { 2505 2515 var stack models.Stack 2506 - parentChangeId := "" 2507 - for _, fp := range formatPatches { 2516 + var parentAtUri *syntax.ATURI 2517 + for i, fp := range formatPatches { 2508 2518 // all patches must have a jj change-id 2509 - changeId, err := fp.ChangeId() 2519 + _, err := fp.ChangeId() 2510 2520 if err != nil { 2511 2521 return nil, fmt.Errorf("Stacking is only supported if all patches contain a change-id commit header.") 2512 2522 } ··· 2521 2531 Patch: fp.Raw, 2522 2532 SourceRev: fp.SHA, 2523 2533 Combined: fp.Raw, 2534 + Blob: *blobs[i], 2524 2535 } 2525 2536 pull := models.Pull{ 2526 2537 Title: title, ··· 2536 2547 }, 2537 2548 PullSource: pullSource, 2538 2549 Created: time.Now(), 2550 + State: models.PullOpen, 2539 2551 2540 - StackId: stackId, 2541 - ChangeId: changeId, 2542 - ParentChangeId: parentChangeId, 2552 + DependentOn: parentAtUri, 2543 2553 } 2544 2554 2545 2555 stack = append(stack, &pull) 2546 2556 2547 - parentChangeId = changeId 2557 + parent := pull.AtUri() 2558 + parentAtUri = &parent 2548 2559 } 2549 2560 2550 2561 return stack, nil
+1
appview/state/profile.go
··· 245 245 Repos: repos, 246 246 Card: profile, 247 247 }) 248 + fmt.Println(err) 248 249 } 249 250 250 251 func (s *State) starredPage(w http.ResponseWriter, r *http.Request) {
+1
appview/state/state.go
··· 120 120 tangled.SpindleMemberNSID, 121 121 tangled.SpindleNSID, 122 122 tangled.StringNSID, 123 + tangled.RepoPullNSID, 123 124 tangled.RepoIssueNSID, 124 125 tangled.RepoIssueCommentNSID, 125 126 tangled.LabelDefinitionNSID,
+1
cmd/cborgen/cborgen.go
··· 50 50 tangled.RepoPull{}, 51 51 tangled.RepoPullComment{}, 52 52 tangled.RepoPull_Source{}, 53 + tangled.RepoPull_Round{}, 53 54 tangled.RepoPullStatus{}, 54 55 tangled.RepoPull_Target{}, 55 56 tangled.Spindle{},
+150 -46
knotserver/ingester.go
··· 11 11 "strings" 12 12 13 13 comatproto "github.com/bluesky-social/indigo/api/atproto" 14 + "github.com/bluesky-social/indigo/atproto/identity" 14 15 "github.com/bluesky-social/indigo/atproto/syntax" 15 16 "github.com/bluesky-social/indigo/xrpc" 16 - "github.com/bluesky-social/jetstream/pkg/models" 17 + jmodels "github.com/bluesky-social/jetstream/pkg/models" 17 18 securejoin "github.com/cyphar/filepath-securejoin" 18 19 "tangled.org/core/api/tangled" 20 + "tangled.org/core/appview/models" 19 21 "tangled.org/core/knotserver/db" 20 22 "tangled.org/core/knotserver/git" 21 23 "tangled.org/core/log" ··· 23 25 "tangled.org/core/workflow" 24 26 ) 25 27 26 - func (h *Knot) processPublicKey(ctx context.Context, event *models.Event) error { 28 + func (h *Knot) processPublicKey(ctx context.Context, event *jmodels.Event) error { 27 29 l := log.FromContext(ctx) 28 30 raw := json.RawMessage(event.Commit.Record) 29 31 did := event.Did ··· 45 47 return nil 46 48 } 47 49 48 - func (h *Knot) processKnotMember(ctx context.Context, event *models.Event) error { 50 + func (h *Knot) processKnotMember(ctx context.Context, event *jmodels.Event) error { 49 51 l := log.FromContext(ctx) 50 52 raw := json.RawMessage(event.Commit.Record) 51 53 did := event.Did ··· 85 87 return nil 86 88 } 87 89 88 - func (h *Knot) processPull(ctx context.Context, event *models.Event) error { 89 - raw := json.RawMessage(event.Commit.Record) 90 - did := event.Did 91 - 92 - var record tangled.RepoPull 93 - if err := json.Unmarshal(raw, &record); err != nil { 94 - return fmt.Errorf("failed to unmarshal record: %w", err) 95 - } 96 - 97 - l := log.FromContext(ctx) 98 - l = l.With("handler", "processPull") 99 - l = l.With("did", did) 100 - 90 + func (h *Knot) validatePullRecord(record *tangled.RepoPull) error { 101 91 if record.Target == nil { 102 92 return fmt.Errorf("ignoring pull record: target repo is nil") 103 93 } 104 - 105 - l = l.With("target_repo", record.Target.Repo) 106 - l = l.With("target_branch", 
record.Target.Branch) 107 94 108 95 if record.Source == nil { 109 96 return fmt.Errorf("ignoring pull record: not a branch-based pull request") ··· 113 100 return fmt.Errorf("ignoring pull record: fork based pull") 114 101 } 115 102 116 - repoAt, err := syntax.ParseATURI(record.Target.Repo) 103 + return nil 104 + } 105 + 106 + func (h *Knot) resolveTargetRepo(ctx context.Context, targetRepoUri string) (*identity.Identity, *tangled.Repo, error) { 107 + repoAt, err := syntax.ParseATURI(targetRepoUri) 117 108 if err != nil { 118 - return fmt.Errorf("failed to parse ATURI: %w", err) 109 + return nil, nil, fmt.Errorf("failed to parse ATURI: %w", err) 119 110 } 120 111 121 - // resolve this aturi to extract the repo record 122 - ident, err := h.resolver.ResolveIdent(ctx, repoAt.Authority().String()) 123 - if err != nil || ident.Handle.IsInvalidHandle() { 124 - return fmt.Errorf("failed to resolve handle: %w", err) 112 + // resolve the repo owner to extract the repo record 113 + repoOwnerIdent, err := h.resolver.ResolveIdent(ctx, repoAt.Authority().String()) 114 + if err != nil || repoOwnerIdent.Handle.IsInvalidHandle() { 115 + return nil, nil, fmt.Errorf("failed to resolve repo owner handle: %w", err) 125 116 } 126 117 127 118 xrpcc := xrpc.Client{ 128 - Host: ident.PDSEndpoint(), 119 + Host: repoOwnerIdent.PDSEndpoint(), 129 120 } 130 121 131 122 resp, err := comatproto.RepoGetRecord(ctx, &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), repoAt.RecordKey().String()) 132 123 if err != nil { 133 - return fmt.Errorf("failed to resolver repo: %w", err) 124 + return nil, nil, fmt.Errorf("failed to resolve repo: %w", err) 134 125 } 135 126 136 127 repo := resp.Value.Val.(*tangled.Repo) 128 + return repoOwnerIdent, repo, nil 129 + } 137 130 138 - if repo.Knot != h.c.Server.Hostname { 139 - return fmt.Errorf("rejected pull record: not this knot, %s != %s", repo.Knot, h.c.Server.Hostname) 131 + func (h *Knot) fetchLatestSubmission(ctx context.Context, did, rkey string, 
record *tangled.RepoPull) (*models.PullSubmission, error) { 132 + // resolve the PR owner's identity to fetch the blob from their PDS 133 + prOwnerIdent, err := h.resolver.ResolveIdent(ctx, did) 134 + if err != nil || prOwnerIdent.Handle.IsInvalidHandle() { 135 + return nil, fmt.Errorf("failed to resolve PR owner handle: %w", err) 140 136 } 141 137 142 - didSlashRepo, err := securejoin.SecureJoin(ident.DID.String(), repo.Name) 138 + roundNumber := len(record.Rounds) - 1 139 + round := record.Rounds[roundNumber] 140 + 141 + // fetch the blob from the PR owner's PDS 142 + prOwnerPds := prOwnerIdent.PDSEndpoint() 143 + blobUrl, err := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", prOwnerPds)) 144 + if err != nil { 145 + return nil, fmt.Errorf("failed to construct blob URL: %w", err) 146 + } 147 + q := blobUrl.Query() 148 + q.Set("cid", round.PatchBlob.Ref.String()) 149 + q.Set("did", did) 150 + blobUrl.RawQuery = q.Encode() 151 + 152 + req, err := http.NewRequestWithContext(ctx, http.MethodGet, blobUrl.String(), nil) 153 + if err != nil { 154 + return nil, fmt.Errorf("failed to create blob request: %w", err) 155 + } 156 + req.Header.Set("Content-Type", "application/json") 157 + 158 + blobResp, err := http.DefaultClient.Do(req) 143 159 if err != nil { 144 - return fmt.Errorf("failed to construct relative repo path: %w", err) 160 + return nil, fmt.Errorf("failed to fetch blob: %w", err) 145 161 } 162 + defer blobResp.Body.Close() 146 163 147 - repoPath, err := securejoin.SecureJoin(h.c.Repo.ScanPath, didSlashRepo) 164 + blob := io.ReadCloser(blobResp.Body) 165 + latestSubmission, err := models.PullSubmissionFromRecord(did, rkey, roundNumber, round, &blob) 148 166 if err != nil { 149 - return fmt.Errorf("failed to construct absolute repo path: %w", err) 167 + return nil, fmt.Errorf("failed to parse submission: %w", err) 150 168 } 151 169 152 - gr, err := git.Open(repoPath, record.Source.Sha) 170 + return latestSubmission, nil 171 + } 172 + 173 + func (h 
*Knot) discoverWorkflows(ctx context.Context, repoPath, sha string) (workflow.RawPipeline, error) { 174 + gr, err := git.Open(repoPath, sha) 153 175 if err != nil { 154 - return fmt.Errorf("failed to open git repository: %w", err) 176 + return nil, fmt.Errorf("failed to open git repository: %w", err) 155 177 } 156 178 157 179 workflowDir, err := gr.FileTree(ctx, workflow.WorkflowDir) 158 180 if err != nil { 159 - return fmt.Errorf("failed to open workflow directory: %w", err) 181 + return nil, fmt.Errorf("failed to open workflow directory: %w", err) 160 182 } 161 183 162 184 var pipeline workflow.RawPipeline ··· 177 199 }) 178 200 } 179 201 202 + return pipeline, nil 203 + } 204 + 205 + func (h *Knot) compilePipeline(ctx context.Context, repoOwner *identity.Identity, repo *tangled.Repo, sourceBranch, sourceSha, targetBranch string, rawPipeline workflow.RawPipeline) tangled.Pipeline { 206 + l := log.FromContext(ctx) 207 + 180 208 trigger := tangled.Pipeline_PullRequestTriggerData{ 181 209 Action: "create", 182 - SourceBranch: record.Source.Branch, 183 - SourceSha: record.Source.Sha, 184 - TargetBranch: record.Target.Branch, 210 + SourceBranch: sourceBranch, 211 + SourceSha: sourceSha, 212 + TargetBranch: targetBranch, 185 213 } 186 214 187 215 compiler := workflow.Compiler{ ··· 189 217 Kind: string(workflow.TriggerKindPullRequest), 190 218 PullRequest: &trigger, 191 219 Repo: &tangled.Pipeline_TriggerRepo{ 192 - Did: ident.DID.String(), 220 + Did: repoOwner.DID.String(), 193 221 Knot: repo.Knot, 194 222 Repo: repo.Name, 195 223 }, 196 224 }, 197 225 } 198 226 199 - cp := compiler.Compile(compiler.Parse(pipeline)) 200 - eventJson, err := json.Marshal(cp) 227 + l.Info("raw", "raw", rawPipeline) 228 + parsed := compiler.Parse(rawPipeline) 229 + l.Info("parsed", "parsed", parsed) 230 + compiled := compiler.Compile(parsed) 231 + 232 + l.Info("compiler diagnostics", "diagnostics", compiler.Diagnostics) 233 + 234 + return compiled 235 + } 236 + 237 + func (h *Knot) 
processPull(ctx context.Context, event *jmodels.Event) error { 238 + raw := json.RawMessage(event.Commit.Record) 239 + rkey := event.Commit.RKey 240 + did := event.Did 241 + 242 + var record tangled.RepoPull 243 + if err := json.Unmarshal(raw, &record); err != nil { 244 + return fmt.Errorf("failed to unmarshal record: %w", err) 245 + } 246 + 247 + l := log.FromContext(ctx) 248 + l = l.With("handler", "processPull") 249 + l = l.With("did", did) 250 + 251 + l.Info("validating pull record") 252 + if err := h.validatePullRecord(&record); err != nil { 253 + return err 254 + } 255 + 256 + l = l.With("target_repo", record.Target.Repo) 257 + l = l.With("target_branch", record.Target.Branch) 258 + 259 + l.Info("resolving target repo") 260 + repoOwnerIdent, repo, err := h.resolveTargetRepo(ctx, record.Target.Repo) 201 261 if err != nil { 202 - return fmt.Errorf("failed to marshal pipeline event: %w", err) 262 + return err 203 263 } 204 264 265 + if repo.Knot != h.c.Server.Hostname { 266 + return fmt.Errorf("rejected pull record: not this knot, %s != %s", repo.Knot, h.c.Server.Hostname) 267 + } 268 + 269 + l.Info("fetching latest submission") 270 + latestSubmission, err := h.fetchLatestSubmission(ctx, did, rkey, &record) 271 + if err != nil { 272 + return err 273 + } 274 + 275 + sha := latestSubmission.SourceRev 276 + if sha == "" { 277 + return fmt.Errorf("failed to extract source SHA from pull submission") 278 + } 279 + l = l.With("sha", sha) 280 + 281 + l.Info("constructing repo path") 282 + didSlashRepo, err := securejoin.SecureJoin(repoOwnerIdent.DID.String(), repo.Name) 283 + if err != nil { 284 + return fmt.Errorf("failed to construct relative repo path: %w", err) 285 + } 286 + 287 + repoPath, err := securejoin.SecureJoin(h.c.Repo.ScanPath, didSlashRepo) 288 + if err != nil { 289 + return fmt.Errorf("failed to construct absolute repo path: %w", err) 290 + } 291 + 292 + l.Info("discovering workflows", "repo_path", repoPath) 293 + pipeline, err := 
h.discoverWorkflows(ctx, repoPath, sha) 294 + if err != nil { 295 + return err 296 + } 297 + 298 + l.Info("compiling pipeline", "workflow_count", len(pipeline)) 299 + cp := h.compilePipeline(ctx, repoOwnerIdent, repo, record.Source.Branch, sha, record.Target.Branch, pipeline) 300 + 205 301 // do not run empty pipelines 206 302 if cp.Workflows == nil { 303 + l.Info("skipping empty pipeline") 207 304 return nil 208 305 } 209 306 307 + l.Info("marshaling pipeline event") 308 + eventJson, err := json.Marshal(cp) 309 + if err != nil { 310 + return fmt.Errorf("failed to marshal pipeline event: %w", err) 311 + } 312 + 210 313 ev := db.Event{ 211 314 Rkey: TID(), 212 315 Nsid: tangled.PipelineNSID, 213 316 EventJson: string(eventJson), 214 317 } 215 318 319 + l.Info("inserting pipeline event") 216 320 return h.db.InsertEvent(ev, h.n) 217 321 } 218 322 219 323 // duplicated from add collaborator 220 - func (h *Knot) processCollaborator(ctx context.Context, event *models.Event) error { 324 + func (h *Knot) processCollaborator(ctx context.Context, event *jmodels.Event) error { 221 325 raw := json.RawMessage(event.Commit.Record) 222 326 did := event.Did 223 327 ··· 319 423 return nil 320 424 } 321 425 322 - func (h *Knot) processMessages(ctx context.Context, event *models.Event) error { 323 - if event.Kind != models.EventKindCommit { 426 + func (h *Knot) processMessages(ctx context.Context, event *jmodels.Event) error { 427 + if event.Kind != jmodels.EventKindCommit { 324 428 return nil 325 429 } 326 430
+35 -22
lexicons/pulls/pull.json
··· 12 12 "required": [ 13 13 "target", 14 14 "title", 15 - "patchBlob", 16 15 "createdAt" 17 16 ], 18 17 "properties": { 19 - "target": { 20 - "type": "ref", 21 - "ref": "#target" 22 - }, 23 18 "title": { 24 19 "type": "string" 25 20 }, 26 21 "body": { 27 22 "type": "string" 28 23 }, 29 - "patch": { 30 - "type": "string", 31 - "description": "(deprecated) use patchBlob instead" 32 - }, 33 - "patchBlob": { 34 - "type": "blob", 35 - "accept": [ 36 - "text/x-patch" 37 - ], 38 - "description": "patch content" 24 + "rounds": { 25 + "type": "array", 26 + "items": { 27 + "type": "ref", 28 + "ref": "#round" 29 + } 39 30 }, 40 31 "source": { 41 32 "type": "ref", 42 33 "ref": "#source" 34 + }, 35 + "target": { 36 + "type": "ref", 37 + "ref": "#target" 43 38 }, 44 39 "createdAt": { 45 40 "type": "string", ··· 58 53 "type": "string", 59 54 "format": "at-uri" 60 55 } 56 + }, 57 + "dependentOn": { 58 + "type": "string", 59 + "format": "at-uri" 61 60 } 62 61 } 63 62 } ··· 81 80 "source": { 82 81 "type": "object", 83 82 "required": [ 84 - "branch", 85 - "sha" 83 + "branch" 86 84 ], 87 85 "properties": { 88 86 "branch": { 89 87 "type": "string" 90 88 }, 91 - "sha": { 92 - "type": "string", 93 - "minLength": 40, 94 - "maxLength": 40 95 - }, 96 89 "repo": { 97 90 "type": "string", 98 91 "format": "at-uri" 92 + } 93 + } 94 + }, 95 + "round": { 96 + "type": "object", 97 + "required": [ 98 + "patchBlob", 99 + "createdAt" 100 + ], 101 + "description": "revisions of this pull request, newer rounds are appended to this array. appviews may reject records that do not treat this field as append-only. the blob format is gzipped text-based git-format-patches.", 102 + "properties": { 103 + "createdAt": { 104 + "type": "string", 105 + "format": "datetime" 106 + }, 107 + "patchBlob": { 108 + "type": "blob", 109 + "accept": [ 110 + "application/gzip" 111 + ] 99 112 } 100 113 } 101 114 },
+7 -8
patchutil/patchutil.go
··· 17 17 func ExtractPatches(formatPatch string) ([]types.FormatPatch, error) { 18 18 patches := splitFormatPatch(formatPatch) 19 19 20 - result := []types.FormatPatch{} 21 - 22 - for _, patch := range patches { 20 + result := make([]types.FormatPatch, len(patches)) 21 + for i, patch := range patches { 23 22 files, headerStr, err := gitdiff.Parse(strings.NewReader(patch)) 24 23 if err != nil { 25 24 return nil, fmt.Errorf("failed to parse patch: %w", err) ··· 30 29 return nil, fmt.Errorf("failed to parse patch header: %w", err) 31 30 } 32 31 33 - result = append(result, types.FormatPatch{ 32 + result[i] = types.FormatPatch{ 34 33 Files: files, 35 34 PatchHeader: header, 36 35 Raw: patch, 37 - }) 36 + } 38 37 } 39 38 40 39 return result, nil ··· 114 113 return headerCount >= 2 115 114 } 116 115 117 - func splitFormatPatch(patchText string) []string { 118 - re := regexp.MustCompile(`(?m)^From [0-9a-f]{40} .*$`) 116 + var formatPatchRegex = regexp.MustCompile(`(?m)^From [0-9a-f]{40} .*$`) 119 117 120 - indexes := re.FindAllStringIndex(patchText, -1) 118 + func splitFormatPatch(patchText string) []string { 119 + indexes := formatPatchRegex.FindAllStringIndex(patchText, -1) 121 120 122 121 if len(indexes) == 0 { 123 122 return []string{}