From da5b9f0a89472e0875dddfe2cb711aa284b1c403 Mon Sep 17 00:00:00 2001 From: brian Date: Thu, 21 Sep 2023 15:00:48 -0300 Subject: [PATCH 01/18] Added protocol v2 --- plumbing/protocol/v2/advrefs.go | 212 +++++ plumbing/protocol/v2/advrefs_decode.go | 289 +++++++ plumbing/protocol/v2/advrefs_decode_test.go | 493 ++++++++++++ plumbing/protocol/v2/advrefs_encode.go | 164 ++++ plumbing/protocol/v2/advrefs_encode_test.go | 233 ++++++ plumbing/protocol/v2/advrefs_test.go | 380 +++++++++ plumbing/protocol/v2/capability/capability.go | 385 ++++++++++ .../protocol/v2/capability/capability_test.go | 22 + plumbing/protocol/v2/capability/list.go | 195 +++++ plumbing/protocol/v2/capability/list_test.go | 217 ++++++ plumbing/protocol/v2/common.go | 69 ++ plumbing/protocol/v2/common_test.go | 33 + plumbing/protocol/v2/doc.go | 724 ++++++++++++++++++ plumbing/protocol/v2/report_status.go | 165 ++++ plumbing/protocol/v2/report_status_test.go | 256 +++++++ plumbing/protocol/v2/shallowupd.go | 92 +++ plumbing/protocol/v2/shallowupd_test.go | 150 ++++ plumbing/protocol/v2/sideband/common.go | 33 + plumbing/protocol/v2/sideband/demux.go | 148 ++++ plumbing/protocol/v2/sideband/demux_test.go | 154 ++++ plumbing/protocol/v2/sideband/doc.go | 31 + plumbing/protocol/v2/sideband/muxer.go | 65 ++ plumbing/protocol/v2/sideband/muxer_test.go | 39 + plumbing/protocol/v2/srvresp.go | 139 ++++ plumbing/protocol/v2/srvresp_test.go | 92 +++ plumbing/protocol/v2/ulreq.go | 168 ++++ plumbing/protocol/v2/ulreq_decode.go | 257 +++++++ plumbing/protocol/v2/ulreq_decode_test.go | 533 +++++++++++++ plumbing/protocol/v2/ulreq_encode.go | 145 ++++ plumbing/protocol/v2/ulreq_encode_test.go | 315 ++++++++ plumbing/protocol/v2/ulreq_test.go | 109 +++ plumbing/protocol/v2/updreq.go | 128 ++++ plumbing/protocol/v2/updreq_decode.go | 249 ++++++ plumbing/protocol/v2/updreq_decode_test.go | 307 ++++++++ plumbing/protocol/v2/updreq_encode.go | 89 +++ plumbing/protocol/v2/updreq_encode_test.go | 190 +++++ plumbing/protocol/v2/updreq_test.go | 39 + plumbing/protocol/v2/uppackreq.go | 98 +++ plumbing/protocol/v2/uppackreq_test.go | 76 ++ plumbing/protocol/v2/uppackresp.go | 108 +++ plumbing/protocol/v2/uppackresp_test.go | 130 ++++ 41 files changed, 7721 insertions(+) create mode 100644 plumbing/protocol/v2/advrefs.go create mode 100644 plumbing/protocol/v2/advrefs_decode.go create mode 100644 plumbing/protocol/v2/advrefs_decode_test.go create mode 100644 plumbing/protocol/v2/advrefs_encode.go create mode 100644 plumbing/protocol/v2/advrefs_encode_test.go create mode 100644 plumbing/protocol/v2/advrefs_test.go create mode 100644 plumbing/protocol/v2/capability/capability.go create mode 100644 plumbing/protocol/v2/capability/capability_test.go create mode 100644 plumbing/protocol/v2/capability/list.go create mode 100644 plumbing/protocol/v2/capability/list_test.go create mode 100644 plumbing/protocol/v2/common.go create mode 100644 plumbing/protocol/v2/common_test.go create mode 100644 plumbing/protocol/v2/doc.go create mode 100644 plumbing/protocol/v2/report_status.go create mode 100644 plumbing/protocol/v2/report_status_test.go create mode 100644 plumbing/protocol/v2/shallowupd.go create mode 100644 plumbing/protocol/v2/shallowupd_test.go create mode 100644 plumbing/protocol/v2/sideband/common.go create mode 100644 plumbing/protocol/v2/sideband/demux.go create mode 100644 plumbing/protocol/v2/sideband/demux_test.go create mode 100644 plumbing/protocol/v2/sideband/doc.go create mode 100644 plumbing/protocol/v2/sideband/muxer.go create mode 
100644 plumbing/protocol/v2/sideband/muxer_test.go
 create mode 100644 plumbing/protocol/v2/srvresp.go
 create mode 100644 plumbing/protocol/v2/srvresp_test.go
 create mode 100644 plumbing/protocol/v2/ulreq.go
 create mode 100644 plumbing/protocol/v2/ulreq_decode.go
 create mode 100644 plumbing/protocol/v2/ulreq_decode_test.go
 create mode 100644 plumbing/protocol/v2/ulreq_encode.go
 create mode 100644 plumbing/protocol/v2/ulreq_encode_test.go
 create mode 100644 plumbing/protocol/v2/ulreq_test.go
 create mode 100644 plumbing/protocol/v2/updreq.go
 create mode 100644 plumbing/protocol/v2/updreq_decode.go
 create mode 100644 plumbing/protocol/v2/updreq_decode_test.go
 create mode 100644 plumbing/protocol/v2/updreq_encode.go
 create mode 100644 plumbing/protocol/v2/updreq_encode_test.go
 create mode 100644 plumbing/protocol/v2/updreq_test.go
 create mode 100644 plumbing/protocol/v2/uppackreq.go
 create mode 100644 plumbing/protocol/v2/uppackreq_test.go
 create mode 100644 plumbing/protocol/v2/uppackresp.go
 create mode 100644 plumbing/protocol/v2/uppackresp_test.go

diff --git a/plumbing/protocol/v2/advrefs.go b/plumbing/protocol/v2/advrefs.go
new file mode 100644
index 000000000..9f436f8b7
--- /dev/null
+++ b/plumbing/protocol/v2/advrefs.go
@@ -0,0 +1,212 @@
+package packp
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/v2/capability"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/storage/memory"
+)
+
+// AdvRefs values represent the information transmitted on an
+// advertised-refs message. Values from this type are not zero-value
+// safe; use the NewAdvRefs function instead.
+type AdvRefs struct {
+	// Prefix stores prefix payloads.
+	//
+	// When using this message over (smart) HTTP, you have to add a pktline
+	// before the whole thing with the following payload:
+	//
+	// "# service=$servicename" LF
+	//
+	// Moreover, some (if not all) git HTTP smart servers will send a
+	// flush-pkt just after the first pkt-line.
+	//
+	// To accommodate both situations, the Prefix field allows you to store
+	// any data you want to send before the actual pktlines. It will also
+	// be filled up with whatever is found on the line.
+	Prefix [][]byte
+	// Head stores the resolved HEAD reference if present.
+	// This can be present with git-upload-pack, not with git-receive-pack.
+	Head *plumbing.Hash
+	// Capabilities are the capabilities.
+	Capabilities *capability.List
+	// References are the hash references.
+	References map[string]plumbing.Hash
+	// Peeled are the peeled hash references.
+	Peeled map[string]plumbing.Hash
+	// Shallows are the shallow object ids.
+	Shallows []plumbing.Hash
+}
+
+// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
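+//
+// A minimal usage sketch: decode an advertisement and re-encode it,
+// assuming r is an io.Reader carrying pkt-lines and w is an io.Writer:
+//
+//	ar := NewAdvRefs()
+//	if err := ar.Decode(r); err != nil {
+//		return err
+//	}
+//	if err := ar.Encode(w); err != nil {
+//		return err
+//	}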
+func NewAdvRefs() *AdvRefs {
+	return &AdvRefs{
+		Prefix:       [][]byte{},
+		Capabilities: capability.NewList(),
+		References:   make(map[string]plumbing.Hash),
+		Peeled:       make(map[string]plumbing.Hash),
+		Shallows:     []plumbing.Hash{},
+	}
+}
+
+func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
+	switch r.Type() {
+	case plumbing.SymbolicReference:
+		v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
+		return a.Capabilities.Add(capability.SymRef, v)
+	case plumbing.HashReference:
+		a.References[r.Name().String()] = r.Hash()
+	default:
+		return plumbing.ErrInvalidType
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
+	s := memory.ReferenceStorage{}
+	if err := a.addRefs(s); err != nil {
+		return s, plumbing.NewUnexpectedError(err)
+	}
+
+	return s, nil
+}
+
+func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
+	for name, hash := range a.References {
+		ref := plumbing.NewReferenceFromStrings(name, hash.String())
+		if err := s.SetReference(ref); err != nil {
+			return err
+		}
+	}
+
+	if a.supportSymrefs() {
+		return a.addSymbolicRefs(s)
+	}
+
+	return a.resolveHead(s)
+}
+
+// If the server does not support the symref capability, we need to guess
+// the reference that HEAD is pointing to.
+//
+// Git versions prior to 1.8.4.3 had a special procedure to find the
+// reference HEAD points to:
+//   - Check if a reference called master exists. If it exists and has
+//     the same hash as HEAD, we can say that HEAD is pointing to master.
+//   - If master does not exist or does not have the same hash as HEAD,
+//     order the references and check, in that order, whether each one has
+//     the same hash as HEAD. If so, set HEAD pointing to that branch.
+//   - If no reference is found, return an error.
+//
+// TODO: Do not use Master, use the default branch
+func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
+	if a.Head == nil {
+		return nil
+	}
+
+	// check first if HEAD is pointing to master
+	ref, err := s.Reference(plumbing.Master)
+	if err == nil {
+		ok, err := a.createHeadIfCorrectReference(ref, s)
+		if err != nil {
+			return err
+		}
+
+		if ok {
+			return nil
+		}
+	}
+
+	if err != nil && err != plumbing.ErrReferenceNotFound {
+		return err
+	}
+
+	// From here on we are trying to guess the branch that HEAD is pointing to
+	refIter, err := s.IterReferences()
+	if err != nil {
+		return err
+	}
+
+	var refNames []string
+	err = refIter.ForEach(func(r *plumbing.Reference) error {
+		refNames = append(refNames, string(r.Name()))
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	sort.Strings(refNames)
+
+	var headSet bool
+	for _, refName := range refNames {
+		ref, err := s.Reference(plumbing.ReferenceName(refName))
+		if err != nil {
+			return err
+		}
+		ok, err := a.createHeadIfCorrectReference(ref, s)
+		if err != nil {
+			return err
+		}
+		if ok {
+			headSet = true
+			break
+		}
+	}
+
+	if !headSet {
+		return plumbing.ErrReferenceNotFound
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) createHeadIfCorrectReference(
+	reference *plumbing.Reference,
+	s storer.ReferenceStorer) (bool, error) {
+	if reference.Hash() == *a.Head {
+		headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
+		if err := s.SetReference(headRef); err != nil {
+			return false, err
+		}
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
+	for _, symref := range a.Capabilities.Get(capability.SymRef) {
+		chunks := strings.Split(symref, ":")
+		if len(chunks) != 2 {
fmt.Errorf("bad number of `:` in symref value (%q)", symref) + return plumbing.NewUnexpectedError(err) + } + name := plumbing.ReferenceName(chunks[0]) + target := plumbing.ReferenceName(chunks[1]) + ref := plumbing.NewSymbolicReference(name, target) + if err := s.SetReference(ref); err != nil { + return nil + } + } + + return nil +} + +func (a *AdvRefs) supportSymrefs() bool { + return a.Capabilities.Supports(capability.SymRef) +} + +// IsEmpty returns true if doesn't contain any reference. +func (a *AdvRefs) IsEmpty() bool { + return a.Head == nil && + len(a.References) == 0 && + len(a.Peeled) == 0 && + len(a.Shallows) == 0 +} diff --git a/plumbing/protocol/v2/advrefs_decode.go b/plumbing/protocol/v2/advrefs_decode.go new file mode 100644 index 000000000..f8d26a28e --- /dev/null +++ b/plumbing/protocol/v2/advrefs_decode.go @@ -0,0 +1,289 @@ +package packp + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +// Decode reads the next advertised-refs message form its input and +// stores it in the AdvRefs. +func (a *AdvRefs) Decode(r io.Reader) error { + d := newAdvRefsDecoder(r) + return d.Decode(a) +} + +type advRefsDecoder struct { + s *pktline.Scanner // a pkt-line scanner from the input stream + line []byte // current pkt-line contents, use parser.nextLine() to make it advance + nLine int // current pkt-line number for debugging, begins at 1 + hash plumbing.Hash // last hash read + err error // sticky error, use the parser.error() method to fill this out + data *AdvRefs // parsed data is stored here +} + +var ( + // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised + // references message. + ErrEmptyAdvRefs = errors.New("empty advertised-ref message") + // ErrEmptyInput is returned by Decode if the input is empty. + ErrEmptyInput = errors.New("empty input") +) + +func newAdvRefsDecoder(r io.Reader) *advRefsDecoder { + return &advRefsDecoder{ + s: pktline.NewScanner(r), + } +} + +func (d *advRefsDecoder) Decode(v *AdvRefs) error { + d.data = v + + for state := decodePrefix; state != nil; { + state = state(d) + } + + return d.err +} + +type decoderStateFn func(*advRefsDecoder) decoderStateFn + +// fills out the parser sticky error +func (d *advRefsDecoder) error(format string, a ...interface{}) { + msg := fmt.Sprintf( + "pkt-line %d: %s", d.nLine, + fmt.Sprintf(format, a...), + ) + + d.err = NewErrUnexpectedData(msg, d.line) +} + +// Reads a new pkt-line from the scanner, makes its payload available as +// p.line and increments p.nLine. A successful invocation returns true, +// otherwise, false is returned and the sticky error is filled out +// accordingly. Trims eols at the end of the payloads. +func (d *advRefsDecoder) nextLine() bool { + d.nLine++ + + if !d.s.Scan() { + if d.err = d.s.Err(); d.err != nil { + return false + } + + if d.nLine == 1 { + d.err = ErrEmptyInput + return false + } + + d.error("EOF") + return false + } + + d.line = d.s.Bytes() + d.line = bytes.TrimSuffix(d.line, eol) + + return true +} + +// The HTTP smart prefix is often followed by a flush-pkt. 
+func decodePrefix(d *advRefsDecoder) decoderStateFn {
+	if ok := d.nextLine(); !ok {
+		return nil
+	}
+
+	if !isPrefix(d.line) {
+		return decodeFirstHash
+	}
+
+	tmp := make([]byte, len(d.line))
+	copy(tmp, d.line)
+	d.data.Prefix = append(d.data.Prefix, tmp)
+	if ok := d.nextLine(); !ok {
+		return nil
+	}
+
+	if !isFlush(d.line) {
+		return decodeFirstHash
+	}
+
+	d.data.Prefix = append(d.data.Prefix, pktline.Flush)
+	if ok := d.nextLine(); !ok {
+		return nil
+	}
+
+	return decodeFirstHash
+}
+
+func isPrefix(payload []byte) bool {
+	return len(payload) > 0 && payload[0] == '#'
+}
+
+// If the first hash is zero, then a no-refs message is coming. Otherwise,
+// a list-of-refs is coming, and the hash will be followed by the first
+// advertised ref.
+func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
+	// If the repository is empty, we receive a flush here (HTTP).
+	if isFlush(p.line) {
+		p.err = ErrEmptyAdvRefs
+		return nil
+	}
+
+	// TODO: Use object-format (when available) for hash size. Git 2.41+
+	if len(p.line) < hashSize {
+		p.error("cannot read hash, pkt-line too short")
+		return nil
+	}
+
+	if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
+		p.error("invalid hash text: %s", err)
+		return nil
+	}
+
+	p.line = p.line[hashSize:]
+
+	if p.hash.IsZero() {
+		return decodeSkipNoRefs
+	}
+
+	return decodeFirstRef
+}
+
+// Skips SP "capabilities^{}" NUL
+func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn {
+	if len(p.line) < len(noHeadMark) {
+		p.error("too short zero-id ref")
+		return nil
+	}
+
+	if !bytes.HasPrefix(p.line, noHeadMark) {
+		p.error("malformed zero-id ref")
+		return nil
+	}
+
+	p.line = p.line[len(noHeadMark):]
+
+	return decodeCaps
+}
+
+// Decodes the refname; expects SP refname NUL.
+func decodeFirstRef(l *advRefsDecoder) decoderStateFn {
+	if len(l.line) < 3 {
+		l.error("line too short after hash")
+		return nil
+	}
+
+	if !bytes.HasPrefix(l.line, sp) {
+		l.error("no space after hash")
+		return nil
+	}
+	l.line = l.line[1:]
+
+	chunks := bytes.SplitN(l.line, null, 2)
+	if len(chunks) < 2 {
+		l.error("NULL not found")
+		return nil
+	}
+	ref := chunks[0]
+	l.line = chunks[1]
+
+	if bytes.Equal(ref, []byte(head)) {
+		l.data.Head = &l.hash
+	} else {
+		l.data.References[string(ref)] = l.hash
+	}
+
+	return decodeCaps
+}
+
+func decodeCaps(p *advRefsDecoder) decoderStateFn {
+	if err := p.data.Capabilities.Decode(p.line); err != nil {
+		p.error("invalid capabilities: %s", err)
+		return nil
+	}
+
+	return decodeOtherRefs
+}
+
+// The refs are either tips (obj-id SP refname) or peeled (obj-id SP refname^{}).
+// If there are no refs, then there might be a shallow or a flush-pkt.
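+//
+// For example (payloads only, pkt-line framing omitted):
+//
+//	a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master
+//	5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree
+//	c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}
+//	shallow 1111111111111111111111111111111111111111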
+func decodeOtherRefs(p *advRefsDecoder) decoderStateFn {
+	if ok := p.nextLine(); !ok {
+		return nil
+	}
+
+	if bytes.HasPrefix(p.line, shallow) {
+		return decodeShallow
+	}
+
+	if len(p.line) == 0 {
+		return nil
+	}
+
+	saveTo := p.data.References
+	if bytes.HasSuffix(p.line, peeled) {
+		p.line = bytes.TrimSuffix(p.line, peeled)
+		saveTo = p.data.Peeled
+	}
+
+	ref, hash, err := readRef(p.line)
+	if err != nil {
+		p.error("%s", err)
+		return nil
+	}
+	saveTo[ref] = hash
+
+	return decodeOtherRefs
+}
+
+// Reads a ref-name
+func readRef(data []byte) (string, plumbing.Hash, error) {
+	chunks := bytes.Split(data, sp)
+	switch {
+	case len(chunks) == 1:
+		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
+	case len(chunks) > 2:
+		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
+	default:
+		return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
+	}
+}
+
+// Keeps reading shallows until a flush-pkt is found
+func decodeShallow(p *advRefsDecoder) decoderStateFn {
+	if !bytes.HasPrefix(p.line, shallow) {
+		p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)])
+		return nil
+	}
+	p.line = bytes.TrimPrefix(p.line, shallow)
+
+	if len(p.line) != hashSize {
+		p.error("malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
+			len(p.line))
+		return nil
+	}
+
+	text := p.line[:hashSize]
+	var h plumbing.Hash
+	if _, err := hex.Decode(h[:], text); err != nil {
+		p.error("invalid hash text: %s", err)
+		return nil
+	}
+
+	p.data.Shallows = append(p.data.Shallows, h)
+
+	if ok := p.nextLine(); !ok {
+		return nil
+	}
+
+	if len(p.line) == 0 {
+		return nil // successful parse of the advertised-refs message
+	}
+
+	return decodeShallow
+}
diff --git a/plumbing/protocol/v2/advrefs_decode_test.go b/plumbing/protocol/v2/advrefs_decode_test.go
new file mode 100644
index 000000000..d1271450e
--- /dev/null
+++ b/plumbing/protocol/v2/advrefs_decode_test.go
@@ -0,0 +1,493 @@
+package packp
+
+import (
+	"bytes"
+	"io"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+	"github.com/go-git/go-git/v5/plumbing/protocol/v2/capability"
+
+	.
"gopkg.in/check.v1" +) + +type AdvRefsDecodeSuite struct{} + +var _ = Suite(&AdvRefsDecodeSuite{}) + +func (s *AdvRefsDecodeSuite) TestEmpty(c *C) { + var buf bytes.Buffer + ar := NewAdvRefs() + c.Assert(ar.Decode(&buf), Equals, ErrEmptyInput) +} + +func (s *AdvRefsDecodeSuite) TestEmptyFlush(c *C) { + var buf bytes.Buffer + e := pktline.NewEncoder(&buf) + e.Flush() + ar := NewAdvRefs() + c.Assert(ar.Decode(&buf), Equals, ErrEmptyAdvRefs) +} + +func (s *AdvRefsDecodeSuite) TestEmptyPrefixFlush(c *C) { + var buf bytes.Buffer + e := pktline.NewEncoder(&buf) + e.EncodeString("# service=git-upload-pack") + e.Flush() + e.Flush() + ar := NewAdvRefs() + c.Assert(ar.Decode(&buf), Equals, ErrEmptyAdvRefs) +} + +func (s *AdvRefsDecodeSuite) TestShortForHash(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*too short.*") +} + +func (s *AdvRefsDecodeSuite) testDecoderErrorMatches(c *C, input io.Reader, pattern string) { + ar := NewAdvRefs() + c.Assert(ar.Decode(input), ErrorMatches, pattern) +} + +func (s *AdvRefsDecodeSuite) TestInvalidFirstHash(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796alberto2219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*invalid hash.*") +} + +func (s *AdvRefsDecodeSuite) TestZeroId(c *C) { + payloads := []string{ + "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack thin-pack\n", + pktline.FlushString, + } + ar := s.testDecodeOK(c, payloads) + c.Assert(ar.Head, IsNil) +} + +func (s *AdvRefsDecodeSuite) testDecodeOK(c *C, payloads []string) *AdvRefs { + var buf bytes.Buffer + e := pktline.NewEncoder(&buf) + err := e.EncodeString(payloads...) 
+ c.Assert(err, IsNil) + + ar := NewAdvRefs() + c.Assert(ar.Decode(&buf), IsNil) + + return ar +} + +func (s *AdvRefsDecodeSuite) TestMalformedZeroId(c *C) { + payloads := []string{ + "0000000000000000000000000000000000000000 wrong\x00multi_ack thin-pack\n", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*malformed zero-id.*") +} + +func (s *AdvRefsDecodeSuite) TestShortZeroId(c *C) { + payloads := []string{ + "0000000000000000000000000000000000000000 capabi", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*too short zero-id.*") +} + +func (s *AdvRefsDecodeSuite) TestHead(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", + pktline.FlushString, + } + ar := s.testDecodeOK(c, payloads) + c.Assert(*ar.Head, Equals, + plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) +} + +func (s *AdvRefsDecodeSuite) TestFirstIsNotHead(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\x00", + pktline.FlushString, + } + ar := s.testDecodeOK(c, payloads) + c.Assert(ar.Head, IsNil) + c.Assert(ar.References["refs/heads/master"], Equals, + plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")) +} + +func (s *AdvRefsDecodeSuite) TestShortRef(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 H", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*too short.*") +} + +func (s *AdvRefsDecodeSuite) TestNoNULL(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEADofs-delta multi_ack", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*NULL not found.*") +} + +func (s *AdvRefsDecodeSuite) TestNoSpaceAfterHash(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5-HEAD\x00", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*no space after hash.*") +} + +func (s *AdvRefsDecodeSuite) TestNoCaps(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", + pktline.FlushString, + } + ar := s.testDecodeOK(c, payloads) + c.Assert(ar.Capabilities.IsEmpty(), Equals, true) +} + +func (s *AdvRefsDecodeSuite) TestCaps(c *C) { + type entry struct { + Name capability.Capability + Values []string + } + + for _, test := range [...]struct { + input []string + capabilities []entry + }{{ + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", + pktline.FlushString, + }, + capabilities: []entry{}, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n", + pktline.FlushString, + }, + capabilities: []entry{}, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta", + pktline.FlushString, + }, + capabilities: []entry{ + { + Name: capability.OFSDelta, + Values: []string(nil), + }, + }, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack", + pktline.FlushString, + }, + capabilities: []entry{ + {Name: capability.OFSDelta, Values: []string(nil)}, + {Name: capability.MultiACK, Values: []string(nil)}, + }, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack\n", + pktline.FlushString, + }, + capabilities: []entry{ + {Name: capability.OFSDelta, Values: []string(nil)}, + {Name: capability.MultiACK, Values: []string(nil)}, + }, + }, { + input: []string{ + 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:refs/heads/master agent=foo=bar\n", + pktline.FlushString, + }, + capabilities: []entry{ + {Name: capability.SymRef, Values: []string{"HEAD:refs/heads/master"}}, + {Name: capability.Agent, Values: []string{"foo=bar"}}, + }, + }, { + input: []string{ + "0000000000000000000000000000000000000000 capabilities^{}\x00report-status report-status-v2 delete-refs side-band-64k quiet atomic ofs-delta object-format=sha1 agent=git/2.41.0\n", + pktline.FlushString, + }, + capabilities: []entry{ + {Name: capability.ReportStatus, Values: []string(nil)}, + {Name: capability.ObjectFormat, Values: []string{"sha1"}}, + {Name: capability.Agent, Values: []string{"git/2.41.0"}}, + }, + }} { + ar := s.testDecodeOK(c, test.input) + for _, fixCap := range test.capabilities { + c.Assert(ar.Capabilities.Supports(fixCap.Name), Equals, true, + Commentf("input = %q, capability = %q", test.input, fixCap.Name)) + c.Assert(ar.Capabilities.Get(fixCap.Name), DeepEquals, fixCap.Values, + Commentf("input = %q, capability = %q", test.input, fixCap.Name)) + } + } +} + +func (s *AdvRefsDecodeSuite) TestWithPrefix(c *C) { + payloads := []string{ + "# this is a prefix\n", + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta\n", + pktline.FlushString, + } + ar := s.testDecodeOK(c, payloads) + c.Assert(len(ar.Prefix), Equals, 1) + c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix")) +} + +func (s *AdvRefsDecodeSuite) TestWithPrefixAndFlush(c *C) { + payloads := []string{ + "# this is a prefix\n", + pktline.FlushString, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta\n", + pktline.FlushString, + } + ar := s.testDecodeOK(c, payloads) + c.Assert(len(ar.Prefix), Equals, 2) + c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix")) + c.Assert(ar.Prefix[1], DeepEquals, []byte(pktline.FlushString)) +} + +func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { + for _, test := range [...]struct { + input []string + references map[string]plumbing.Hash + peeled map[string]plumbing.Hash + }{{ + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + pktline.FlushString, + }, + references: make(map[string]plumbing.Hash), + peeled: make(map[string]plumbing.Hash), + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "1111111111111111111111111111111111111111 ref/foo", + pktline.FlushString, + }, + references: map[string]plumbing.Hash{ + "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), + }, + peeled: make(map[string]plumbing.Hash), + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "1111111111111111111111111111111111111111 ref/foo\n", + pktline.FlushString, + }, + references: map[string]plumbing.Hash{ + "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), + }, + peeled: make(map[string]plumbing.Hash), + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "1111111111111111111111111111111111111111 ref/foo\n", + "2222222222222222222222222222222222222222 ref/bar", + pktline.FlushString, + }, + references: map[string]plumbing.Hash{ + "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), + "ref/bar": plumbing.NewHash("2222222222222222222222222222222222222222"), + }, + peeled: make(map[string]plumbing.Hash), + }, { + input: 
[]string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "1111111111111111111111111111111111111111 ref/foo^{}\n", + pktline.FlushString, + }, + references: make(map[string]plumbing.Hash), + peeled: map[string]plumbing.Hash{ + "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), + }, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "1111111111111111111111111111111111111111 ref/foo\n", + "2222222222222222222222222222222222222222 ref/bar^{}", + pktline.FlushString, + }, + references: map[string]plumbing.Hash{ + "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), + }, + peeled: map[string]plumbing.Hash{ + "ref/bar": plumbing.NewHash("2222222222222222222222222222222222222222"), + }, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "51b8b4fb32271d39fbdd760397406177b2b0fd36 refs/pull/10/head\n", + "02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca refs/pull/100/head\n", + "c284c212704c43659bf5913656b8b28e32da1621 refs/pull/100/merge\n", + "3d6537dce68c8b7874333a1720958bd8db3ae8ca refs/pull/101/merge\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11^{}\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + pktline.FlushString, + }, + references: map[string]plumbing.Hash{ + "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), + "refs/pull/10/head": plumbing.NewHash("51b8b4fb32271d39fbdd760397406177b2b0fd36"), + "refs/pull/100/head": plumbing.NewHash("02b5a6031ba7a8cbfde5d65ff9e13ecdbc4a92ca"), + "refs/pull/100/merge": plumbing.NewHash("c284c212704c43659bf5913656b8b28e32da1621"), + "refs/pull/101/merge": plumbing.NewHash("3d6537dce68c8b7874333a1720958bd8db3ae8ca"), + "refs/tags/v2.6.11": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), + "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), + }, + peeled: map[string]plumbing.Hash{ + "refs/tags/v2.6.11": plumbing.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"), + "refs/tags/v2.6.11-tree": plumbing.NewHash("c39ae07f393806ccf406ef966e9a15afc43cc36a"), + }, + }} { + ar := s.testDecodeOK(c, test.input) + comment := Commentf("input = %v\n", test.input) + c.Assert(ar.References, DeepEquals, test.references, comment) + c.Assert(ar.Peeled, DeepEquals, test.peeled, comment) + } +} + +func (s *AdvRefsDecodeSuite) TestMalformedOtherRefsNoSpace(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8crefs/tags/v2.6.11\n", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*malformed ref data.*") +} + +func (s *AdvRefsDecodeSuite) TestMalformedOtherRefsMultipleSpaces(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags v2.6.11\n", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*malformed ref data.*") +} + +func (s *AdvRefsDecodeSuite) TestShallow(c *C) { + for _, test := range [...]struct { + input []string + shallows 
[]plumbing.Hash + }{{ + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + pktline.FlushString, + }, + shallows: []plumbing.Hash{}, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + pktline.FlushString, + }, + shallows: []plumbing.Hash{plumbing.NewHash("1111111111111111111111111111111111111111")}, + }, { + input: []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + }, + shallows: []plumbing.Hash{ + plumbing.NewHash("1111111111111111111111111111111111111111"), + plumbing.NewHash("2222222222222222222222222222222222222222"), + }, + }} { + ar := s.testDecodeOK(c, test.input) + comment := Commentf("input = %v\n", test.input) + c.Assert(ar.Shallows, DeepEquals, test.shallows, comment) + } +} + +func (s *AdvRefsDecodeSuite) TestInvalidShallowHash(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "shallow 11111111alcortes111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*invalid hash text.*") +} + +func (s *AdvRefsDecodeSuite) TestGarbageAfterShallow(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + "b5be40b90dbaa6bd337f3b77de361bfc0723468b refs/tags/v4.4", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*malformed shallow prefix.*") +} + +func (s *AdvRefsDecodeSuite) TestMalformedShallowHash(c *C) { + payloads := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222 
malformed\n", + pktline.FlushString, + } + r := toPktLines(c, payloads) + s.testDecoderErrorMatches(c, r, ".*malformed shallow hash.*") +} + +func (s *AdvRefsDecodeSuite) TestEOFRefs(c *C) { + input := strings.NewReader("" + + "005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" + + "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" + + "00355dc01c595e6c6ec9ccda4f6ffbf614e4d92bb0c7 refs/foo\n", + ) + s.testDecoderErrorMatches(c, input, ".*invalid pkt-len.*") +} + +func (s *AdvRefsDecodeSuite) TestEOFShallows(c *C) { + input := strings.NewReader("" + + "005b6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n" + + "003fa6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n" + + "00445dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n" + + "0047c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n" + + "0035shallow 1111111111111111111111111111111111111111\n" + + "0034shallow 222222222222222222222222") + s.testDecoderErrorMatches(c, input, ".*unexpected EOF.*") +} diff --git a/plumbing/protocol/v2/advrefs_encode.go b/plumbing/protocol/v2/advrefs_encode.go new file mode 100644 index 000000000..c5d18bdfb --- /dev/null +++ b/plumbing/protocol/v2/advrefs_encode.go @@ -0,0 +1,164 @@ +package packp + +import ( + "bytes" + "io" + "sort" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/v2/capability" +) + +// Encode writes the AdvRefs encoding to a writer. +// +// All the payloads will end with a newline character. Capabilities, +// references and shallows are written in alphabetical order, except for +// peeled references that always follow their corresponding references. 
+func (a *AdvRefs) Encode(w io.Writer) error {
+	e := newAdvRefsEncoder(w)
+	return e.Encode(a)
+}
+
+type advRefsEncoder struct {
+	data         *AdvRefs         // data to encode
+	pe           *pktline.Encoder // where to write the encoded data
+	firstRefName string           // reference name to encode in the first pkt-line (HEAD if present)
+	firstRefHash plumbing.Hash    // hash referenced to encode in the first pkt-line (HEAD if present)
+	sortedRefs   []string         // hash references to encode ordered by increasing order
+	err          error            // sticky error
+}
+
+func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
+	return &advRefsEncoder{
+		pe: pktline.NewEncoder(w),
+	}
+}
+
+func (e *advRefsEncoder) Encode(v *AdvRefs) error {
+	e.data = v
+	e.sortRefs()
+	e.setFirstRef()
+
+	for state := encodePrefix; state != nil; {
+		state = state(e)
+	}
+
+	return e.err
+}
+
+func (e *advRefsEncoder) sortRefs() {
+	if len(e.data.References) > 0 {
+		refs := make([]string, 0, len(e.data.References))
+		for refName := range e.data.References {
+			refs = append(refs, refName)
+		}
+
+		sort.Strings(refs)
+		e.sortedRefs = refs
+	}
+}
+
+func (e *advRefsEncoder) setFirstRef() {
+	if e.data.Head != nil {
+		e.firstRefName = head
+		e.firstRefHash = *e.data.Head
+		return
+	}
+
+	if len(e.sortedRefs) > 0 {
+		refName := e.sortedRefs[0]
+		e.firstRefName = refName
+		e.firstRefHash = e.data.References[refName]
+	}
+}
+
+type encoderStateFn func(*advRefsEncoder) encoderStateFn
+
+func encodePrefix(e *advRefsEncoder) encoderStateFn {
+	for _, p := range e.data.Prefix {
+		if bytes.Equal(p, pktline.Flush) {
+			if e.err = e.pe.Flush(); e.err != nil {
+				return nil
+			}
+			continue
+		}
+		if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
+			return nil
+		}
+	}
+
+	return encodeCaps
+}
+
+// Adds the first pkt-line payload: head hash, head ref and capabilities.
+// If a HEAD ref is not present, the first reference in increasing order is
+// used instead. If there is neither a HEAD nor any reference, the first
+// line is "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)".
+// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt
+// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt
+func encodeCaps(e *advRefsEncoder) encoderStateFn {
+	const formatFirstLine = "%s %s\x00%s\n"
+	capabilities := formatCaps(e.data.Capabilities)
+
+	firstRefName := e.firstRefName
+	firstRefHash := e.firstRefHash
+	if firstRefName == "" {
+		firstRefName = "capabilities^{}"
+		firstRefHash = plumbing.ZeroHash
+	}
+
+	if e.err = e.pe.Encodef(formatFirstLine, firstRefHash.String(), firstRefName, capabilities); e.err != nil {
+		return nil
+	}
+
+	return encodeRefs
+}
+
+func formatCaps(c *capability.List) string {
+	if c == nil {
+		return ""
+	}
+
+	return c.String()
+}
+
+// Adds the (sorted) refs: hash SP refname EOL
+// and their peeled refs if any.
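+//
+// For example, a reference with a peeled counterpart encodes as two
+// consecutive payloads, the peeled one marked with the ^{} suffix:
+//
+//	1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree
+//	5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}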
+func encodeRefs(e *advRefsEncoder) encoderStateFn { + for _, r := range e.sortedRefs { + if r == e.firstRefName { + continue + } + + hash := e.data.References[r] + if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { + return nil + } + + if hash, ok := e.data.Peeled[r]; ok { + if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { + return nil + } + } + } + + return encodeShallow +} + +// Adds the (sorted) shallows: "shallow" SP hash EOL +func encodeShallow(e *advRefsEncoder) encoderStateFn { + sorted := sortShallows(e.data.Shallows) + for _, hash := range sorted { + if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { + return nil + } + } + + return encodeFlush +} + +func sortShallows(c []plumbing.Hash) []string { + ret := []string{} + for _, h := range c { + ret = append(ret, h.String()) + } + sort.Strings(ret) + + return ret +} + +func encodeFlush(e *advRefsEncoder) encoderStateFn { + e.err = e.pe.Flush() + return nil +} diff --git a/plumbing/protocol/v2/advrefs_encode_test.go b/plumbing/protocol/v2/advrefs_encode_test.go new file mode 100644 index 000000000..d28828092 --- /dev/null +++ b/plumbing/protocol/v2/advrefs_encode_test.go @@ -0,0 +1,233 @@ +package packp + +import ( + "bytes" + "strings" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/v2/capability" + + . "gopkg.in/check.v1" +) + +type AdvRefsEncodeSuite struct{} + +var _ = Suite(&AdvRefsEncodeSuite{}) + +func testEncode(c *C, input *AdvRefs, expected []byte) { + var buf bytes.Buffer + c.Assert(input.Encode(&buf), IsNil) + obtained := buf.Bytes() + + comment := Commentf("\nobtained = %s\nexpected = %s\n", string(obtained), string(expected)) + + c.Assert(obtained, DeepEquals, expected, comment) +} + +func (s *AdvRefsEncodeSuite) TestZeroValue(c *C) { + ar := &AdvRefs{} + + expected := pktlines(c, + "0000000000000000000000000000000000000000 capabilities^{}\x00\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestHead(c *C) { + hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") + ar := &AdvRefs{ + Head: &hash, + } + + expected := pktlines(c, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestCapsNoHead(c *C) { + capabilities := capability.NewList() + capabilities.Add(capability.MultiACK) + capabilities.Add(capability.OFSDelta) + capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") + ar := &AdvRefs{ + Capabilities: capabilities, + } + + expected := pktlines(c, + "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestCapsWithHead(c *C) { + hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") + capabilities := capability.NewList() + capabilities.Add(capability.MultiACK) + capabilities.Add(capability.OFSDelta) + capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") + ar := &AdvRefs{ + Head: &hash, + Capabilities: capabilities, + } + + expected := pktlines(c, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestRefs(c *C) { + references := map[string]plumbing.Hash{ + "refs/heads/master": 
plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), + "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"), + "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"), + "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"), + "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), + } + ar := &AdvRefs{ + References: references, + } + + expected := pktlines(c, + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\x00\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n", + "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", + "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestPeeled(c *C) { + references := map[string]plumbing.Hash{ + "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), + "refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"), + "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"), + "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"), + "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), + } + peeled := map[string]plumbing.Hash{ + "refs/tags/v2.7.13-tree": plumbing.NewHash("4444444444444444444444444444444444444444"), + "refs/tags/v2.6.12-tree": plumbing.NewHash("5555555555555555555555555555555555555555"), + } + ar := &AdvRefs{ + References: references, + Peeled: peeled, + } + + expected := pktlines(c, + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\x00\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n", + "5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n", + "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", + "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", + "4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestShallow(c *C) { + shallows := []plumbing.Hash{ + plumbing.NewHash("1111111111111111111111111111111111111111"), + plumbing.NewHash("4444444444444444444444444444444444444444"), + plumbing.NewHash("3333333333333333333333333333333333333333"), + plumbing.NewHash("2222222222222222222222222222222222222222"), + } + ar := &AdvRefs{ + Shallows: shallows, + } + + expected := pktlines(c, + "0000000000000000000000000000000000000000 capabilities^{}\x00\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + "shallow 3333333333333333333333333333333333333333\n", + "shallow 4444444444444444444444444444444444444444\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestAll(c *C) { + hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") + + capabilities := capability.NewList() + capabilities.Add(capability.MultiACK) + capabilities.Add(capability.OFSDelta) + capabilities.Add(capability.SymRef, "HEAD:/refs/heads/master") + + references := map[string]plumbing.Hash{ + "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), + 
"refs/tags/v2.6.12-tree": plumbing.NewHash("1111111111111111111111111111111111111111"), + "refs/tags/v2.7.13-tree": plumbing.NewHash("3333333333333333333333333333333333333333"), + "refs/tags/v2.6.13-tree": plumbing.NewHash("2222222222222222222222222222222222222222"), + "refs/tags/v2.6.11-tree": plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c"), + } + + peeled := map[string]plumbing.Hash{ + "refs/tags/v2.7.13-tree": plumbing.NewHash("4444444444444444444444444444444444444444"), + "refs/tags/v2.6.12-tree": plumbing.NewHash("5555555555555555555555555555555555555555"), + } + + shallows := []plumbing.Hash{ + plumbing.NewHash("1111111111111111111111111111111111111111"), + plumbing.NewHash("4444444444444444444444444444444444444444"), + plumbing.NewHash("3333333333333333333333333333333333333333"), + plumbing.NewHash("2222222222222222222222222222222222222222"), + } + + ar := &AdvRefs{ + Head: &hash, + Capabilities: capabilities, + References: references, + Peeled: peeled, + Shallows: shallows, + } + + expected := pktlines(c, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n", + "5555555555555555555555555555555555555555 refs/tags/v2.6.12-tree^{}\n", + "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", + "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", + "4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + "shallow 3333333333333333333333333333333333333333\n", + "shallow 4444444444444444444444444444444444444444\n", + pktline.FlushString, + ) + + testEncode(c, ar, expected) +} + +func (s *AdvRefsEncodeSuite) TestErrorTooLong(c *C) { + references := map[string]plumbing.Hash{ + strings.Repeat("a", pktline.MaxPayloadSize): plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), + } + ar := &AdvRefs{ + References: references, + } + + var buf bytes.Buffer + err := ar.Encode(&buf) + c.Assert(err, ErrorMatches, ".*payload is too long.*") +} diff --git a/plumbing/protocol/v2/advrefs_test.go b/plumbing/protocol/v2/advrefs_test.go new file mode 100644 index 000000000..1b8db981c --- /dev/null +++ b/plumbing/protocol/v2/advrefs_test.go @@ -0,0 +1,380 @@ +package packp + +import ( + "bytes" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" + + . 
"gopkg.in/check.v1" +) + +type AdvRefSuite struct{} + +var _ = Suite(&AdvRefSuite{}) + +func (s *AdvRefSuite) TestAddReferenceSymbolic(c *C) { + ref := plumbing.NewSymbolicReference("foo", "bar") + + a := NewAdvRefs() + err := a.AddReference(ref) + c.Assert(err, IsNil) + + values := a.Capabilities.Get(capability.SymRef) + c.Assert(values, HasLen, 1) + c.Assert(values[0], Equals, "foo:bar") +} + +func (s *AdvRefSuite) TestAddReferenceHash(c *C) { + ref := plumbing.NewHashReference("foo", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) + + a := NewAdvRefs() + err := a.AddReference(ref) + c.Assert(err, IsNil) + + c.Assert(a.References, HasLen, 1) + c.Assert(a.References["foo"].String(), Equals, "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") +} + +func (s *AdvRefSuite) TestAllReferences(c *C) { + hash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") + + a := NewAdvRefs() + err := a.AddReference(plumbing.NewSymbolicReference("foo", "bar")) + c.Assert(err, IsNil) + err = a.AddReference(plumbing.NewHashReference("bar", hash)) + c.Assert(err, IsNil) + + refs, err := a.AllReferences() + c.Assert(err, IsNil) + + iter, err := refs.IterReferences() + c.Assert(err, IsNil) + + var count int + iter.ForEach(func(ref *plumbing.Reference) error { + count++ + switch ref.Name() { + case "bar": + c.Assert(ref.Hash(), Equals, hash) + case "foo": + c.Assert(ref.Target().String(), Equals, "bar") + } + return nil + }) + + c.Assert(count, Equals, 2) +} + +func (s *AdvRefSuite) TestAllReferencesBadSymref(c *C) { + a := NewAdvRefs() + err := a.Capabilities.Set(capability.SymRef, "foo") + c.Assert(err, IsNil) + + _, err = a.AllReferences() + c.Assert(err, NotNil) +} + +func (s *AdvRefSuite) TestIsEmpty(c *C) { + a := NewAdvRefs() + c.Assert(a.IsEmpty(), Equals, true) +} + +func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToMaster(c *C) { + a := NewAdvRefs() + headHash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") + a.Head = &headHash + ref := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) + + err := a.AddReference(ref) + c.Assert(err, IsNil) + + storage, err := a.AllReferences() + c.Assert(err, IsNil) + + head, err := storage.Reference(plumbing.HEAD) + c.Assert(err, IsNil) + c.Assert(head.Target(), Equals, ref.Name()) +} + +func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToOtherThanMaster(c *C) { + a := NewAdvRefs() + headHash := plumbing.NewHash("0000000000000000000000000000000000000000") + a.Head = &headHash + ref1 := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) + ref2 := plumbing.NewHashReference("other/ref", plumbing.NewHash("0000000000000000000000000000000000000000")) + + err := a.AddReference(ref1) + c.Assert(err, IsNil) + err = a.AddReference(ref2) + c.Assert(err, IsNil) + + storage, err := a.AllReferences() + c.Assert(err, IsNil) + + head, err := storage.Reference(plumbing.HEAD) + c.Assert(err, IsNil) + c.Assert(head.Hash(), Equals, ref2.Hash()) +} + +func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToNoRef(c *C) { + a := NewAdvRefs() + headHash := plumbing.NewHash("0000000000000000000000000000000000000000") + a.Head = &headHash + ref := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) + + err := a.AddReference(ref) + c.Assert(err, IsNil) + + _, err = a.AllReferences() + c.Assert(err, NotNil) +} + +func (s *AdvRefSuite) TestNoSymRefCapabilityHeadToNoMasterAlphabeticallyOrdered(c *C) { + a := 
NewAdvRefs() + headHash := plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c") + a.Head = &headHash + ref1 := plumbing.NewHashReference(plumbing.Master, plumbing.NewHash("0000000000000000000000000000000000000000")) + ref2 := plumbing.NewHashReference("aaaaaaaaaaaaaaa", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) + ref3 := plumbing.NewHashReference("bbbbbbbbbbbbbbb", plumbing.NewHash("5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c")) + + err := a.AddReference(ref1) + c.Assert(err, IsNil) + err = a.AddReference(ref3) + c.Assert(err, IsNil) + err = a.AddReference(ref2) + c.Assert(err, IsNil) + + storage, err := a.AllReferences() + c.Assert(err, IsNil) + + head, err := storage.Reference(plumbing.HEAD) + c.Assert(err, IsNil) + c.Assert(head.Target(), Equals, ref2.Name()) +} + +type AdvRefsDecodeEncodeSuite struct{} + +var _ = Suite(&AdvRefsDecodeEncodeSuite{}) + +func (s *AdvRefsDecodeEncodeSuite) test(c *C, in []string, exp []string, isEmpty bool) { + var err error + var input io.Reader + { + var buf bytes.Buffer + p := pktline.NewEncoder(&buf) + err = p.EncodeString(in...) + c.Assert(err, IsNil) + input = &buf + } + + var expected []byte + { + var buf bytes.Buffer + p := pktline.NewEncoder(&buf) + err = p.EncodeString(exp...) + c.Assert(err, IsNil) + + expected = buf.Bytes() + } + + var obtained []byte + { + ar := NewAdvRefs() + c.Assert(ar.Decode(input), IsNil) + c.Assert(ar.IsEmpty(), Equals, isEmpty) + + var buf bytes.Buffer + c.Assert(ar.Encode(&buf), IsNil) + + obtained = buf.Bytes() + } + + c.Assert(string(obtained), DeepEquals, string(expected)) +} + +func (s *AdvRefsDecodeEncodeSuite) TestNoHead(c *C) { + input := []string{ + "0000000000000000000000000000000000000000 capabilities^{}\x00", + pktline.FlushString, + } + + expected := []string{ + "0000000000000000000000000000000000000000 capabilities^{}\x00\n", + pktline.FlushString, + } + + s.test(c, input, expected, true) +} + +func (s *AdvRefsDecodeEncodeSuite) TestNoHeadSmart(c *C) { + input := []string{ + "# service=git-upload-pack\n", + "0000000000000000000000000000000000000000 capabilities^{}\x00", + pktline.FlushString, + } + + expected := []string{ + "# service=git-upload-pack\n", + "0000000000000000000000000000000000000000 capabilities^{}\x00\n", + pktline.FlushString, + } + + s.test(c, input, expected, true) +} + +func (s *AdvRefsDecodeEncodeSuite) TestNoHeadSmartBug(c *C) { + input := []string{ + "# service=git-upload-pack\n", + pktline.FlushString, + "0000000000000000000000000000000000000000 capabilities^{}\x00\n", + pktline.FlushString, + } + + expected := []string{ + "# service=git-upload-pack\n", + pktline.FlushString, + "0000000000000000000000000000000000000000 capabilities^{}\x00\n", + pktline.FlushString, + } + + s.test(c, input, expected, true) +} + +func (s *AdvRefsDecodeEncodeSuite) TestRefs(c *C) { + input := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree", + pktline.FlushString, + } + + expected := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + 
pktline.FlushString, + } + + s.test(c, input, expected, false) +} + +func (s *AdvRefsDecodeEncodeSuite) TestPeeled(c *C) { + input := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + pktline.FlushString, + } + + expected := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", + pktline.FlushString, + } + + s.test(c, input, expected, false) +} + +func (s *AdvRefsDecodeEncodeSuite) TestAll(c *C) { + input := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}", + "shallow 1111111111111111111111111111111111111111", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + } + + expected := []string{ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + } + + s.test(c, input, expected, false) +} + +func (s *AdvRefsDecodeEncodeSuite) TestAllSmart(c *C) { + input := []string{ + "# service=git-upload-pack\n", + pktline.FlushString, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + } + + expected := []string{ + "# service=git-upload-pack\n", + pktline.FlushString, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + 
"5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + } + + s.test(c, input, expected, false) +} + +func (s *AdvRefsDecodeEncodeSuite) TestAllSmartBug(c *C) { + input := []string{ + "# service=git-upload-pack\n", + pktline.FlushString, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + } + + expected := []string{ + "# service=git-upload-pack\n", + pktline.FlushString, + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", + "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", + "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", + "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", + "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", + "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", + "shallow 1111111111111111111111111111111111111111\n", + "shallow 2222222222222222222222222222222222222222\n", + pktline.FlushString, + } + + s.test(c, input, expected, false) +} diff --git a/plumbing/protocol/v2/capability/capability.go b/plumbing/protocol/v2/capability/capability.go new file mode 100644 index 000000000..8942c9c1a --- /dev/null +++ b/plumbing/protocol/v2/capability/capability.go @@ -0,0 +1,385 @@ +// Package capability defines the server and client capabilities. +package capability + +import ( + "fmt" + "os" +) + +// Capability describes a server or client capability. +type Capability string + +func (n Capability) String() string { + return string(n) +} + +const ( + // MultiACK capability allows the server to return "ACK obj-id continue" as + // soon as it finds a commit that it can use as a common base, between the + // client's wants and the client's have set. + // + // By sending this early, the server can potentially head off the client + // from walking any further down that particular branch of the client's + // repository history. The client may still need to walk down other + // branches, sending have lines for those, until the server has a + // complete cut across the DAG, or the client has said "done". + // + // Without multi_ack, a client sends have lines in --date-order until + // the server has found a common base. That means the client will send + // have lines that are already known by the server to be common, because + // they overlap in time with another branch that the server hasn't found + // a common base on yet. 
+ // + // For example, suppose the client has commits in caps that the server + // doesn't and the server has commits in lower case that the client + // doesn't, as in the following diagram: + // + // +---- u ---------------------- x + // / +----- y + // / / + // a -- b -- c -- d -- E -- F + // \ + // +--- Q -- R -- S + // + // If the client wants x,y and starts out by saying have F,S, the server + // doesn't know what F,S is. Eventually the client says "have d" and + // the server sends "ACK d continue" to let the client know to stop + // walking down that line (so don't send c-b-a), but it's not done yet, + // it needs a base for x. The client keeps going with S-R-Q, until a + // is reached, at which point the server has a clear base and it all + // ends. + // + // Without multi_ack, the client would have sent that c-b-a chain anyway, + // interleaved with S-R-Q. + MultiACK Capability = "multi_ack" + // MultiACKDetailed is an extension of multi_ack that permits the client to + // better understand the server's in-memory state. + MultiACKDetailed Capability = "multi_ack_detailed" + // NoDone should only be used with the smart HTTP protocol. If + // multi_ack_detailed and no-done are both present, then the sender is + // free to immediately send a pack following its first "ACK obj-id ready" + // message. + // + // Without no-done in the smart HTTP protocol, the server session would + // end and the client has to make another trip to send "done" before + // the server can send the pack. no-done removes the last round and + // thus slightly reduces latency. + NoDone Capability = "no-done" + // ThinPack is one with deltas which reference base objects not + // contained within the pack (but are known to exist at the receiving + // end). This can reduce the network traffic significantly, but it + // requires the receiving end to know how to "thicken" these packs by + // adding the missing bases to the pack. + // + // The upload-pack server advertises 'thin-pack' when it can generate + // and send a thin pack. A client requests the 'thin-pack' capability + // when it understands how to "thicken" it, notifying the server that + // it can receive such a pack. A client MUST NOT request the + // 'thin-pack' capability if it cannot turn a thin pack into a + // self-contained pack. + // + // Receive-pack, on the other hand, is assumed by default to be able to + // handle thin packs, but can ask the client not to use the feature by + // advertising the 'no-thin' capability. A client MUST NOT send a thin + // pack if the server advertises the 'no-thin' capability. + // + // The reasons for this asymmetry are historical. The receive-pack + // program did not exist until after the invention of thin packs, so + // historically the reference implementation of receive-pack always + // understood thin packs. Adding 'no-thin' later allowed receive-pack + // to disable the feature in a backwards-compatible manner. + ThinPack Capability = "thin-pack" + // Sideband means that the server can send, and the client can understand, + // multiplexed progress reports and error info interleaved with the packfile + // itself. + // + // These two options are mutually exclusive. A modern client always + // favors Sideband64k. + // + // Either mode indicates that the packfile data will be streamed broken + // up into packets of up to either 1000 bytes in the case of 'side_band', + // or 65520 bytes in the case of 'side_band_64k'.
Each packet is made up + // of a leading 4-byte pkt-line length of how much data is in the packet, + // followed by a 1-byte stream code, followed by the actual data. + // + // The stream code can be one of: + // + // 1 - pack data + // 2 - progress messages + // 3 - fatal error message just before stream aborts + // + // The "side-band-64k" capability came about as a way for newer clients + // that can handle much larger packets to request packets that are + // actually crammed nearly full, while maintaining backward compatibility + // for the older clients. + // + // Further, with side-band and its up to 1000-byte messages, it's actually + // 999 bytes of payload and 1 byte for the stream code. With side-band-64k, + // same deal, you have up to 65519 bytes of data and 1 byte for the stream + // code. + // + // The client MUST send at most one of "side-band" and "side- + // band-64k". The server MUST diagnose it as an error if the client requests + // both. + Sideband Capability = "side-band" + Sideband64k Capability = "side-band-64k" + // OFSDelta means that the server can send, and the client can understand, + // PACKv2 with deltas referring to their base by position in the pack rather + // than by an obj-id. That is, they can send/read OBJ_OFS_DELTA (aka type 6) + // in a packfile. + OFSDelta Capability = "ofs-delta" + // Agent the server may optionally send this capability to notify the client + // that the server is running version `X`. The client may optionally return + // its own agent string by responding with an `agent=Y` capability (but it + // MUST NOT do so if the server did not mention the agent capability). The + // `X` and `Y` strings may contain any printable ASCII characters except + // space (i.e., the byte range 32 < x < 127), and are typically of the form + // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely + // informative for statistics and debugging purposes, and MUST NOT be used + // to programmatically assume the presence or absence of particular features. + Agent Capability = "agent" + // Shallow capability adds "deepen", "shallow" and "unshallow" commands to + // the fetch-pack/upload-pack protocol so clients can request shallow + // clones. + Shallow Capability = "shallow" + // DeepenSince adds the "deepen-since" command to the fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific time, instead of depth. Internally it's the equivalent of doing + // "rev-list --max-age=<timestamp>" on the server side. "deepen-since" + // cannot be used with "deepen". + DeepenSince Capability = "deepen-since" + // DeepenNot adds the "deepen-not" command to the fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific revision, instead of depth. Internally it's the equivalent of + // doing "rev-list --not <rev>" on the server side. "deepen-not" + // cannot be used with "deepen", but can be used with "deepen-since". + DeepenNot Capability = "deepen-not" + // DeepenRelative if this capability is requested by the client, the + // semantics of the "deepen" command change. The "depth" argument is the + // depth from the current shallow boundary, instead of the depth from + // remote refs. + DeepenRelative Capability = "deepen-relative" + // NoProgress the client was started with "git clone -q" or something, and + // doesn't want that side band 2. Basically the client just says "I do not + // wish to receive stream 2 on sideband, so do not send it to me, and if + // you did, I will drop it on the floor anyway".
However, the sideband + // channel 3 is still used for error responses. + NoProgress Capability = "no-progress" + // IncludeTag capability is about sending annotated tags if we are + // sending objects they point to. If we pack an object to the client, and + // a tag object points exactly at that object, we pack the tag object too. + // In general this allows a client to get all new annotated tags when it + // fetches a branch, in a single network connection. + // + // Clients MAY always send include-tag, hardcoding it into a request when + // the server advertises this capability. The decision for a client to + // request include-tag only has to do with the client's desires for tag + // data, whether or not a server had advertised objects in the + // refs/tags/* namespace. + // + // Servers MUST pack the tags if their referent is packed and the client + // has requested include-tag. + // + // Clients MUST be prepared for the case where a server has ignored + // include-tag and has not actually sent tags in the pack. In such + // cases the client SHOULD issue a subsequent fetch to acquire the tags + // that include-tag would have otherwise given the client. + // + // The server SHOULD send include-tag, if it supports it, regardless + // of whether or not there are tags available. + IncludeTag Capability = "include-tag" + // ReportStatus the receive-pack process can receive a 'report-status' + // capability, which tells it that the client wants a report of what + // happened after a packfile upload and reference update. If the pushing + // client requests this capability, after unpacking and updating references + // the server will respond with whether the packfile unpacked successfully + // and if each reference was updated successfully. If any of those were not + // successful, it will send back an error message. See pack-protocol.txt + // for example messages. + ReportStatus Capability = "report-status" + // DeleteRefs If the server sends back this capability, it means that + // it is capable of accepting a zero-id value as the target + // value of a reference update. It is not sent back by the client; it + // simply informs the client that it can send zero-id values + // to delete references. + DeleteRefs Capability = "delete-refs" + // Quiet If the receive-pack server advertises this capability, it is + // capable of silencing human-readable progress output which otherwise may + // be shown when processing the received pack. A send-pack client should + // respond with the 'quiet' capability to suppress server-side progress + // reporting if the local progress reporting is also being suppressed + // (e.g., via `push -q`, or if stderr does not go to a tty). + Quiet Capability = "quiet" + // Atomic If the server sends this capability it is capable of accepting + // atomic pushes. If the pushing client requests this capability, the server + // will update the refs in one atomic transaction. Either all refs are + // updated or none. + Atomic Capability = "atomic" + // PushOptions If the server sends this capability it is able to accept + // push options after the update commands have been sent, but before the + // packfile is streamed. If the pushing client requests this capability, + // the server will pass the options to the pre- and post-receive hooks + // that process this push request.
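+ //
+ // As an illustration only (the exact grammar lives in Git's
+ // pack-protocol.txt), a push that uses this capability sends, in order:
+ //
+ //	1. the update commands, terminated by a flush-pkt
+ //	2. one pkt-line per option (e.g. "ci.skip" or "key=value"),
+ //	   terminated by a flush-pkt
+ //	3. the packfile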
+ PushOptions Capability = "push-options" + // AllowTipSHA1InWant if the upload-pack server advertises this capability, + // fetch-pack may send "want" lines with SHA-1s that exist at the server but + // are not advertised by upload-pack. + AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want" + // AllowReachableSHA1InWant if the upload-pack server advertises this + // capability, fetch-pack may send "want" lines with SHA-1s that exist at + // the server but are not advertised by upload-pack. + AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want" + // PushCert the receive-pack server that advertises this capability is + // willing to accept a signed push certificate, and asks the <nonce> to be + // included in the push certificate. A send-pack client MUST NOT + // send a push-cert packet unless the receive-pack server advertises + // this capability. + PushCert Capability = "push-cert" + // SymRef symbolic reference support for better negotiation. + SymRef Capability = "symref" + // ObjectFormat takes a hash algorithm as an argument and indicates that the + // server supports the given hash algorithm. + ObjectFormat Capability = "object-format" + // Filter if present, fetch-pack may send "filter" commands to request a + // partial clone or partial fetch and request that the server omit various + // objects from the packfile. + Filter Capability = "filter" + // LsRefs is the command used to request a reference advertisement in v2. + // Unlike the current reference advertisement, ls-refs takes in arguments + // which can be used to limit the refs sent from the server. + // + // Additional features not supported in the base command will be + // advertised as the value of the command in the capability + // advertisement in the form of a space separated list of features: + // "<command>=<feature 1> <feature 2>" + // + // ls-refs takes in the following arguments: + // + // symrefs + // In addition to the object pointed to by it, show the underlying ref + // pointed to by it when showing a symbolic ref. + // peel + // Show peeled tags. + // ref-prefix + // When specified, only references having a prefix matching one of + // the provided prefixes are displayed. Multiple instances may be + // given, in which case references matching any prefix will be + // shown. Note that this is purely for optimization; a server MAY + // show refs not matching the prefix if it chooses, and clients + // should filter the result themselves. + LsRefs Capability = "ls-refs" + // Fetch is the command used to fetch a packfile in v2. It can be + // looked at as a modified version of the v1 fetch where the + // ref-advertisement is stripped out (since the ls-refs command fills + // that role) and the message format is tweaked to eliminate + // redundancies and permit easy addition of future extensions. + // + // Additional features not supported in the base command will be + // advertised as the value of the command in the capability + // advertisement in the form of a space separated list of features: + // "<command>=<feature 1> <feature 2>" + // + // A fetch request can take the following arguments: + // + // want <oid> + // Indicates to the server an object which the client wants to + // retrieve. Wants can be anything and are not limited to + // advertised objects. + // + // have <oid> + // Indicates to the server an object which the client has locally. + // This allows the server to make a packfile which only contains + // the objects that the client needs. Multiple 'have' lines can be + // supplied.
+ // + // done + // Indicates to the server that negotiation should terminate (or + // not even begin if performing a clone) and that the server should + // use the information supplied in the request to construct the + // packfile. + // + // thin-pack + // Request that a thin pack be sent, which is a pack with deltas + // which reference base objects not contained within the pack (but + // are known to exist at the receiving end). This can reduce the + // network traffic significantly, but it requires the receiving end + // to know how to "thicken" these packs by adding the missing bases + // to the pack. + // + // no-progress + // Request that progress information that would normally be sent on + // side-band channel 2, during the packfile transfer, should not be + // sent. However, the side-band channel 3 is still used for error + // responses. + // + // include-tag + // Request that annotated tags should be sent if the objects they + // point to are being sent. + // + // ofs-delta + // Indicate that the client understands PACKv2 with delta referring + // to its base by position in pack rather than by an oid. That is, + // they can read OBJ_OFS_DELTA (aka type 6) in a packfile. + Fetch Capability = "fetch" + // ServerOption if advertised, indicates that any number of server + // specific options can be included in a request. This is done by + // sending each option as a "server-option=