diff --git a/plumbing/format/pktline/common.go b/plumbing/format/pktline/common.go new file mode 100644 index 000000000..2862f820f --- /dev/null +++ b/plumbing/format/pktline/common.go @@ -0,0 +1,56 @@ +package pktline + +import "errors" + +const ( + // Err is returned when the pktline has encountered an error. + Err = iota - 1 + + // Flush is the numeric value of a flush packet. It is returned when the + // pktline is a flush packet. + Flush + + // Delim is the numeric value of a delim packet. It is returned when the + // pktline is a delim packet. + Delim + + // ResponseEnd is the numeric value of a response-end packet. It is + // returned when the pktline is a response-end packet. + ResponseEnd +) + +const ( + // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. + // See https://git-scm.com/docs/protocol-common#_pkt_line_format + MaxPayloadSize = MaxPacketSize - PacketLenSize + + // MaxPacketSize is the maximum packet size of a pkt-line in bytes. + // See https://git-scm.com/docs/protocol-common#_pkt_line_format + MaxPacketSize = 65520 + + // PacketLenSize is the size of the packet length in bytes. + PacketLenSize = 4 +) + +var ( + // ErrPayloadTooLong is returned by the Encode methods when any of the + // provided payloads is bigger than MaxPayloadSize. + ErrPayloadTooLong = errors.New("payload is too long") + + // ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. + ErrInvalidPktLen = errors.New("invalid pkt-len found") +) + +var ( + // flushPkt are the contents of a flush-pkt pkt-line. + flushPkt = []byte{'0', '0', '0', '0'} + + // delimPkt are the contents of a delim-pkt pkt-line. + delimPkt = []byte{'0', '0', '0', '1'} + + // responseEndPkt are the contents of a response-end-pkt pkt-line. + responseEndPkt = []byte{'0', '0', '0', '2'} + + // emptyPkt is an empty string pkt-line payload. 
+ emptyPkt = []byte{'0', '0', '0', '4'} +) diff --git a/plumbing/format/pktline/encoder.go b/plumbing/format/pktline/encoder.go deleted file mode 100644 index b6144faf5..000000000 --- a/plumbing/format/pktline/encoder.go +++ /dev/null @@ -1,126 +0,0 @@ -// Package pktline implements reading payloads form pkt-lines and encoding -// pkt-lines from payloads. -package pktline - -import ( - "bytes" - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/utils/trace" -) - -// An Encoder writes pkt-lines to an output stream. -type Encoder struct { - w io.Writer -} - -const ( - // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. - MaxPayloadSize = 65516 - - // For compatibility with canonical Git implementation, accept longer pkt-lines - OversizePayloadMax = 65520 -) - -var ( - // FlushPkt are the contents of a flush-pkt pkt-line. - FlushPkt = []byte{'0', '0', '0', '0'} - // Flush is the payload to use with the Encode method to encode a flush-pkt. - Flush = []byte{} - // FlushString is the payload to use with the EncodeString method to encode a flush-pkt. - FlushString = "" - // ErrPayloadTooLong is returned by the Encode methods when any of the - // provided payloads is bigger than MaxPayloadSize. - ErrPayloadTooLong = errors.New("payload is too long") -) - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - } -} - -// Flush encodes a flush-pkt to the output stream. -func (e *Encoder) Flush() error { - defer trace.Packet.Print("packet: > 0000") - _, err := e.w.Write(FlushPkt) - return err -} - -// Encode encodes a pkt-line with the payload specified and write it to -// the output stream. If several payloads are specified, each of them -// will get streamed in their own pkt-lines. 
-func (e *Encoder) Encode(payloads ...[]byte) error { - for _, p := range payloads { - if err := e.encodeLine(p); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeLine(p []byte) error { - if len(p) > MaxPayloadSize { - return ErrPayloadTooLong - } - - if bytes.Equal(p, Flush) { - return e.Flush() - } - - n := len(p) + 4 - defer trace.Packet.Printf("packet: > %04x %s", n, p) - if _, err := e.w.Write(asciiHex16(n)); err != nil { - return err - } - _, err := e.w.Write(p) - return err -} - -// Returns the hexadecimal ascii representation of the 16 less -// significant bits of n. The length of the returned slice will always -// be 4. Example: if n is 1234 (0x4d2), the return value will be -// []byte{'0', '4', 'd', '2'}. -func asciiHex16(n int) []byte { - var ret [4]byte - ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) - ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) - ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) - ret[3] = byteToASCIIHex(byte(n & 0x000f)) - - return ret[:] -} - -// turns a byte into its hexadecimal ascii representation. Example: -// from 11 (0xb) to 'b'. -func byteToASCIIHex(n byte) byte { - if n < 10 { - return '0' + n - } - - return 'a' - 10 + n -} - -// EncodeString works similarly as Encode but payloads are specified as strings. -func (e *Encoder) EncodeString(payloads ...string) error { - for _, p := range payloads { - if err := e.Encode([]byte(p)); err != nil { - return err - } - } - - return nil -} - -// Encodef encodes a single pkt-line with the payload formatted as -// the format specifier. The rest of the arguments will be used in -// the format string. 
-func (e *Encoder) Encodef(format string, a ...interface{}) error { - return e.EncodeString( - fmt.Sprintf(format, a...), - ) -} diff --git a/plumbing/format/pktline/error.go b/plumbing/format/pktline/error.go index 2c0e5a72a..419fc51a9 100644 --- a/plumbing/format/pktline/error.go +++ b/plumbing/format/pktline/error.go @@ -1,10 +1,8 @@ package pktline import ( - "bytes" "errors" "io" - "strings" ) var ( @@ -12,9 +10,16 @@ var ( // error line. ErrInvalidErrorLine = errors.New("expected an error-line") + // ErrNilWriter is returned when a nil writer is passed to WritePacket. + ErrNilWriter = errors.New("nil writer") + errPrefix = []byte("ERR ") ) +const ( + errPrefixSize = PacketLenSize +) + // ErrorLine is a packet line that contains an error message. // Once this packet is sent by client or server, the data transfer process is // terminated. @@ -30,22 +35,17 @@ func (e *ErrorLine) Error() string { // Encode encodes the ErrorLine into a packet line. func (e *ErrorLine) Encode(w io.Writer) error { - p := NewEncoder(w) - return p.Encodef("%s%s\n", string(errPrefix), e.Text) + _, err := Writef(w, "%s%s\n", errPrefix, e.Text) + return err } // Decode decodes a packet line into an ErrorLine. 
func (e *ErrorLine) Decode(r io.Reader) error { - s := NewScanner(r) - if !s.Scan() { - return s.Err() - } - - line := s.Bytes() - if !bytes.HasPrefix(line, errPrefix) { + _, _, err := ReadLine(r) + var el *ErrorLine + if !errors.As(err, &el) { return ErrInvalidErrorLine } - - e.Text = strings.TrimSpace(string(line[4:])) + e.Text = el.Text return nil } diff --git a/plumbing/format/pktline/error_test.go b/plumbing/format/pktline/error_test.go index 3cffd20d1..fff17932b 100644 --- a/plumbing/format/pktline/error_test.go +++ b/plumbing/format/pktline/error_test.go @@ -1,6 +1,7 @@ package pktline import ( + "bufio" "bytes" "errors" "io" @@ -33,7 +34,7 @@ func TestDecodeEmptyErrorLine(t *testing.T) { var buf bytes.Buffer e := &ErrorLine{} err := e.Decode(&buf) - if err != nil { + if !errors.Is(err, ErrInvalidErrorLine) { t.Fatal(err) } if e.Text != "" { @@ -44,10 +45,10 @@ func TestDecodeEmptyErrorLine(t *testing.T) { func TestDecodeErrorLine(t *testing.T) { var buf bytes.Buffer buf.WriteString("000eERR foobar") - var e *ErrorLine + var e ErrorLine err := e.Decode(&buf) - if !errors.As(err, &e) { - t.Fatalf("expected error line, got: %T: %v", err, err) + if err != nil { + t.Fatal(err) } if e.Text != "foobar" { t.Fatalf("unexpected error line: %q", e.Text) @@ -57,12 +58,22 @@ func TestDecodeErrorLine(t *testing.T) { func TestDecodeErrorLineLn(t *testing.T) { var buf bytes.Buffer buf.WriteString("000fERR foobar\n") - var e *ErrorLine + var e ErrorLine err := e.Decode(&buf) - if !errors.As(err, &e) { - t.Fatalf("expected error line, got: %T: %v", err, err) + if err != nil { + t.Fatal(err) } if e.Text != "foobar" { t.Fatalf("unexpected error line: %q", e.Text) } } + +func TestPeekErrorLine(t *testing.T) { + var buf bytes.Buffer + buf.WriteString("000fERR foobar\n") + var e *ErrorLine + _, _, err := PeekLine(bufio.NewReader(&buf)) + if !errors.As(err, &e) { + t.Fatalf("expected error line, got: %T: %v", err, err) + } +} diff --git a/plumbing/format/pktline/length.go 
b/plumbing/format/pktline/length.go new file mode 100644 index 000000000..2917b717e --- /dev/null +++ b/plumbing/format/pktline/length.go @@ -0,0 +1,85 @@ +package pktline + +// ParseLength parses a four digit hexadecimal number from the given byte slice +// into its integer representation. If the byte slice contains non-hexadecimal, +// it will return an error. +func ParseLength(b []byte) (int, error) { + if b == nil { + return Err, ErrInvalidPktLen + } + + n, err := hexDecode(b) + if err != nil { + return Err, err + } + + if n == 3 { + return Err, ErrInvalidPktLen + } + + // Limit the maximum size of a pkt-line to 65520 bytes. + // Fixes: b4177b89c08b (plumbing: format: pktline, Accept oversized pkt-lines up to 65524 bytes) + // See https://github.com/git/git/commit/7841c4801ce51f1f62d376d164372e8677c6bc94 + if n > MaxPacketSize { + return Err, ErrInvalidPktLen + } + + return n, nil +} + +// Turns the hexadecimal representation of a number in a byte slice into +// a number. This function substitute strconv.ParseUint(string(buf), 16, +// 16) and/or hex.Decode, to avoid generating new strings, thus helping the +// GC. +func hexDecode(buf []byte) (int, error) { + if len(buf) < 4 { + return 0, ErrInvalidPktLen + } + + var ret int + for i := 0; i < PacketLenSize; i++ { + n, err := asciiHexToByte(buf[i]) + if err != nil { + return 0, ErrInvalidPktLen + } + ret = 16*ret + int(n) + } + return ret, nil +} + +// turns the hexadecimal ascii representation of a byte into its +// numerical value. Example: from 'b' to 11 (0xb). +func asciiHexToByte(b byte) (byte, error) { + switch { + case b >= '0' && b <= '9': + return b - '0', nil + case b >= 'a' && b <= 'f': + return b - 'a' + 10, nil + default: + return 0, ErrInvalidPktLen + } +} + +// Returns the hexadecimal ascii representation of the 16 less +// significant bits of n. The length of the returned slice will always +// be 4. Example: if n is 1234 (0x4d2), the return value will be +// []byte{'0', '4', 'd', '2'}. 
+func asciiHex16(n int) []byte { + var ret [4]byte + ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) + ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) + ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) + ret[3] = byteToASCIIHex(byte(n & 0x000f)) + + return ret[:] +} + +// turns a byte into its hexadecimal ascii representation. Example: +// from 11 (0xb) to 'b'. +func byteToASCIIHex(n byte) byte { + if n < 10 { + return '0' + n + } + + return 'a' - 10 + n +} diff --git a/plumbing/format/pktline/pktline.go b/plumbing/format/pktline/pktline.go new file mode 100644 index 000000000..d70f9025b --- /dev/null +++ b/plumbing/format/pktline/pktline.go @@ -0,0 +1,217 @@ +package pktline + +import ( + "bytes" + "fmt" + "io" + + "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/trace" +) + +// Write writes a pktline packet. +func Write(w io.Writer, p []byte) (n int, err error) { + if w == nil { + return 0, ErrNilWriter + } + + defer func() { + if err == nil { + trace.Packet.Printf("packet: > %04x %s", n, p) + } + }() + + if len(p) == 0 { + return w.Write(emptyPkt) + } + + if len(p) > MaxPayloadSize { + return 0, ErrPayloadTooLong + } + + pktlen := len(p) + PacketLenSize + n, err = w.Write(asciiHex16(pktlen)) + if err != nil { + return + } + + n2, err := w.Write(p) + n += n2 + return +} + +// Writef writes a pktline packet from a format string. +func Writef(w io.Writer, format string, a ...interface{}) (n int, err error) { + if len(a) == 0 { + return Write(w, []byte(format)) + } + return Write(w, []byte(fmt.Sprintf(format, a...))) +} + +// Writeln writes a pktline packet from a string and appends a newline. +func Writeln(w io.Writer, s string) (n int, err error) { + return Write(w, []byte(s+"\n")) +} + +// WriteString writes a pktline packet from a string. +func WriteString(w io.Writer, s string) (n int, err error) { + return Write(w, []byte(s)) +} + +// WriteError writes an error packet. 
+func WriteError(w io.Writer, e error) (n int, err error) { + return Writef(w, "%s%s\n", errPrefix, e.Error()) +} + +// WriteFlush writes a flush packet. +// This always writes 4 bytes. +func WriteFlush(w io.Writer) (err error) { + defer func() { + if err == nil { + trace.Packet.Printf("packet: > 0000") + } + }() + + _, err = w.Write(flushPkt) + return err +} + +// WriteDelim writes a delimiter packet. +// This always writes 4 bytes. +func WriteDelim(w io.Writer) (err error) { + defer func() { + if err == nil { + trace.Packet.Printf("packet: > 0001") + } + }() + + _, err = w.Write(delimPkt) + return err +} + +// WriteResponseEnd writes a response-end packet. +// This always writes 4 bytes. +func WriteResponseEnd(w io.Writer) (err error) { + defer func() { + if err == nil { + trace.Packet.Printf("packet: > 0002") + } + }() + + _, err = w.Write(responseEndPkt) + return err +} + +// Read reads a pktline packet payload into p and returns the packet full +// length. +// +// If p is less than 4 bytes, Read returns ErrInvalidPktLen. If p cannot hold +// the entire packet, Read returns io.ErrUnexpectedEOF. +// The error can be of type *ErrorLine if the packet is an error packet. +// +// Use packet length to determine the type of packet i.e. 0 is a flush packet, +// 1 is a delim packet, 2 is a response-end packet, and a length greater or +// equal to 4 is a data packet. 
+func Read(r io.Reader, p []byte) (l int, err error) {
+	_, err = io.ReadFull(r, p[:PacketLenSize])
+	if err != nil {
+		if err == io.ErrUnexpectedEOF {
+			return Err, ErrInvalidPktLen
+		}
+		return Err, err
+	}
+
+	length, err := ParseLength(p)
+	if err != nil {
+		return Err, err
+	}
+
+	switch length {
+	case Flush, Delim, ResponseEnd:
+		trace.Packet.Printf("packet: < %04x", length)
+		return length, nil
+	case PacketLenSize: // empty line
+		trace.Packet.Printf("packet: < %04x", length)
+		return length, nil
+	}
+
+	_, err = io.ReadFull(r, p[PacketLenSize:length])
+	if err != nil {
+		return Err, err
+	}
+
+	if bytes.HasPrefix(p[PacketLenSize:length], errPrefix) {
+		err = &ErrorLine{
+			Text: string(bytes.TrimSpace(p[PacketLenSize+errPrefixSize : length])),
+		}
+	}
+
+	trace.Packet.Printf("packet: < %04x %s", length, p[PacketLenSize:length])
+
+	return length, err
+}
+
+// ReadLine reads a packet line into a temporary shared buffer and
+// returns the packet length and payload.
+// Subsequent calls to ReadLine may overwrite the buffer.
+//
+// Use packet length to determine the type of packet i.e. 0 is a flush packet,
+// 1 is a delim packet, 2 is a response-end packet, and a length greater or
+// equal to 4 is a data packet.
+//
+// The error can be of type *ErrorLine if the packet is an error packet.
+func ReadLine(r io.Reader) (l int, p []byte, err error) {
+	buf := GetPacketBuffer()
+	defer PutPacketBuffer(buf)
+
+	l, err = Read(r, (*buf)[:])
+	if l < PacketLenSize {
+		return l, nil, err
+	}
+
+	return l, (*buf)[PacketLenSize:l], err
+}
+
+// PeekLine reads a packet line without consuming it.
+//
+// Use packet length to determine the type of packet i.e. 0 is a flush packet,
+// 1 is a delim packet, 2 is a response-end packet, and a length greater or
+// equal to 4 is a data packet.
+//
+// The error can be of type *ErrorLine if the packet is an error packet. 
+func PeekLine(r ioutil.ReadPeeker) (l int, p []byte, err error) {
+	n, err := r.Peek(PacketLenSize)
+	if err != nil {
+		return Err, nil, err
+	}
+
+	length, err := ParseLength(n)
+	if err != nil {
+		return Err, nil, err
+	}
+
+	switch length {
+	case Flush, Delim, ResponseEnd:
+		trace.Packet.Printf("packet: < %04x", length)
+		return length, nil, nil
+	case PacketLenSize: // empty line
+		trace.Packet.Printf("packet: < %04x", length)
+		return length, []byte{}, nil
+	}
+
+	data, err := r.Peek(length)
+	if err != nil {
+		return Err, nil, err
+	}
+
+	buf := data[PacketLenSize:length]
+	if bytes.HasPrefix(buf, errPrefix) {
+		err = &ErrorLine{
+			Text: string(bytes.TrimSpace(buf[errPrefixSize:])),
+		}
+	}
+
+	trace.Packet.Printf("packet: < %04x %s", length, buf)
+
+	return length, buf, err
+}
diff --git a/plumbing/format/pktline/pktline_bench_test.go b/plumbing/format/pktline/pktline_bench_test.go
new file mode 100644
index 000000000..dc2ce3db7
--- /dev/null
+++ b/plumbing/format/pktline/pktline_bench_test.go
@@ -0,0 +1,214 @@
+package pktline_test
+
+import (
+	"bytes"
+	"io"
+	"strings"
+	"testing"
+
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+func BenchmarkScanner(b *testing.B) {
+	sections, err := sectionsExample(2, 4)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	var maxp bytes.Buffer
+	if _, err := pktline.WriteString(&maxp, strings.Repeat("a", pktline.MaxPayloadSize)); err != nil {
+		b.Fatal(err)
+	}
+
+	cases := []struct {
+		name  string
+		input string
+	}{
+		{
+			name:  "empty",
+			input: "",
+		},
+		{
+			name:  "one message",
+			input: "000ahello\n",
+		},
+		{
+			name:  "two messages",
+			input: "000ahello\n000bworld!\n",
+		},
+		{
+			name:  "sections",
+			input: sections.String(),
+		},
+		{
+			name:  "max packet size",
+			input: maxp.String(),
+		},
+	}
+	for _, tc := range cases {
+		r := strings.NewReader("")
+		s := pktline.NewScanner(r)
+		b.Run(tc.name, func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				r.Reset(tc.input)
+				for s.Scan() {
+					if err := s.Err(); err != nil && err != 
io.EOF { + b.Error(err) + } + } + } + }) + } +} + +func BenchmarkReadPacket(b *testing.B) { + sections, err := sectionsExample(2, 4) + if err != nil { + b.Fatal(err) + } + + var maxp bytes.Buffer + if _, err := pktline.WriteString(&maxp, strings.Repeat("a", pktline.MaxPayloadSize)); err != nil { + b.Fatal(err) + } + + cases := []struct { + name string + input string + }{ + { + name: "empty", + input: "", + }, + { + name: "one message", + input: "000ahello\n", + }, + { + name: "two messages", + input: "000ahello\n000bworld!\n", + }, + { + name: "sections", + input: sections.String(), + }, + { + name: "max packet size", + input: maxp.String(), + }, + } + for _, tc := range cases { + r := strings.NewReader("") + b.Run(tc.name, func(b *testing.B) { + buf := pktline.GetPacketBuffer() + for i := 0; i < b.N; i++ { + r.Reset(tc.input) + for { + _, err := pktline.Read(r, (*buf)[:]) + if err == io.EOF { + break + } + if err != nil { + b.Error(err) + } + } + } + pktline.PutPacketBuffer(buf) + }) + } +} + +func BenchmarkReadPacketLine(b *testing.B) { + sections, err := sectionsExample(2, 4) + if err != nil { + b.Fatal(err) + } + + var maxp bytes.Buffer + if _, err := pktline.WriteString(&maxp, strings.Repeat("a", pktline.MaxPayloadSize)); err != nil { + b.Fatal(err) + } + + cases := []struct { + name string + input string + }{ + { + name: "empty", + input: "", + }, + { + name: "one message", + input: "000ahello\n", + }, + { + name: "two messages", + input: "000ahello\n000bworld!\n", + }, + { + name: "sections", + input: sections.String(), + }, + { + name: "max packet size", + input: maxp.String(), + }, + } + for _, tc := range cases { + r := strings.NewReader("") + b.Run(tc.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + r.Reset(tc.input) + for { + _, _, err := pktline.ReadLine(r) + if err == io.EOF { + break + } + if err != nil { + break + } + } + } + }) + } +} + +func BenchmarkWritePacket(b *testing.B) { + sections, err := sectionsExample(2, 4) + if err != nil { + 
b.Fatal(err) + } + + cases := []struct { + name string + input []byte + }{ + { + name: "empty", + input: []byte(""), + }, + { + name: "one message", + input: []byte("hello\n"), + }, + { + name: "two messages", + input: []byte("hello\nworld!\n"), + }, + { + name: "sections", + input: sections.Bytes(), + }, + } + for _, tc := range cases { + b.Run(tc.name, func(b *testing.B) { + var buf bytes.Buffer + for i := 0; i < b.N; i++ { + _, err := pktline.Write(&buf, tc.input) + if err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/plumbing/format/pktline/pktline_read_test.go b/plumbing/format/pktline/pktline_read_test.go new file mode 100644 index 000000000..5ad2d142c --- /dev/null +++ b/plumbing/format/pktline/pktline_read_test.go @@ -0,0 +1,361 @@ +package pktline_test + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strings" + + "github.com/go-git/go-git/v5/plumbing/format/pktline" + + . "gopkg.in/check.v1" +) + +type SuiteReader struct{} + +var _ = Suite(&SuiteReader{}) + +func (s *SuiteReader) TestInvalid(c *C) { + for i, test := range [...]string{ + "0003", + "fff5", "ffff", + "gorka", + "0", "003", + " 5a", "5 a", "5 \n", + "-001", "-000", + } { + r := strings.NewReader(test) + _, _, err := pktline.ReadLine(r) + c.Assert(err, ErrorMatches, pktline.ErrInvalidPktLen.Error()+".*", + Commentf("i = %d, data = %q", i, test)) + } +} + +func (s *SuiteReader) TestDecodeOversizePktLines(c *C) { + for _, test := range [...]string{ + "fff1" + strings.Repeat("a", 0xfff1), + "fff2" + strings.Repeat("a", 0xfff2), + "fff3" + strings.Repeat("a", 0xfff3), + "fff4" + strings.Repeat("a", 0xfff4), + } { + r := strings.NewReader(test) + _, _, err := pktline.ReadLine(r) + c.Assert(err, NotNil) + } +} + +func (s *SuiteReader) TestEmptyReader(c *C) { + r := strings.NewReader("") + l, p, err := pktline.ReadLine(r) + c.Assert(l, Equals, -1) + c.Assert(p, IsNil) + c.Assert(err, ErrorMatches, io.EOF.Error()) +} + +func (s *SuiteReader) TestFlush(c *C) { + var buf 
bytes.Buffer + err := pktline.WriteFlush(&buf) + c.Assert(err, IsNil) + + l, p, err := pktline.ReadLine(&buf) + c.Assert(l, Equals, pktline.Flush) + c.Assert(p, IsNil) + c.Assert(err, IsNil) + c.Assert(len(p), Equals, 0) +} + +func (s *SuiteReader) TestPktLineTooShort(c *C) { + r := strings.NewReader("010cfoobar") + _, _, err := pktline.ReadLine(r) + c.Assert(err, ErrorMatches, "unexpected EOF") +} + +func (s *SuiteReader) TestScanAndPayload(c *C) { + for i, test := range [...]string{ + "a", + "a\n", + strings.Repeat("a", 100), + strings.Repeat("a", 100) + "\n", + strings.Repeat("\x00", 100), + strings.Repeat("\x00", 100) + "\n", + strings.Repeat("a", pktline.MaxPayloadSize), + strings.Repeat("a", pktline.MaxPayloadSize-1) + "\n", + } { + var buf bytes.Buffer + _, err := pktline.Writef(&buf, test) + c.Assert(err, IsNil, + Commentf("input len=%x, contents=%.10q\n", len(test), test)) + + _, p, err := pktline.ReadLine(&buf) + c.Assert(err, IsNil) + c.Assert(p, NotNil, + Commentf("i = %d, payload = %q, test = %.20q...", i, p, test)) + + c.Assert(p, DeepEquals, []byte(test), + Commentf("in = %.20q out = %.20q", test, string(p))) + } +} + +func (s *SuiteReader) TestSkip(c *C) { + for _, test := range [...]struct { + input []string + n int + expected []byte + }{ + { + input: []string{ + "first", + "second", + "third", + }, + n: 1, + expected: []byte("second"), + }, + { + input: []string{ + "first", + "second", + "third", + }, + n: 2, + expected: []byte("third"), + }, + } { + var buf bytes.Buffer + for _, in := range test.input { + _, err := pktline.Writef(&buf, in) + c.Assert(err, IsNil) + } + + for i := 0; i < test.n; i++ { + _, p, err := pktline.ReadLine(&buf) + c.Assert(p, NotNil, + Commentf("scan error = %s", err)) + } + _, p, err := pktline.ReadLine(&buf) + c.Assert(p, NotNil, + Commentf("scan error = %s", err)) + + c.Assert(p, DeepEquals, test.expected, + Commentf("\nin = %.20q\nout = %.20q\nexp = %.20q", + test.input, p, test.expected)) + } +} + +func (s 
*SuiteReader) TestEOF(c *C) { + var buf bytes.Buffer + _, err := pktline.Writef(&buf, "first") + c.Assert(err, IsNil) + _, err = pktline.Writef(&buf, "second") + c.Assert(err, IsNil) + + for { + _, _, err = pktline.ReadLine(&buf) + if err == io.EOF { + break + } + } + c.Assert(err, ErrorMatches, "EOF") +} + +type mockSuiteReader struct{} + +func (r *mockSuiteReader) Read([]byte) (int, error) { return 0, errors.New("foo") } + +func (s *SuiteReader) TestInternalReadError(c *C) { + r := &mockSuiteReader{} + _, p, err := pktline.ReadLine(r) + c.Assert(p, IsNil) + c.Assert(err, ErrorMatches, "foo") +} + +// A section are several non flush-pkt lines followed by a flush-pkt, which +// how the git protocol sends long messages. +func (s *SuiteReader) TestReadSomeSections(c *C) { + nSections := 2 + nLines := 4 + data, err := sectionsExample(nSections, nLines) + c.Assert(err, IsNil) + + sectionCounter := 0 + lineCounter := 0 + var ( + p []byte + e error + ) + for { + _, p, e = pktline.ReadLine(data) + if e == io.EOF { + break + } + if len(p) == 0 { + sectionCounter++ + } + lineCounter++ + } + c.Assert(e, ErrorMatches, "EOF") + c.Assert(sectionCounter, Equals, nSections) + c.Assert(lineCounter, Equals, (1+nLines)*nSections) +} + +func (s *SuiteReader) TestPeekReadPacket(c *C) { + var buf bytes.Buffer + _, err := pktline.Writef(&buf, "first") + c.Assert(err, IsNil) + _, err = pktline.Writef(&buf, "second") + c.Assert(err, IsNil) + + sc := bufio.NewReader(&buf) + p, err := sc.Peek(4) + c.Assert(err, IsNil) + c.Assert(p, DeepEquals, []byte("0009")) + + l, p, err := pktline.ReadLine(sc) + c.Assert(err, IsNil) + c.Assert(l, Equals, 9) + c.Assert(p, DeepEquals, []byte("first")) + + p, err = sc.Peek(4) + c.Assert(err, IsNil) + c.Assert(p, DeepEquals, []byte("000a")) +} + +func (s *SuiteReader) TestPeekMultiple(c *C) { + var buf bytes.Buffer + _, err := pktline.WriteString(&buf, "a") + c.Assert(err, IsNil) + + sc := bufio.NewReader(&buf) + b, err := sc.Peek(4) + c.Assert(b, 
DeepEquals, []byte("0005"))
+	c.Assert(err, IsNil)
+
+	b, err = sc.Peek(5)
+	c.Assert(b, DeepEquals, []byte("0005a"))
+	c.Assert(err, IsNil)
+}
+
+func (s *SuiteReader) TestInvalidPeek(c *C) {
+	var buf bytes.Buffer
+	_, err := pktline.WriteString(&buf, "a")
+	c.Assert(err, IsNil)
+	c.Assert(err, IsNil)
+
+	sc := bufio.NewReader(&buf)
+	_, err = sc.Peek(-1)
+	c.Assert(err, ErrorMatches, bufio.ErrNegativeCount.Error())
+}
+
+func (s *SuiteReader) TestPeekPacket(c *C) {
+	var buf bytes.Buffer
+	_, err := pktline.Writef(&buf, "first")
+	c.Assert(err, IsNil)
+	_, err = pktline.Writef(&buf, "second")
+	c.Assert(err, IsNil)
+	sc := bufio.NewReader(&buf)
+	l, p, err := pktline.PeekLine(sc)
+	c.Assert(err, IsNil)
+	c.Assert(l, Equals, 9)
+	c.Assert(p, DeepEquals, []byte("first"))
+	l, p, err = pktline.PeekLine(sc)
+	c.Assert(err, IsNil)
+	c.Assert(l, Equals, 9)
+	c.Assert(p, DeepEquals, []byte("first"))
+}
+
+func (s *SuiteReader) TestPeekPacketReadPacket(c *C) {
+	var buf bytes.Buffer
+	_, err := pktline.WriteString(&buf, "a")
+	c.Assert(err, IsNil)
+
+	sc := bufio.NewReader(&buf)
+	l, p, err := pktline.PeekLine(sc)
+	c.Assert(err, IsNil)
+	c.Assert(l, Equals, 5)
+	c.Assert(p, DeepEquals, []byte("a"))
+
+	l, p, err = pktline.ReadLine(sc)
+	c.Assert(err, IsNil)
+	c.Assert(l, Equals, 5)
+	c.Assert(p, DeepEquals, []byte("a"))
+
+	l, p, err = pktline.PeekLine(sc)
+	c.Assert(err, ErrorMatches, io.EOF.Error())
+	c.Assert(l, Equals, -1)
+	c.Assert(p, IsNil)
+}
+
+func (s *SuiteReader) TestPeekRead(c *C) {
+	hash := "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"
+
+	var buf bytes.Buffer
+	_, err := pktline.Writef(&buf, hash)
+	c.Assert(err, IsNil)
+
+	sc := bufio.NewReader(&buf)
+	b, err := sc.Peek(7)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("002c6ec"))
+
+	full, err := io.ReadAll(sc)
+	c.Assert(err, IsNil)
+	c.Assert(string(full), DeepEquals, "002c"+hash)
+}
+
+func (s *SuiteReader) TestPeekReadPart(c *C) {
+	hash := "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"
+
+	var 
buf bytes.Buffer
+	_, err := pktline.Writef(&buf, hash)
+	c.Assert(err, IsNil)
+
+	sc := bufio.NewReader(&buf)
+	b, err := sc.Peek(7)
+	c.Assert(err, IsNil)
+	c.Assert(b, DeepEquals, []byte("002c6ec"))
+
+	var part [8]byte
+	n, err := sc.Read(part[:])
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, 8)
+	c.Assert(part[:], DeepEquals, []byte("002c6ecf"))
+}
+
+func (s *SuiteReader) TestReadPacketError(c *C) {
+	var buf bytes.Buffer
+	_, err := pktline.WriteError(&buf, io.EOF)
+	c.Assert(err, IsNil)
+
+	l, p, err := pktline.ReadLine(&buf)
+	c.Assert(err, NotNil)
+	c.Assert(l, Equals, 12)
+	c.Assert(string(p), DeepEquals, "ERR EOF\n")
+}
+
+// returns nSection sections, each of them with nLines pkt-lines (not
+// counting the flush-pkt:
+//
+//	0009 0.0\n
+//	0009 0.1\n
+//	...
+//	0000
+// and so on
+func sectionsExample(nSections, nLines int) (*bytes.Buffer, error) {
+	var buf bytes.Buffer
+	for section := 0; section < nSections; section++ {
+		for line := 0; line < nLines; line++ {
+			line := fmt.Sprintf(" %d.%d\n", section, line)
+			_, err := pktline.WriteString(&buf, line)
+			if err != nil {
+				return nil, err
+			}
+		}
+		if err := pktline.WriteFlush(&buf); err != nil {
+			return nil, err
+		}
+	}
+
+	return &buf, nil
+}
diff --git a/plumbing/format/pktline/encoder_test.go b/plumbing/format/pktline/pktline_write_test.go
similarity index 73%
rename from plumbing/format/pktline/encoder_test.go
rename to plumbing/format/pktline/pktline_write_test.go
index a6addd658..0b0c070fe 100644
--- a/plumbing/format/pktline/encoder_test.go
+++ b/plumbing/format/pktline/pktline_write_test.go
@@ -3,31 +3,26 @@ package pktline_test
 import (
 	"bytes"
 	"strings"
-	"testing"
 
 	"github.com/go-git/go-git/v5/plumbing/format/pktline"
 
 	. 
"gopkg.in/check.v1" ) -func Test(t *testing.T) { TestingT(t) } +type SuiteWriter struct{} -type SuiteEncoder struct{} +var _ = Suite(&SuiteWriter{}) -var _ = Suite(&SuiteEncoder{}) - -func (s *SuiteEncoder) TestFlush(c *C) { +func (s *SuiteWriter) TestFlush(c *C) { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - - err := e.Flush() + err := pktline.WriteFlush(&buf) c.Assert(err, IsNil) obtained := buf.Bytes() - c.Assert(obtained, DeepEquals, pktline.FlushPkt) + c.Assert(obtained, DeepEquals, []byte("0000")) } -func (s *SuiteEncoder) TestEncode(c *C) { +func (s *SuiteWriter) TestEncode(c *C) { for i, test := range [...]struct { input [][]byte expected []byte @@ -40,7 +35,7 @@ func (s *SuiteEncoder) TestEncode(c *C) { }, { input: [][]byte{ []byte("hello\n"), - pktline.Flush, + {}, }, expected: []byte("000ahello\n0000"), }, { @@ -53,10 +48,10 @@ func (s *SuiteEncoder) TestEncode(c *C) { }, { input: [][]byte{ []byte("hello\n"), - pktline.Flush, + {}, []byte("world!\n"), []byte("foo"), - pktline.Flush, + {}, }, expected: []byte("000ahello\n0000000bworld!\n0007foo0000"), }, { @@ -75,19 +70,25 @@ func (s *SuiteEncoder) TestEncode(c *C) { "fff0" + strings.Repeat("b", pktline.MaxPayloadSize)), }, } { - comment := Commentf("input %d = %v\n", i, test.input) + comment := Commentf("input %d = %s\n", i, test.input) var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - - err := e.Encode(test.input...) 
- c.Assert(err, IsNil, comment) - c.Assert(buf.Bytes(), DeepEquals, test.expected, comment) + for _, p := range test.input { + var err error + if len(p) == 0 { + err = pktline.WriteFlush(&buf) + } else { + _, err = pktline.Write(&buf, p) + } + c.Assert(err, IsNil, comment) + } + + c.Assert(buf.String(), DeepEquals, string(test.expected), comment) } } -func (s *SuiteEncoder) TestEncodeErrPayloadTooLong(c *C) { +func (s *SuiteWriter) TestEncodeErrPayloadTooLong(c *C) { for i, input := range [...][][]byte{ { []byte(strings.Repeat("a", pktline.MaxPayloadSize+1)), @@ -105,14 +106,12 @@ func (s *SuiteEncoder) TestEncodeErrPayloadTooLong(c *C) { comment := Commentf("input %d = %v\n", i, input) var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - - err := e.Encode(input...) + _, err := pktline.Write(&buf, bytes.Join(input, nil)) c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment) } } -func (s *SuiteEncoder) TestEncodeStrings(c *C) { +func (s *SuiteWriter) TestWritePacketStrings(c *C) { for i, test := range [...]struct { input []string expected []byte @@ -125,7 +124,7 @@ func (s *SuiteEncoder) TestEncodeStrings(c *C) { }, { input: []string{ "hello\n", - pktline.FlushString, + "", }, expected: []byte("000ahello\n0000"), }, { @@ -138,10 +137,10 @@ func (s *SuiteEncoder) TestEncodeStrings(c *C) { }, { input: []string{ "hello\n", - pktline.FlushString, + "", "world!\n", "foo", - pktline.FlushString, + "", }, expected: []byte("000ahello\n0000000bworld!\n0007foo0000"), }, { @@ -163,15 +162,20 @@ func (s *SuiteEncoder) TestEncodeStrings(c *C) { comment := Commentf("input %d = %v\n", i, test.input) var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - - err := e.EncodeString(test.input...) 
- c.Assert(err, IsNil, comment) - c.Assert(buf.Bytes(), DeepEquals, test.expected, comment) + for _, p := range test.input { + var err error + if p == "" { + err = pktline.WriteFlush(&buf) + } else { + _, err = pktline.WriteString(&buf, p) + } + c.Assert(err, IsNil, comment) + } + c.Assert(buf.String(), DeepEquals, string(test.expected), comment) } } -func (s *SuiteEncoder) TestEncodeStringErrPayloadTooLong(c *C) { +func (s *SuiteWriter) TestWritePacketStringErrPayloadTooLong(c *C) { for i, input := range [...][]string{ { strings.Repeat("a", pktline.MaxPayloadSize+1), @@ -189,22 +193,18 @@ func (s *SuiteEncoder) TestEncodeStringErrPayloadTooLong(c *C) { comment := Commentf("input %d = %v\n", i, input) var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - - err := e.EncodeString(input...) + _, err := pktline.WriteString(&buf, strings.Join(input, "")) c.Assert(err, Equals, pktline.ErrPayloadTooLong, comment) } } -func (s *SuiteEncoder) TestEncodef(c *C) { +func (s *SuiteWriter) TestFormatString(c *C) { format := " %s %d\n" str := "foo" d := 42 var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - - err := e.Encodef(format, str, d) + _, err := pktline.Writef(&buf, format, str, d) c.Assert(err, IsNil) expected := []byte("000c foo 42\n") diff --git a/plumbing/format/pktline/scanner.go b/plumbing/format/pktline/scanner.go index fbb137de0..6226b87b8 100644 --- a/plumbing/format/pktline/scanner.go +++ b/plumbing/format/pktline/scanner.go @@ -1,21 +1,9 @@ package pktline import ( - "bytes" - "errors" "io" - "strings" - - "github.com/go-git/go-git/v5/utils/trace" -) - -const ( - lenSize = 4 ) -// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. -var ErrInvalidPktLen = errors.New("invalid pkt-len found") - // Scanner provides a convenient interface for reading the payloads of a // series of pkt-lines. 
It takes an io.Reader providing the source, // which then can be tokenized through repeated calls to the Scan @@ -27,10 +15,10 @@ var ErrInvalidPktLen = errors.New("invalid pkt-len found") // // Scanning stops at EOF or the first I/O error. type Scanner struct { - r io.Reader // The reader provided by the client - err error // Sticky error - payload []byte // Last pkt-payload - len [lenSize]byte // Last pkt-len + r io.Reader // The reader provided by the client + err error // Sticky error + buf [MaxPacketSize]byte // Buffer used to read the pktlines + n int // Number of bytes read in the last read } // NewScanner returns a new Scanner to read from r. @@ -51,96 +39,28 @@ func (s *Scanner) Err() error { // will return any error that occurred during scanning, except that if // it was io.EOF, Err will return nil. func (s *Scanner) Scan() bool { - var l int - l, s.err = s.readPayloadLen() - if s.err == io.EOF { - s.err = nil - return false - } - if s.err != nil { - return false - } - - if cap(s.payload) < l { - s.payload = make([]byte, 0, l) - } - - if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { - return false - } - s.payload = s.payload[:l] - trace.Packet.Printf("packet: < %04x %s", l, s.payload) - - if bytes.HasPrefix(s.payload, errPrefix) { - s.err = &ErrorLine{ - Text: strings.TrimSpace(string(s.payload[4:])), - } - return false - } - - return true + s.n, s.err = Read(s.r, s.buf[:]) + return s.err == nil } -// Bytes returns the most recent payload generated by a call to Scan. +// Bytes returns the most recent packet generated by a call to Scan. // The underlying array may point to data that will be overwritten by a // subsequent call to Scan. It does no allocation. func (s *Scanner) Bytes() []byte { - return s.payload -} - -// Method readPayloadLen returns the payload length by reading the -// pkt-len and subtracting the pkt-len size. 
-func (s *Scanner) readPayloadLen() (int, error) { - if _, err := io.ReadFull(s.r, s.len[:]); err != nil { - if err == io.ErrUnexpectedEOF { - return 0, ErrInvalidPktLen - } - - return 0, err - } - - n, err := hexDecode(s.len) - if err != nil { - return 0, err - } - - switch { - case n == 0: - return 0, nil - case n <= lenSize: - return 0, ErrInvalidPktLen - case n > OversizePayloadMax+lenSize: - return 0, ErrInvalidPktLen - default: - return n - lenSize, nil - } + return s.buf[:s.n] } -// Turns the hexadecimal representation of a number in a byte slice into -// a number. This function substitute strconv.ParseUint(string(buf), 16, -// 16) and/or hex.Decode, to avoid generating new strings, thus helping the -// GC. -func hexDecode(buf [lenSize]byte) (int, error) { - var ret int - for i := 0; i < lenSize; i++ { - n, err := asciiHexToByte(buf[i]) - if err != nil { - return 0, ErrInvalidPktLen - } - ret = 16*ret + int(n) - } - return ret, nil +// Text returns the most recent packet generated by a call to Scan. +func (s *Scanner) Text() string { + return string(s.Bytes()) } -// turns the hexadecimal ascii representation of a byte into its -// numerical value. Example: from 'b' to 11 (0xb). -func asciiHexToByte(b byte) (byte, error) { - switch { - case b >= '0' && b <= '9': - return b - '0', nil - case b >= 'a' && b <= 'f': - return b - 'a' + 10, nil - default: - return 0, ErrInvalidPktLen +// PacketLine returns the most recent packet line read along with its length. +// The underlying array may point to data that will be overwritten by a +// subsequent call to Scan. It does no allocation. 
+func (s *Scanner) PacketLine() (int, []byte) { + if s.n < PacketLenSize { + return s.n, nil } + return s.n, s.buf[PacketLenSize:s.n] } diff --git a/plumbing/format/pktline/scanner_test.go b/plumbing/format/pktline/scanner_test.go index 60b622407..407f86a0c 100644 --- a/plumbing/format/pktline/scanner_test.go +++ b/plumbing/format/pktline/scanner_test.go @@ -3,8 +3,6 @@ package pktline_test import ( "bytes" "errors" - "fmt" - "io" "strings" "github.com/go-git/go-git/v5/plumbing/format/pktline" @@ -58,8 +56,7 @@ func (s *SuiteScanner) TestEmptyReader(c *C) { func (s *SuiteScanner) TestFlush(c *C) { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.Flush() + err := pktline.WriteFlush(&buf) c.Assert(err, IsNil) sc := pktline.NewScanner(&buf) @@ -90,8 +87,7 @@ func (s *SuiteScanner) TestScanAndPayload(c *C) { strings.Repeat("a", pktline.MaxPayloadSize-1) + "\n", } { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString(test) + _, err := pktline.Writef(&buf, test) c.Assert(err, IsNil, Commentf("input len=%x, contents=%.10q\n", len(test), test)) @@ -108,14 +104,15 @@ func (s *SuiteScanner) TestScanAndPayload(c *C) { func (s *SuiteScanner) TestSkip(c *C) { for _, test := range [...]struct { input []string - n int expected []byte + n int }{ { input: []string{ "first", "second", - "third"}, + "third", + }, n: 1, expected: []byte("second"), }, @@ -123,15 +120,17 @@ func (s *SuiteScanner) TestSkip(c *C) { input: []string{ "first", "second", - "third"}, + "third", + }, n: 2, expected: []byte("third"), }, } { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString(test.input...) 
- c.Assert(err, IsNil) + for _, in := range test.input { + _, err := pktline.Writef(&buf, in) + c.Assert(err, IsNil) + } sc := pktline.NewScanner(&buf) for i := 0; i < test.n; i++ { @@ -150,9 +149,10 @@ func (s *SuiteScanner) TestSkip(c *C) { func (s *SuiteScanner) TestEOF(c *C) { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString("first", "second") - c.Assert(err, IsNil) + for _, in := range []string{"first", "second"} { + _, err := pktline.Writef(&buf, in) + c.Assert(err, IsNil) + } sc := pktline.NewScanner(&buf) for sc.Scan() { @@ -175,7 +175,8 @@ func (s *SuiteScanner) TestInternalReadError(c *C) { func (s *SuiteScanner) TestReadSomeSections(c *C) { nSections := 2 nLines := 4 - data := sectionsExample(c, nSections, nLines) + data, err := sectionsExample(nSections, nLines) + c.Assert(err, IsNil) sc := pktline.NewScanner(data) sectionCounter := 0 @@ -190,30 +191,3 @@ func (s *SuiteScanner) TestReadSomeSections(c *C) { c.Assert(sectionCounter, Equals, nSections) c.Assert(lineCounter, Equals, (1+nLines)*nSections) } - -// returns nSection sections, each of them with nLines pkt-lines (not -// counting the flush-pkt: -// -// 0009 0.0\n -// 0009 0.1\n -// ... -// 0000 -// and so on -func sectionsExample(c *C, nSections, nLines int) io.Reader { - var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - - for section := 0; section < nSections; section++ { - ss := []string{} - for line := 0; line < nLines; line++ { - line := fmt.Sprintf(" %d.%d\n", section, line) - ss = append(ss, line) - } - err := e.EncodeString(ss...) 
-	c.Assert(err, IsNil)
-	err = e.Flush()
-	c.Assert(err, IsNil)
-	}
-
-	return &buf
-}
diff --git a/plumbing/format/pktline/sync.go b/plumbing/format/pktline/sync.go
new file mode 100644
index 000000000..8ebcfb90c
--- /dev/null
+++ b/plumbing/format/pktline/sync.go
@@ -0,0 +1,25 @@
+package pktline
+
+import "sync"
+
+var byteSlice = sync.Pool{
+	New: func() interface{} {
+		var b [MaxPacketSize]byte
+		return &b
+	},
+}
+
+// GetPacketBuffer returns a *[MaxPacketSize]byte that is managed by a
+// sync.Pool. The buffer is a fixed-size array of MaxPacketSize (65520) bytes.
+//
+// After use, the *[MaxPacketSize]byte should be put back into the sync.Pool by
+// calling PutPacketBuffer.
+func GetPacketBuffer() *[MaxPacketSize]byte {
+	buf := byteSlice.Get().(*[MaxPacketSize]byte)
+	return buf
+}
+
+// PutPacketBuffer puts buf back into its sync.Pool.
+func PutPacketBuffer(buf *[MaxPacketSize]byte) {
+	byteSlice.Put(buf)
+}
diff --git a/plumbing/protocol/packp/advrefs_decode.go b/plumbing/protocol/packp/advrefs_decode.go
index f8d26a28e..c45ecf76f 100644
--- a/plumbing/protocol/packp/advrefs_decode.go
+++ b/plumbing/protocol/packp/advrefs_decode.go
@@ -19,12 +19,12 @@ func (a *AdvRefs) Decode(r io.Reader) error {
 }
 
 type advRefsDecoder struct {
-	s     *pktline.Scanner // a pkt-line scanner from the input stream
-	line  []byte           // current pkt-line contents, use parser.nextLine() to make it advance
-	nLine int              // current pkt-line number for debugging, begins at 1
-	hash  plumbing.Hash    // last hash read
-	err   error            // sticky error, use the parser.error() method to fill this out
-	data  *AdvRefs         // parsed data is stored here
+	s     io.Reader     // a pkt-line reader from the input stream
+	line  []byte        // current pkt-line contents, use parser.nextLine() to make it advance
+	nLine int           // current pkt-line number for debugging, begins at 1
+	hash  plumbing.Hash // last hash read
+	err   error         // sticky error, use the parser.error() method to fill this out
+	data  *AdvRefs      // parsed data is stored here
 }
 
 var (
@@ -37,7 
+37,7 @@ var ( func newAdvRefsDecoder(r io.Reader) *advRefsDecoder { return &advRefsDecoder{ - s: pktline.NewScanner(r), + s: r, } } @@ -70,8 +70,10 @@ func (d *advRefsDecoder) error(format string, a ...interface{}) { func (d *advRefsDecoder) nextLine() bool { d.nLine++ - if !d.s.Scan() { - if d.err = d.s.Err(); d.err != nil { + _, p, err := pktline.ReadLine(d.s) + if err != nil { + if !errors.Is(err, io.EOF) { + d.err = err return false } @@ -84,7 +86,7 @@ func (d *advRefsDecoder) nextLine() bool { return false } - d.line = d.s.Bytes() + d.line = p d.line = bytes.TrimSuffix(d.line, eol) return true @@ -111,7 +113,7 @@ func decodePrefix(d *advRefsDecoder) decoderStateFn { return decodeFirstHash } - d.data.Prefix = append(d.data.Prefix, pktline.Flush) + d.data.Prefix = append(d.data.Prefix, []byte{}) // empty slice for flush-pkt if ok := d.nextLine(); !ok { return nil } diff --git a/plumbing/protocol/packp/advrefs_decode_test.go b/plumbing/protocol/packp/advrefs_decode_test.go index d1271450e..7e4a01629 100644 --- a/plumbing/protocol/packp/advrefs_decode_test.go +++ b/plumbing/protocol/packp/advrefs_decode_test.go @@ -24,18 +24,16 @@ func (s *AdvRefsDecodeSuite) TestEmpty(c *C) { func (s *AdvRefsDecodeSuite) TestEmptyFlush(c *C) { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - e.Flush() + pktline.WriteFlush(&buf) ar := NewAdvRefs() c.Assert(ar.Decode(&buf), Equals, ErrEmptyAdvRefs) } func (s *AdvRefsDecodeSuite) TestEmptyPrefixFlush(c *C) { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - e.EncodeString("# service=git-upload-pack") - e.Flush() - e.Flush() + pktline.WriteString(&buf, "# service=git-upload-pack") + pktline.WriteFlush(&buf) + pktline.WriteFlush(&buf) ar := NewAdvRefs() c.Assert(ar.Decode(&buf), Equals, ErrEmptyAdvRefs) } @@ -43,7 +41,7 @@ func (s *AdvRefsDecodeSuite) TestEmptyPrefixFlush(c *C) { func (s *AdvRefsDecodeSuite) TestShortForHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796", - pktline.FlushString, + "", } r := 
toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*too short.*") @@ -57,7 +55,7 @@ func (s *AdvRefsDecodeSuite) testDecoderErrorMatches(c *C, input io.Reader, patt func (s *AdvRefsDecodeSuite) TestInvalidFirstHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796alberto2219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid hash.*") @@ -66,7 +64,7 @@ func (s *AdvRefsDecodeSuite) TestInvalidFirstHash(c *C) { func (s *AdvRefsDecodeSuite) TestZeroId(c *C) { payloads := []string{ "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack thin-pack\n", - pktline.FlushString, + "", } ar := s.testDecodeOK(c, payloads) c.Assert(ar.Head, IsNil) @@ -74,9 +72,14 @@ func (s *AdvRefsDecodeSuite) TestZeroId(c *C) { func (s *AdvRefsDecodeSuite) testDecodeOK(c *C, payloads []string) *AdvRefs { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString(payloads...) - c.Assert(err, IsNil) + for _, p := range payloads { + if p == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil) + } else { + _, err := pktline.WriteString(&buf, p) + c.Assert(err, IsNil) + } + } ar := NewAdvRefs() c.Assert(ar.Decode(&buf), IsNil) @@ -87,7 +90,7 @@ func (s *AdvRefsDecodeSuite) testDecodeOK(c *C, payloads []string) *AdvRefs { func (s *AdvRefsDecodeSuite) TestMalformedZeroId(c *C) { payloads := []string{ "0000000000000000000000000000000000000000 wrong\x00multi_ack thin-pack\n", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed zero-id.*") @@ -96,7 +99,7 @@ func (s *AdvRefsDecodeSuite) TestMalformedZeroId(c *C) { func (s *AdvRefsDecodeSuite) TestShortZeroId(c *C) { payloads := []string{ "0000000000000000000000000000000000000000 capabi", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*too short zero-id.*") @@ -105,7 +108,7 @@ func (s *AdvRefsDecodeSuite) TestShortZeroId(c *C) { 
func (s *AdvRefsDecodeSuite) TestHead(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", - pktline.FlushString, + "", } ar := s.testDecodeOK(c, payloads) c.Assert(*ar.Head, Equals, @@ -115,7 +118,7 @@ func (s *AdvRefsDecodeSuite) TestHead(c *C) { func (s *AdvRefsDecodeSuite) TestFirstIsNotHead(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\x00", - pktline.FlushString, + "", } ar := s.testDecodeOK(c, payloads) c.Assert(ar.Head, IsNil) @@ -126,7 +129,7 @@ func (s *AdvRefsDecodeSuite) TestFirstIsNotHead(c *C) { func (s *AdvRefsDecodeSuite) TestShortRef(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 H", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*too short.*") @@ -135,7 +138,7 @@ func (s *AdvRefsDecodeSuite) TestShortRef(c *C) { func (s *AdvRefsDecodeSuite) TestNoNULL(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEADofs-delta multi_ack", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*NULL not found.*") @@ -144,7 +147,7 @@ func (s *AdvRefsDecodeSuite) TestNoNULL(c *C) { func (s *AdvRefsDecodeSuite) TestNoSpaceAfterHash(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5-HEAD\x00", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*no space after hash.*") @@ -153,7 +156,7 @@ func (s *AdvRefsDecodeSuite) TestNoSpaceAfterHash(c *C) { func (s *AdvRefsDecodeSuite) TestNoCaps(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", - pktline.FlushString, + "", } ar := s.testDecodeOK(c, payloads) c.Assert(ar.Capabilities.IsEmpty(), Equals, true) @@ -171,19 +174,19 @@ func (s *AdvRefsDecodeSuite) TestCaps(c *C) { }{{ input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00", - pktline.FlushString, + "", }, capabilities: []entry{}, }, { input: []string{ 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n", - pktline.FlushString, + "", }, capabilities: []entry{}, }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta", - pktline.FlushString, + "", }, capabilities: []entry{ { @@ -194,7 +197,7 @@ func (s *AdvRefsDecodeSuite) TestCaps(c *C) { }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack", - pktline.FlushString, + "", }, capabilities: []entry{ {Name: capability.OFSDelta, Values: []string(nil)}, @@ -203,7 +206,7 @@ func (s *AdvRefsDecodeSuite) TestCaps(c *C) { }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta multi_ack\n", - pktline.FlushString, + "", }, capabilities: []entry{ {Name: capability.OFSDelta, Values: []string(nil)}, @@ -212,7 +215,7 @@ func (s *AdvRefsDecodeSuite) TestCaps(c *C) { }, { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:refs/heads/master agent=foo=bar\n", - pktline.FlushString, + "", }, capabilities: []entry{ {Name: capability.SymRef, Values: []string{"HEAD:refs/heads/master"}}, @@ -221,7 +224,7 @@ func (s *AdvRefsDecodeSuite) TestCaps(c *C) { }, { input: []string{ "0000000000000000000000000000000000000000 capabilities^{}\x00report-status report-status-v2 delete-refs side-band-64k quiet atomic ofs-delta object-format=sha1 agent=git/2.41.0\n", - pktline.FlushString, + "", }, capabilities: []entry{ {Name: capability.ReportStatus, Values: []string(nil)}, @@ -243,7 +246,7 @@ func (s *AdvRefsDecodeSuite) TestWithPrefix(c *C) { payloads := []string{ "# this is a prefix\n", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta\n", - pktline.FlushString, + "", } ar := s.testDecodeOK(c, payloads) c.Assert(len(ar.Prefix), Equals, 1) @@ -253,14 +256,14 @@ func (s *AdvRefsDecodeSuite) TestWithPrefix(c *C) { func (s *AdvRefsDecodeSuite) TestWithPrefixAndFlush(c *C) { payloads := []string{ "# this is a prefix\n", - pktline.FlushString, + "", 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta\n", - pktline.FlushString, + "", } ar := s.testDecodeOK(c, payloads) c.Assert(len(ar.Prefix), Equals, 2) c.Assert(ar.Prefix[0], DeepEquals, []byte("# this is a prefix")) - c.Assert(ar.Prefix[1], DeepEquals, []byte(pktline.FlushString)) + c.Assert(ar.Prefix[1], DeepEquals, []byte("")) } func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { @@ -271,7 +274,7 @@ func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { }{{ input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", - pktline.FlushString, + "", }, references: make(map[string]plumbing.Hash), peeled: make(map[string]plumbing.Hash), @@ -279,7 +282,7 @@ func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo", - pktline.FlushString, + "", }, references: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), @@ -289,7 +292,7 @@ func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { input: []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo\n", - pktline.FlushString, + "", }, references: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), @@ -300,7 +303,7 @@ func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo\n", "2222222222222222222222222222222222222222 ref/bar", - pktline.FlushString, + "", }, references: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), @@ -311,7 +314,7 @@ func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { input: []string{ 
"6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo^{}\n", - pktline.FlushString, + "", }, references: make(map[string]plumbing.Hash), peeled: map[string]plumbing.Hash{ @@ -322,7 +325,7 @@ func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00ofs-delta symref=HEAD:/refs/heads/master\n", "1111111111111111111111111111111111111111 ref/foo\n", "2222222222222222222222222222222222222222 ref/bar^{}", - pktline.FlushString, + "", }, references: map[string]plumbing.Hash{ "ref/foo": plumbing.NewHash("1111111111111111111111111111111111111111"), @@ -342,7 +345,7 @@ func (s *AdvRefsDecodeSuite) TestOtherRefs(c *C) { "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11^{}\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", - pktline.FlushString, + "", }, references: map[string]plumbing.Hash{ "refs/heads/master": plumbing.NewHash("a6930aaee06755d1bdcfd943fbf614e4d92bb0c7"), @@ -369,7 +372,7 @@ func (s *AdvRefsDecodeSuite) TestMalformedOtherRefsNoSpace(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8crefs/tags/v2.6.11\n", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed ref data.*") @@ -379,7 +382,7 @@ func (s *AdvRefsDecodeSuite) TestMalformedOtherRefsMultipleSpaces(c *C) { payloads := []string{ "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack thin-pack\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags v2.6.11\n", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed ref data.*") @@ -395,7 +398,7 @@ func (s *AdvRefsDecodeSuite) TestShallow(c *C) { "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", 
"5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", - pktline.FlushString, + "", }, shallows: []plumbing.Hash{}, }, { @@ -405,7 +408,7 @@ func (s *AdvRefsDecodeSuite) TestShallow(c *C) { "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", - pktline.FlushString, + "", }, shallows: []plumbing.Hash{plumbing.NewHash("1111111111111111111111111111111111111111")}, }, { @@ -416,7 +419,7 @@ func (s *AdvRefsDecodeSuite) TestShallow(c *C) { "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", }, shallows: []plumbing.Hash{ plumbing.NewHash("1111111111111111111111111111111111111111"), @@ -437,7 +440,7 @@ func (s *AdvRefsDecodeSuite) TestInvalidShallowHash(c *C) { "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 11111111alcortes111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid hash text.*") @@ -452,7 +455,7 @@ func (s *AdvRefsDecodeSuite) TestGarbageAfterShallow(c *C) { "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", "b5be40b90dbaa6bd337f3b77de361bfc0723468b refs/tags/v4.4", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed shallow prefix.*") @@ -466,7 +469,7 @@ func (s *AdvRefsDecodeSuite) TestMalformedShallowHash(c *C) { "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222 malformed\n", - 
pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed shallow hash.*") diff --git a/plumbing/protocol/packp/advrefs_encode.go b/plumbing/protocol/packp/advrefs_encode.go index fb9bd883f..8b5cd06f2 100644 --- a/plumbing/protocol/packp/advrefs_encode.go +++ b/plumbing/protocol/packp/advrefs_encode.go @@ -1,7 +1,6 @@ package packp import ( - "bytes" "fmt" "io" "sort" @@ -22,18 +21,17 @@ func (a *AdvRefs) Encode(w io.Writer) error { } type advRefsEncoder struct { - data *AdvRefs // data to encode - pe *pktline.Encoder // where to write the encoded data - firstRefName string // reference name to encode in the first pkt-line (HEAD if present) - firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present) - sortedRefs []string // hash references to encode ordered by increasing order - err error // sticky error - + data *AdvRefs // data to encode + w io.Writer // where to write the encoded data + firstRefName string // reference name to encode in the first pkt-line (HEAD if present) + firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present) + sortedRefs []string // hash references to encode ordered by increasing order + err error // sticky error } func newAdvRefsEncoder(w io.Writer) *advRefsEncoder { return &advRefsEncoder{ - pe: pktline.NewEncoder(w), + w: w, } } @@ -79,13 +77,13 @@ type encoderStateFn func(*advRefsEncoder) encoderStateFn func encodePrefix(e *advRefsEncoder) encoderStateFn { for _, p := range e.data.Prefix { - if bytes.Equal(p, pktline.Flush) { - if e.err = e.pe.Flush(); e.err != nil { + if len(p) == 0 { + if e.err = pktline.WriteFlush(e.w); e.err != nil { return nil } continue } - if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil { + if _, e.err = pktline.WriteString(e.w, string(p)+"\n"); e.err != nil { return nil } } @@ -107,10 +105,9 @@ func encodeFirstLine(e *advRefsEncoder) encoderStateFn { firstLine = 
fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities) } else { firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities) - } - if e.err = e.pe.EncodeString(firstLine); e.err != nil { + if _, e.err = pktline.WriteString(e.w, firstLine); e.err != nil { return nil } @@ -134,12 +131,12 @@ func encodeRefs(e *advRefsEncoder) encoderStateFn { } hash := e.data.References[r] - if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { + if _, e.err = pktline.Writef(e.w, "%s %s\n", hash.String(), r); e.err != nil { return nil } if hash, ok := e.data.Peeled[r]; ok { - if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { + if _, e.err = pktline.Writef(e.w, "%s %s^{}\n", hash.String(), r); e.err != nil { return nil } } @@ -152,7 +149,7 @@ func encodeRefs(e *advRefsEncoder) encoderStateFn { func encodeShallow(e *advRefsEncoder) encoderStateFn { sorted := sortShallows(e.data.Shallows) for _, hash := range sorted { - if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { + if _, e.err = pktline.Writef(e.w, "shallow %s\n", hash); e.err != nil { return nil } } @@ -171,6 +168,6 @@ func sortShallows(c []plumbing.Hash) []string { } func encodeFlush(e *advRefsEncoder) encoderStateFn { - e.err = e.pe.Flush() + e.err = pktline.WriteFlush(e.w) return nil } diff --git a/plumbing/protocol/packp/advrefs_encode_test.go b/plumbing/protocol/packp/advrefs_encode_test.go index a01e83341..b8f845749 100644 --- a/plumbing/protocol/packp/advrefs_encode_test.go +++ b/plumbing/protocol/packp/advrefs_encode_test.go @@ -30,7 +30,7 @@ func (s *AdvRefsEncodeSuite) TestZeroValue(c *C) { expected := pktlines(c, "0000000000000000000000000000000000000000 capabilities^{}\x00\n", - pktline.FlushString, + "", ) testEncode(c, ar, expected) @@ -44,7 +44,7 @@ func (s *AdvRefsEncodeSuite) TestHead(c *C) { expected := pktlines(c, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00\n", - pktline.FlushString, + 
"", ) testEncode(c, ar, expected) @@ -61,7 +61,7 @@ func (s *AdvRefsEncodeSuite) TestCapsNoHead(c *C) { expected := pktlines(c, "0000000000000000000000000000000000000000 capabilities^{}\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", - pktline.FlushString, + "", ) testEncode(c, ar, expected) @@ -80,7 +80,7 @@ func (s *AdvRefsEncodeSuite) TestCapsWithHead(c *C) { expected := pktlines(c, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00multi_ack ofs-delta symref=HEAD:/refs/heads/master\n", - pktline.FlushString, + "", ) testEncode(c, ar, expected) @@ -104,7 +104,7 @@ func (s *AdvRefsEncodeSuite) TestRefs(c *C) { "1111111111111111111111111111111111111111 refs/tags/v2.6.12-tree\n", "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", - pktline.FlushString, + "", ) testEncode(c, ar, expected) @@ -135,7 +135,7 @@ func (s *AdvRefsEncodeSuite) TestPeeled(c *C) { "2222222222222222222222222222222222222222 refs/tags/v2.6.13-tree\n", "3333333333333333333333333333333333333333 refs/tags/v2.7.13-tree\n", "4444444444444444444444444444444444444444 refs/tags/v2.7.13-tree^{}\n", - pktline.FlushString, + "", ) testEncode(c, ar, expected) @@ -158,7 +158,7 @@ func (s *AdvRefsEncodeSuite) TestShallow(c *C) { "shallow 2222222222222222222222222222222222222222\n", "shallow 3333333333333333333333333333333333333333\n", "shallow 4444444444444444444444444444444444444444\n", - pktline.FlushString, + "", ) testEncode(c, ar, expected) @@ -213,7 +213,7 @@ func (s *AdvRefsEncodeSuite) TestAll(c *C) { "shallow 2222222222222222222222222222222222222222\n", "shallow 3333333333333333333333333333333333333333\n", "shallow 4444444444444444444444444444444444444444\n", - pktline.FlushString, + "", ) testEncode(c, ar, expected) diff --git a/plumbing/protocol/packp/advrefs_test.go b/plumbing/protocol/packp/advrefs_test.go index 1b8db981c..354314655 100644 --- a/plumbing/protocol/packp/advrefs_test.go +++ 
b/plumbing/protocol/packp/advrefs_test.go @@ -160,22 +160,31 @@ type AdvRefsDecodeEncodeSuite struct{} var _ = Suite(&AdvRefsDecodeEncodeSuite{}) func (s *AdvRefsDecodeEncodeSuite) test(c *C, in []string, exp []string, isEmpty bool) { - var err error var input io.Reader { var buf bytes.Buffer - p := pktline.NewEncoder(&buf) - err = p.EncodeString(in...) - c.Assert(err, IsNil) + for _, l := range in { + if l == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil) + } else { + _, err := pktline.WriteString(&buf, l) + c.Assert(err, IsNil) + } + } input = &buf } var expected []byte { var buf bytes.Buffer - p := pktline.NewEncoder(&buf) - err = p.EncodeString(exp...) - c.Assert(err, IsNil) + for _, l := range exp { + if l == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil) + } else { + _, err := pktline.WriteString(&buf, l) + c.Assert(err, IsNil) + } + } expected = buf.Bytes() } @@ -198,12 +207,12 @@ func (s *AdvRefsDecodeEncodeSuite) test(c *C, in []string, exp []string, isEmpty func (s *AdvRefsDecodeEncodeSuite) TestNoHead(c *C) { input := []string{ "0000000000000000000000000000000000000000 capabilities^{}\x00", - pktline.FlushString, + "", } expected := []string{ "0000000000000000000000000000000000000000 capabilities^{}\x00\n", - pktline.FlushString, + "", } s.test(c, input, expected, true) @@ -213,13 +222,13 @@ func (s *AdvRefsDecodeEncodeSuite) TestNoHeadSmart(c *C) { input := []string{ "# service=git-upload-pack\n", "0000000000000000000000000000000000000000 capabilities^{}\x00", - pktline.FlushString, + "", } expected := []string{ "# service=git-upload-pack\n", "0000000000000000000000000000000000000000 capabilities^{}\x00\n", - pktline.FlushString, + "", } s.test(c, input, expected, true) @@ -228,16 +237,16 @@ func (s *AdvRefsDecodeEncodeSuite) TestNoHeadSmart(c *C) { func (s *AdvRefsDecodeEncodeSuite) TestNoHeadSmartBug(c *C) { input := []string{ "# service=git-upload-pack\n", - pktline.FlushString, + "", "0000000000000000000000000000000000000000 
capabilities^{}\x00\n", - pktline.FlushString, + "", } expected := []string{ "# service=git-upload-pack\n", - pktline.FlushString, + "", "0000000000000000000000000000000000000000 capabilities^{}\x00\n", - pktline.FlushString, + "", } s.test(c, input, expected, true) @@ -249,7 +258,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestRefs(c *C) { "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree", - pktline.FlushString, + "", } expected := []string{ @@ -257,7 +266,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestRefs(c *C) { "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", - pktline.FlushString, + "", } s.test(c, input, expected, false) @@ -271,7 +280,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestPeeled(c *C) { "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree", "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", - pktline.FlushString, + "", } expected := []string{ @@ -281,7 +290,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestPeeled(c *C) { "c39ae07f393806ccf406ef966e9a15afc43cc36a refs/tags/v2.6.11-tree^{}\n", "7777777777777777777777777777777777777777 refs/tags/v2.6.12-tree\n", "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", - pktline.FlushString, + "", } s.test(c, input, expected, false) @@ -297,7 +306,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestAll(c *C) { "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}", "shallow 1111111111111111111111111111111111111111", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", } expected := []string{ @@ -309,7 +318,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestAll(c *C) { 
"8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", } s.test(c, input, expected, false) @@ -318,7 +327,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestAll(c *C) { func (s *AdvRefsDecodeEncodeSuite) TestAllSmart(c *C) { input := []string{ "# service=git-upload-pack\n", - pktline.FlushString, + "", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", @@ -327,12 +336,12 @@ func (s *AdvRefsDecodeEncodeSuite) TestAllSmart(c *C) { "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", } expected := []string{ "# service=git-upload-pack\n", - pktline.FlushString, + "", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", @@ -341,7 +350,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestAllSmart(c *C) { "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", } s.test(c, input, expected, false) @@ -350,7 +359,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestAllSmart(c *C) { func (s *AdvRefsDecodeEncodeSuite) TestAllSmartBug(c *C) { input := []string{ "# service=git-upload-pack\n", - pktline.FlushString, + "", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 
refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", @@ -359,12 +368,12 @@ func (s *AdvRefsDecodeEncodeSuite) TestAllSmartBug(c *C) { "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", } expected := []string{ "# service=git-upload-pack\n", - pktline.FlushString, + "", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 HEAD\x00symref=HEAD:/refs/heads/master ofs-delta multi_ack\n", "a6930aaee06755d1bdcfd943fbf614e4d92bb0c7 refs/heads/master\n", "5dc01c595e6c6ec9ccda4f6f69c131c0dd945f8c refs/tags/v2.6.11-tree\n", @@ -373,7 +382,7 @@ func (s *AdvRefsDecodeEncodeSuite) TestAllSmartBug(c *C) { "8888888888888888888888888888888888888888 refs/tags/v2.6.12-tree^{}\n", "shallow 1111111111111111111111111111111111111111\n", "shallow 2222222222222222222222222222222222222222\n", - pktline.FlushString, + "", } s.test(c, input, expected, false) diff --git a/plumbing/protocol/packp/common_test.go b/plumbing/protocol/packp/common_test.go index 7989388c8..807bb9d32 100644 --- a/plumbing/protocol/packp/common_test.go +++ b/plumbing/protocol/packp/common_test.go @@ -15,19 +15,30 @@ func Test(t *testing.T) { TestingT(t) } // returns a byte slice with the pkt-lines for the given payloads. func pktlines(c *C, payloads ...string) []byte { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString(payloads...) - c.Assert(err, IsNil, Commentf("building pktlines for %v\n", payloads)) + comment := Commentf("building pktlines for %v\n", payloads) + for _, p := range payloads { + if p == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil, comment) + } else { + _, err := pktline.WriteString(&buf, p) + c.Assert(err, IsNil, comment) + } + } return buf.Bytes() } func toPktLines(c *C, payloads []string) io.Reader { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString(payloads...) 
- c.Assert(err, IsNil) + for _, p := range payloads { + if p == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil) + } else { + _, err := pktline.WriteString(&buf, p) + c.Assert(err, IsNil) + } + } return &buf } diff --git a/plumbing/protocol/packp/gitproto.go b/plumbing/protocol/packp/gitproto.go index 0b7ff8f82..6cc524c4a 100644 --- a/plumbing/protocol/packp/gitproto.go +++ b/plumbing/protocol/packp/gitproto.go @@ -1,6 +1,7 @@ package packp import ( + "errors" "fmt" "io" "strings" @@ -8,11 +9,9 @@ import ( "github.com/go-git/go-git/v5/plumbing/format/pktline" ) -var ( - // ErrInvalidGitProtoRequest is returned by Decode if the input is not a - // valid git protocol request. - ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request") -) +// ErrInvalidGitProtoRequest is returned by Decode if the input is not a +// valid git protocol request. +var ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request") // GitProtoRequest is a command request for the git protocol. // It is used to send the command, endpoint, and extra parameters to the @@ -52,7 +51,6 @@ func (g *GitProtoRequest) Encode(w io.Writer) error { return err } - p := pktline.NewEncoder(w) req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname) if host := g.Host; host != "" { req += fmt.Sprintf("host=%s\x00", host) @@ -65,7 +63,7 @@ func (g *GitProtoRequest) Encode(w io.Writer) error { } } - if err := p.Encode([]byte(req)); err != nil { + if _, err := pktline.Writef(w, req); err != nil { return err } @@ -74,16 +72,15 @@ func (g *GitProtoRequest) Encode(w io.Writer) error { // Decode decodes the request from the reader. 
func (g *GitProtoRequest) Decode(r io.Reader) error { - s := pktline.NewScanner(r) - if !s.Scan() { - err := s.Err() - if err == nil { - return ErrInvalidGitProtoRequest - } + _, p, err := pktline.ReadLine(r) + if errors.Is(err, io.EOF) { + return ErrInvalidGitProtoRequest + } + if err != nil { return err } - line := string(s.Bytes()) + line := string(p) if len(line) == 0 { return io.EOF } diff --git a/plumbing/protocol/packp/report_status.go b/plumbing/protocol/packp/report_status.go index e2a0a108b..938b697b5 100644 --- a/plumbing/protocol/packp/report_status.go +++ b/plumbing/protocol/packp/report_status.go @@ -2,6 +2,7 @@ package packp import ( "bytes" + "errors" "fmt" "io" "strings" @@ -43,8 +44,7 @@ func (s *ReportStatus) Error() error { // Encode writes the report status to a writer. func (s *ReportStatus) Encode(w io.Writer) error { - e := pktline.NewEncoder(w) - if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil { + if _, err := pktline.Writef(w, "unpack %s\n", s.UnpackStatus); err != nil { return err } @@ -54,25 +54,30 @@ func (s *ReportStatus) Encode(w io.Writer) error { } } - return e.Flush() + return pktline.WriteFlush(w) } // Decode reads from the given reader and decodes a report-status message. It // does not read more input than what is needed to fill the report status. 
func (s *ReportStatus) Decode(r io.Reader) error { - scan := pktline.NewScanner(r) - if err := s.scanFirstLine(scan); err != nil { + b, err := s.scanFirstLine(r) + if err != nil { return err } - if err := s.decodeReportStatus(scan.Bytes()); err != nil { + if err := s.decodeReportStatus(b); err != nil { return err } + var l int flushed := false - for scan.Scan() { - b := scan.Bytes() - if isFlush(b) { + for { + l, b, err = pktline.ReadLine(r) + if err != nil { + break + } + + if l == pktline.Flush { flushed = true break } @@ -86,19 +91,23 @@ func (s *ReportStatus) Decode(r io.Reader) error { return fmt.Errorf("missing flush") } - return scan.Err() + if err != nil && !errors.Is(err, io.EOF) { + return err + } + + return nil } -func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error { - if scan.Scan() { - return nil +func (s *ReportStatus) scanFirstLine(r io.Reader) ([]byte, error) { + _, p, err := pktline.ReadLine(r) + if errors.Is(err, io.EOF) { + return p, io.ErrUnexpectedEOF } - - if scan.Err() != nil { - return scan.Err() + if err != nil { + return nil, err } - return io.ErrUnexpectedEOF + return p, nil } func (s *ReportStatus) decodeReportStatus(b []byte) error { @@ -156,10 +165,11 @@ func (s *CommandStatus) Error() error { } func (s *CommandStatus) encode(w io.Writer) error { - e := pktline.NewEncoder(w) if s.Error() == nil { - return e.Encodef("ok %s\n", s.ReferenceName.String()) + _, err := pktline.Writef(w, "ok %s\n", s.ReferenceName.String()) + return err } - return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status) + _, err := pktline.Writef(w, "ng %s %s\n", s.ReferenceName.String(), s.Status) + return err } diff --git a/plumbing/protocol/packp/report_status_test.go b/plumbing/protocol/packp/report_status_test.go index 32b9e5b80..8ba29be4b 100644 --- a/plumbing/protocol/packp/report_status_test.go +++ b/plumbing/protocol/packp/report_status_test.go @@ -4,7 +4,6 @@ import ( "bytes" "github.com/go-git/go-git/v5/plumbing" - 
"github.com/go-git/go-git/v5/plumbing/format/pktline" . "gopkg.in/check.v1" ) @@ -74,7 +73,7 @@ func (s *ReportStatusSuite) TestEncodeDecodeOkOneReference(c *C) { s.testEncodeDecodeOk(c, rs, "unpack ok\n", "ok refs/heads/master\n", - pktline.FlushString, + "", ) } @@ -89,7 +88,7 @@ func (s *ReportStatusSuite) TestEncodeDecodeOkOneReferenceFailed(c *C) { s.testEncodeDecodeOk(c, rs, "unpack my error\n", "ng refs/heads/master command error\n", - pktline.FlushString, + "", ) } @@ -112,7 +111,7 @@ func (s *ReportStatusSuite) TestEncodeDecodeOkMoreReferences(c *C) { "ok refs/heads/master\n", "ok refs/heads/a\n", "ok refs/heads/b\n", - pktline.FlushString, + "", ) } @@ -135,7 +134,7 @@ func (s *ReportStatusSuite) TestEncodeDecodeOkMoreReferencesFailed(c *C) { "ok refs/heads/master\n", "ng refs/heads/a command error\n", "ok refs/heads/b\n", - pktline.FlushString, + "", ) } @@ -145,7 +144,7 @@ func (s *ReportStatusSuite) TestEncodeDecodeOkNoReferences(c *C) { s.testEncodeDecodeOk(c, expected, "unpack ok\n", - pktline.FlushString, + "", ) } @@ -155,7 +154,7 @@ func (s *ReportStatusSuite) TestEncodeDecodeOkNoReferencesFailed(c *C) { s.testEncodeDecodeOk(c, rs, "unpack my error\n", - pktline.FlushString, + "", ) } @@ -194,7 +193,7 @@ func (s *ReportStatusSuite) TestDecodeErrorMalformed(c *C) { s.testDecodeError(c, "malformed unpack status: unpackok", "unpackok\n", - pktline.FlushString, + "", ) } @@ -208,7 +207,7 @@ func (s *ReportStatusSuite) TestDecodeErrorMalformed2(c *C) { s.testDecodeError(c, "malformed unpack status: UNPACK OK", "UNPACK OK\n", - pktline.FlushString, + "", ) } @@ -223,7 +222,7 @@ func (s *ReportStatusSuite) TestDecodeErrorMalformedCommandStatus(c *C) { s.testDecodeError(c, "malformed command status: ko refs/heads/master", "unpack ok\n", "ko refs/heads/master\n", - pktline.FlushString, + "", ) } @@ -238,7 +237,7 @@ func (s *ReportStatusSuite) TestDecodeErrorMalformedCommandStatus2(c *C) { s.testDecodeError(c, "malformed command status: ng 
refs/heads/master", "unpack ok\n", "ng refs/heads/master\n", - pktline.FlushString, + "", ) } @@ -251,6 +250,6 @@ func (s *ReportStatusSuite) TestDecodeErrorPrematureFlush(c *C) { }} s.testDecodeError(c, "premature flush", - pktline.FlushString, + "", ) } diff --git a/plumbing/protocol/packp/shallowupd.go b/plumbing/protocol/packp/shallowupd.go index fe4fe6887..f187f98b1 100644 --- a/plumbing/protocol/packp/shallowupd.go +++ b/plumbing/protocol/packp/shallowupd.go @@ -20,19 +20,23 @@ type ShallowUpdate struct { } func (r *ShallowUpdate) Decode(reader io.Reader) error { - s := pktline.NewScanner(reader) - - for s.Scan() { - line := s.Bytes() - line = bytes.TrimSpace(line) + var ( + p []byte + err error + ) + for { + _, p, err = pktline.ReadLine(reader) + if err != nil { + break + } - var err error + line := bytes.TrimSpace(p) switch { case bytes.HasPrefix(line, shallow): err = r.decodeShallowLine(line) case bytes.HasPrefix(line, unshallow): err = r.decodeUnshallowLine(line) - case bytes.Equal(line, pktline.Flush): + case len(line) == 0: return nil } @@ -41,7 +45,11 @@ func (r *ShallowUpdate) Decode(reader io.Reader) error { } } - return s.Err() + if err != nil && err != io.EOF { + return err + } + + return nil } func (r *ShallowUpdate) decodeShallowLine(line []byte) error { @@ -74,19 +82,17 @@ func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Ha } func (r *ShallowUpdate) Encode(w io.Writer) error { - e := pktline.NewEncoder(w) - for _, h := range r.Shallows { - if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil { + if _, err := pktline.Writef(w, "%s%s\n", shallow, h.String()); err != nil { return err } } for _, h := range r.Unshallows { - if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil { + if _, err := pktline.Writef(w, "%s%s\n", unshallow, h.String()); err != nil { return err } } - return e.Flush() + return pktline.WriteFlush(w) } diff --git a/plumbing/protocol/packp/sideband/demux.go 
b/plumbing/protocol/packp/sideband/demux.go index 0116f962e..624bf02b5 100644 --- a/plumbing/protocol/packp/sideband/demux.go +++ b/plumbing/protocol/packp/sideband/demux.go @@ -33,7 +33,6 @@ type Progress interface { type Demuxer struct { t Type r io.Reader - s *pktline.Scanner max int pending []byte @@ -53,7 +52,6 @@ func NewDemuxer(t Type, r io.Reader) *Demuxer { t: t, r: r, max: max, - s: pktline.NewScanner(r), } } @@ -102,15 +100,12 @@ func (d *Demuxer) nextPackData() ([]byte, error) { return content, nil } - if !d.s.Scan() { - if err := d.s.Err(); err != nil { - return nil, err - } - - return nil, io.EOF + _, p, err := pktline.ReadLine(d.r) + if err != nil { + return nil, err } - content = d.s.Bytes() + content = p size := len(content) if size == 0 { diff --git a/plumbing/protocol/packp/sideband/demux_test.go b/plumbing/protocol/packp/sideband/demux_test.go index 8f233538c..99829513e 100644 --- a/plumbing/protocol/packp/sideband/demux_test.go +++ b/plumbing/protocol/packp/sideband/demux_test.go @@ -21,11 +21,10 @@ func (s *SidebandSuite) TestDecode(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - e.Encode(PackData.WithPayload(expected[0:8])) - e.Encode(ProgressMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) - e.Encode(PackData.WithPayload(expected[8:16])) - e.Encode(PackData.WithPayload(expected[16:26])) + pktline.Write(buf, PackData.WithPayload(expected[0:8])) + pktline.Write(buf, ProgressMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) + pktline.Write(buf, PackData.WithPayload(expected[8:16])) + pktline.Write(buf, PackData.WithPayload(expected[16:26])) content := make([]byte, 26) d := NewDemuxer(Sideband64k, buf) @@ -39,8 +38,7 @@ func (s *SidebandSuite) TestDecodeMoreThanContain(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - e.Encode(PackData.WithPayload(expected)) + pktline.Write(buf, 
PackData.WithPayload(expected)) content := make([]byte, 42) d := NewDemuxer(Sideband64k, buf) @@ -54,11 +52,10 @@ func (s *SidebandSuite) TestDecodeWithError(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - e.Encode(PackData.WithPayload(expected[0:8])) - e.Encode(ErrorMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) - e.Encode(PackData.WithPayload(expected[8:16])) - e.Encode(PackData.WithPayload(expected[16:26])) + pktline.Write(buf, PackData.WithPayload(expected[0:8])) + pktline.Write(buf, ErrorMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) + pktline.Write(buf, PackData.WithPayload(expected[8:16])) + pktline.Write(buf, PackData.WithPayload(expected[16:26])) content := make([]byte, 26) d := NewDemuxer(Sideband64k, buf) @@ -84,11 +81,10 @@ func (s *SidebandSuite) TestDecodeWithProgress(c *C) { expected := []byte("abcdefghijklmnopqrstuvwxyz") input := bytes.NewBuffer(nil) - e := pktline.NewEncoder(input) - e.Encode(PackData.WithPayload(expected[0:8])) - e.Encode(ProgressMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) - e.Encode(PackData.WithPayload(expected[8:16])) - e.Encode(PackData.WithPayload(expected[16:26])) + pktline.Write(input, PackData.WithPayload(expected[0:8])) + pktline.Write(input, ProgressMessage.WithPayload([]byte{'F', 'O', 'O', '\n'})) + pktline.Write(input, PackData.WithPayload(expected[8:16])) + pktline.Write(input, PackData.WithPayload(expected[16:26])) output := bytes.NewBuffer(nil) content := make([]byte, 26) @@ -106,10 +102,8 @@ func (s *SidebandSuite) TestDecodeWithProgress(c *C) { } func (s *SidebandSuite) TestDecodeWithUnknownChannel(c *C) { - buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - e.Encode([]byte{'4', 'F', 'O', 'O', '\n'}) + pktline.Write(buf, []byte{'4', 'F', 'O', 'O', '\n'}) content := make([]byte, 26) d := NewDemuxer(Sideband64k, buf) @@ -122,10 +116,9 @@ func (s *SidebandSuite) TestDecodeWithPending(c *C) { expected := 
[]byte("abcdefghijklmnopqrstuvwxyz") buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - e.Encode(PackData.WithPayload(expected[0:8])) - e.Encode(PackData.WithPayload(expected[8:16])) - e.Encode(PackData.WithPayload(expected[16:26])) + pktline.Write(buf, PackData.WithPayload(expected[0:8])) + pktline.Write(buf, PackData.WithPayload(expected[8:16])) + pktline.Write(buf, PackData.WithPayload(expected[16:26])) content := make([]byte, 13) d := NewDemuxer(Sideband64k, buf) @@ -142,13 +135,11 @@ func (s *SidebandSuite) TestDecodeWithPending(c *C) { func (s *SidebandSuite) TestDecodeErrMaxPacked(c *C) { buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - e.Encode(PackData.WithPayload(bytes.Repeat([]byte{'0'}, MaxPackedSize+1))) + pktline.Write(buf, PackData.WithPayload(bytes.Repeat([]byte{'0'}, MaxPackedSize+1))) content := make([]byte, 13) d := NewDemuxer(Sideband, buf) n, err := io.ReadFull(d, content) c.Assert(err, Equals, ErrMaxPackedExceeded) c.Assert(n, Equals, 0) - } diff --git a/plumbing/protocol/packp/sideband/muxer.go b/plumbing/protocol/packp/sideband/muxer.go index d51ac8269..4cb70698d 100644 --- a/plumbing/protocol/packp/sideband/muxer.go +++ b/plumbing/protocol/packp/sideband/muxer.go @@ -10,7 +10,7 @@ import ( // information. The multiplex is perform using pktline format. 
type Muxer struct { max int - e *pktline.Encoder + w io.Writer } const chLen = 1 @@ -28,7 +28,7 @@ func NewMuxer(t Type, w io.Writer) *Muxer { return &Muxer{ max: max - chLen, - e: pktline.NewEncoder(w), + w: w, } } @@ -61,5 +61,6 @@ func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) { sz = m.max } - return sz, m.e.Encode(ch.WithPayload(p[:sz])) + _, err := pktline.Write(m.w, ch.WithPayload(p[:sz])) + return sz, err } diff --git a/plumbing/protocol/packp/srvresp.go b/plumbing/protocol/packp/srvresp.go index a9ddb538b..f5d6cdb77 100644 --- a/plumbing/protocol/packp/srvresp.go +++ b/plumbing/protocol/packp/srvresp.go @@ -9,6 +9,7 @@ import ( "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/utils/ioutil" ) const ackLineLen = 44 @@ -20,20 +21,25 @@ type ServerResponse struct { // Decode decodes the response into the struct, isMultiACK should be true, if // the request was done with multi_ack or multi_ack_detailed capabilities. -func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { - s := pktline.NewScanner(reader) +func (r *ServerResponse) Decode(reader io.Reader, isMultiACK bool) error { + s := bufio.NewReader(reader) - for s.Scan() { - line := s.Bytes() + var err error + for { + var p []byte + _, p, err = pktline.ReadLine(s) + if err != nil { + break + } - if err := r.decodeLine(line); err != nil { + if err := r.decodeLine(p); err != nil { return err } // we need to detect when the end of a response header and the beginning // of a packfile header happened, some requests to the git daemon // produces a duplicate ACK header even when multi_ack is not supported. 
- stop, err := r.stopReading(reader) + stop, err := r.stopReading(s) if err != nil { return err } @@ -43,6 +49,10 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { } } + if err == io.EOF { + err = nil + } + // isMultiACK is true when the remote server advertises the related // capabilities when they are not in transport.UnsupportedCapabilities. // @@ -54,7 +64,6 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { // information highlighting that this capabilities are not supported by go-git. // // TODO: Implement support for multi_ack or multi_ack_detailed responses. - err := s.Err() if err != nil && isMultiACK { return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err) } @@ -64,7 +73,7 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { // stopReading detects when a valid command such as ACK or NAK is found to be // read in the buffer without moving the read pointer. -func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) { +func (r *ServerResponse) stopReading(reader ioutil.ReadPeeker) (bool, error) { ahead, err := reader.Peek(7) if err == io.EOF { return true, nil @@ -132,10 +141,11 @@ func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error { return errors.New("multi_ack and multi_ack_detailed are not supported") } - e := pktline.NewEncoder(w) if len(r.ACKs) == 0 { - return e.Encodef("%s\n", nak) + _, err := pktline.WriteString(w, string(nak)+"\n") + return err } - return e.Encodef("%s %s\n", ack, r.ACKs[0].String()) + _, err := pktline.Writef(w, "%s %s\n", ack, r.ACKs[0].String()) + return err } diff --git a/plumbing/protocol/packp/srvresp_test.go b/plumbing/protocol/packp/srvresp_test.go index b7270e79e..c66a99e5d 100644 --- a/plumbing/protocol/packp/srvresp_test.go +++ b/plumbing/protocol/packp/srvresp_test.go @@ -1,9 +1,9 @@ package packp import ( - "bufio" "bytes" "fmt" + "strings" 
"github.com/go-git/go-git/v5/plumbing" @@ -18,7 +18,7 @@ func (s *ServerResponseSuite) TestDecodeNAK(c *C) { raw := "0008NAK\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode((bytes.NewBufferString(raw)), false) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 0) @@ -28,16 +28,16 @@ func (s *ServerResponseSuite) TestDecodeNewLine(c *C) { raw := "\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode(bytes.NewBufferString(raw), false) c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "invalid pkt-len found") + c.Assert(err.Error(), Matches, "invalid pkt-len found.*") } func (s *ServerResponseSuite) TestDecodeEmpty(c *C) { raw := "" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode(bytes.NewBufferString(raw), false) c.Assert(err, IsNil) } @@ -45,7 +45,7 @@ func (s *ServerResponseSuite) TestDecodePartial(c *C) { raw := "000600\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode(bytes.NewBufferString(raw), false) c.Assert(err, NotNil) c.Assert(err.Error(), Equals, fmt.Sprintf("unexpected content %q", "00")) } @@ -54,7 +54,7 @@ func (s *ServerResponseSuite) TestDecodeACK(c *C) { raw := "0031ACK 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode(bytes.NewBufferString(raw), false) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 1) @@ -68,7 +68,7 @@ func (s *ServerResponseSuite) TestDecodeMultipleACK(c *C) { "00080PACK\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode(bytes.NewBufferString(raw), false) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 2) @@ -83,7 +83,7 @@ func (s *ServerResponseSuite) TestDecodeMultipleACKWithSideband(c *C) { 
"00080aaaa\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode(bytes.NewBufferString(raw), false) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 2) @@ -95,7 +95,7 @@ func (s *ServerResponseSuite) TestDecodeMalformed(c *C) { raw := "0029ACK 6ecf0ef2c2dffb796033e5a02219af86ec6584e\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), false) + err := sr.Decode(bytes.NewBufferString(raw), false) c.Assert(err, NotNil) } @@ -110,7 +110,7 @@ func (s *ServerResponseSuite) TestDecodeMultiACK(c *C) { "00080PACK\n" sr := &ServerResponse{} - err := sr.Decode(bufio.NewReader(bytes.NewBufferString(raw)), true) + err := sr.Decode(strings.NewReader(raw), true) c.Assert(err, IsNil) c.Assert(sr.ACKs, HasLen, 2) diff --git a/plumbing/protocol/packp/ulreq_decode.go b/plumbing/protocol/packp/ulreq_decode.go index 3da29985e..fbee2497d 100644 --- a/plumbing/protocol/packp/ulreq_decode.go +++ b/plumbing/protocol/packp/ulreq_decode.go @@ -20,16 +20,16 @@ func (req *UploadRequest) Decode(r io.Reader) error { } type ulReqDecoder struct { - s *pktline.Scanner // a pkt-line scanner from the input stream - line []byte // current pkt-line contents, use parser.nextLine() to make it advance - nLine int // current pkt-line number for debugging, begins at 1 - err error // sticky error, use the parser.error() method to fill this out - data *UploadRequest // parsed data is stored here + r io.Reader // a pkt-line scanner from the input stream + line []byte // current pkt-line contents, use parser.nextLine() to make it advance + nLine int // current pkt-line number for debugging, begins at 1 + err error // sticky error, use the parser.error() method to fill this out + data *UploadRequest // parsed data is stored here } func newUlReqDecoder(r io.Reader) *ulReqDecoder { return &ulReqDecoder{ - s: pktline.NewScanner(r), + r: r, } } @@ -60,16 +60,17 @@ func (d *ulReqDecoder) error(format string, a 
...interface{}) { func (d *ulReqDecoder) nextLine() bool { d.nLine++ - if !d.s.Scan() { - if d.err = d.s.Err(); d.err != nil { - return false - } - + _, p, err := pktline.ReadLine(d.r) + if err == io.EOF { d.error("EOF") return false } + if err != nil { + d.err = err + return false + } - d.line = d.s.Bytes() + d.line = p d.line = bytes.TrimSuffix(d.line, eol) return true diff --git a/plumbing/protocol/packp/ulreq_decode_test.go b/plumbing/protocol/packp/ulreq_decode_test.go index 7658922de..49978c76f 100644 --- a/plumbing/protocol/packp/ulreq_decode_test.go +++ b/plumbing/protocol/packp/ulreq_decode_test.go @@ -30,7 +30,7 @@ func (s *UlReqDecodeSuite) TestEmpty(c *C) { func (s *UlReqDecodeSuite) TestNoWant(c *C) { payloads := []string{ "foobar", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*missing 'want '.*") @@ -47,7 +47,7 @@ func (s *UlReqDecodeSuite) testDecoderErrorMatches(c *C, input io.Reader, patter func (s *UlReqDecodeSuite) TestInvalidFirstHash(c *C) { payloads := []string{ "want 6ecf0ef2c2dffb796alberto2219af86ec6584e5\n", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid hash.*") @@ -56,7 +56,7 @@ func (s *UlReqDecodeSuite) TestInvalidFirstHash(c *C) { func (s *UlReqDecodeSuite) TestWantOK(c *C) { payloads := []string{ "want 1111111111111111111111111111111111111111", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -67,15 +67,19 @@ func (s *UlReqDecodeSuite) TestWantOK(c *C) { func (s *UlReqDecodeSuite) testDecodeOK(c *C, payloads []string) *UploadRequest { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString(payloads...) 
- c.Assert(err, IsNil) + for _, p := range payloads { + if p == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil) + } else { + _, err := pktline.WriteString(&buf, p) + c.Assert(err, IsNil) + } + } ur := NewUploadRequest() d := newUlReqDecoder(&buf) - err = d.Decode(ur) - c.Assert(err, IsNil) + c.Assert(d.Decode(ur), IsNil) return ur } @@ -83,11 +87,12 @@ func (s *UlReqDecodeSuite) testDecodeOK(c *C, payloads []string) *UploadRequest func (s *UlReqDecodeSuite) TestWantWithCapabilities(c *C) { payloads := []string{ "want 1111111111111111111111111111111111111111 ofs-delta multi_ack", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) c.Assert(ur.Wants, DeepEquals, []plumbing.Hash{ - plumbing.NewHash("1111111111111111111111111111111111111111")}) + plumbing.NewHash("1111111111111111111111111111111111111111"), + }) c.Assert(ur.Capabilities.Supports(capability.OFSDelta), Equals, true) c.Assert(ur.Capabilities.Supports(capability.MultiACK), Equals, true) @@ -99,7 +104,7 @@ func (s *UlReqDecodeSuite) TestManyWantsNoCapabilities(c *C) { "want 4444444444444444444444444444444444444444", "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -131,7 +136,7 @@ func (s *UlReqDecodeSuite) TestManyWantsBadWant(c *C) { "want 4444444444444444444444444444444444444444", "foo", "want 2222222222222222222222222222222222222222", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") @@ -143,7 +148,7 @@ func (s *UlReqDecodeSuite) TestManyWantsInvalidHash(c *C) { "want 4444444444444444444444444444444444444444", "want 1234567890abcdef", "want 2222222222222222222222222222222222222222", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*malformed hash.*") @@ -155,7 +160,7 @@ func (s *UlReqDecodeSuite) TestManyWantsWithCapabilities(c *C) { "want 
4444444444444444444444444444444444444444", "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -178,7 +183,7 @@ func (s *UlReqDecodeSuite) TestSingleShallowSingleWant(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -204,7 +209,7 @@ func (s *UlReqDecodeSuite) TestSingleShallowManyWants(c *C) { "want 1111111111111111111111111111111111111111", "want 2222222222222222222222222222222222222222", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -235,7 +240,7 @@ func (s *UlReqDecodeSuite) TestManyShallowSingleWant(c *C) { "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "shallow cccccccccccccccccccccccccccccccccccccccc", "shallow dddddddddddddddddddddddddddddddddddddddd", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -269,7 +274,7 @@ func (s *UlReqDecodeSuite) TestManyShallowManyWants(c *C) { "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "shallow cccccccccccccccccccccccccccccccccccccccc", "shallow dddddddddddddddddddddddddddddddddddddddd", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -302,7 +307,7 @@ func (s *UlReqDecodeSuite) TestMalformedShallow(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shalow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") @@ -312,7 +317,7 @@ func (s *UlReqDecodeSuite) TestMalformedShallowHash(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - pktline.FlushString, + "", } r := toPktLines(c, 
payloads) s.testDecoderErrorMatches(c, r, ".*malformed hash.*") @@ -324,7 +329,7 @@ func (s *UlReqDecodeSuite) TestMalformedShallowManyShallows(c *C) { "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "shalow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "shallow cccccccccccccccccccccccccccccccccccccccc", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") @@ -334,7 +339,7 @@ func (s *UlReqDecodeSuite) TestMalformedDeepenSpec(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen-foo 34", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected deepen.*") @@ -344,7 +349,7 @@ func (s *UlReqDecodeSuite) TestMalformedDeepenSingleWant(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "depth 32", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") @@ -355,7 +360,7 @@ func (s *UlReqDecodeSuite) TestMalformedDeepenMultiWant(c *C) { "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "want 2222222222222222222222222222222222222222", "depth 32", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") @@ -366,7 +371,7 @@ func (s *UlReqDecodeSuite) TestMalformedDeepenWithSingleShallow(c *C) { "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "shallow 2222222222222222222222222222222222222222", "depth 32", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") @@ -378,7 +383,7 @@ func (s *UlReqDecodeSuite) TestMalformedDeepenWithMultiShallow(c *C) { "shallow 2222222222222222222222222222222222222222", "shallow 5555555555555555555555555555555555555555", "depth 32", - pktline.FlushString, + "", } r := toPktLines(c, payloads) 
s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") @@ -388,7 +393,7 @@ func (s *UlReqDecodeSuite) TestDeepenCommits(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen 1234", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -402,7 +407,7 @@ func (s *UlReqDecodeSuite) TestDeepenCommitsInfiniteImplicit(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen 0", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -415,7 +420,7 @@ func (s *UlReqDecodeSuite) TestDeepenCommitsInfiniteImplicit(c *C) { func (s *UlReqDecodeSuite) TestDeepenCommitsInfiniteExplicit(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -429,7 +434,7 @@ func (s *UlReqDecodeSuite) TestMalformedDeepenCommits(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen -32", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*negative depth.*") @@ -439,7 +444,7 @@ func (s *UlReqDecodeSuite) TestDeepenCommitsEmpty(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen ", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*invalid syntax.*") @@ -449,7 +454,7 @@ func (s *UlReqDecodeSuite) TestDeepenSince(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen-since 1420167845", // 2015-01-02T03:04:05+00:00 - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -466,7 +471,7 @@ func (s *UlReqDecodeSuite) TestDeepenReference(c *C) { payloads := []string{ "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen-not refs/heads/master", - pktline.FlushString, + "", } ur := 
s.testDecodeOK(c, payloads) @@ -489,7 +494,7 @@ func (s *UlReqDecodeSuite) TestAll(c *C) { "shallow cccccccccccccccccccccccccccccccccccccccc", "shallow dddddddddddddddddddddddddddddddddddddddd", "deepen 1234", - pktline.FlushString, + "", } ur := s.testDecodeOK(c, payloads) @@ -526,7 +531,7 @@ func (s *UlReqDecodeSuite) TestExtraData(c *C) { "want 3333333333333333333333333333333333333333 ofs-delta multi_ack", "deepen 32", "foo", - pktline.FlushString, + "", } r := toPktLines(c, payloads) s.testDecoderErrorMatches(c, r, ".*unexpected payload.*") diff --git a/plumbing/protocol/packp/ulreq_encode.go b/plumbing/protocol/packp/ulreq_encode.go index c451e2316..93c316a11 100644 --- a/plumbing/protocol/packp/ulreq_encode.go +++ b/plumbing/protocol/packp/ulreq_encode.go @@ -21,14 +21,14 @@ func (req *UploadRequest) Encode(w io.Writer) error { } type ulReqEncoder struct { - pe *pktline.Encoder // where to write the encoded data - data *UploadRequest // the data to encode - err error // sticky error + w io.Writer // where to write the encoded data + data *UploadRequest // the data to encode + err error // sticky error } func newUlReqEncoder(w io.Writer) *ulReqEncoder { return &ulReqEncoder{ - pe: pktline.NewEncoder(w), + w: w, } } @@ -50,10 +50,9 @@ func (e *ulReqEncoder) Encode(v *UploadRequest) error { func (e *ulReqEncoder) encodeFirstWant() stateFn { var err error if e.data.Capabilities.IsEmpty() { - err = e.pe.Encodef("want %s\n", e.data.Wants[0]) + _, err = pktline.Writef(e.w, "want %s\n", e.data.Wants[0]) } else { - err = e.pe.Encodef( - "want %s %s\n", + _, err = pktline.Writef(e.w, "want %s %s\n", e.data.Wants[0], e.data.Capabilities.String(), ) @@ -74,7 +73,7 @@ func (e *ulReqEncoder) encodeAdditionalWants() stateFn { continue } - if err := e.pe.Encodef("want %s\n", w); err != nil { + if _, err := pktline.Writef(e.w, "want %s\n", w); err != nil { e.err = fmt.Errorf("encoding want %q: %s", w, err) return nil } @@ -94,7 +93,7 @@ func (e *ulReqEncoder) encodeShallows() 
stateFn { continue } - if err := e.pe.Encodef("shallow %s\n", s); err != nil { + if _, err := pktline.Writef(e.w, "shallow %s\n", s); err != nil { e.err = fmt.Errorf("encoding shallow %q: %s", s, err) return nil } @@ -110,20 +109,20 @@ func (e *ulReqEncoder) encodeDepth() stateFn { case DepthCommits: if depth != 0 { commits := int(depth) - if err := e.pe.Encodef("deepen %d\n", commits); err != nil { + if _, err := pktline.Writef(e.w, "deepen %d\n", commits); err != nil { e.err = fmt.Errorf("encoding depth %d: %s", depth, err) return nil } } case DepthSince: when := time.Time(depth).UTC() - if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil { + if _, err := pktline.Writef(e.w, "deepen-since %d\n", when.Unix()); err != nil { e.err = fmt.Errorf("encoding depth %s: %s", when, err) return nil } case DepthReference: reference := string(depth) - if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil { + if _, err := pktline.Writef(e.w, "deepen-not %s\n", reference); err != nil { e.err = fmt.Errorf("encoding depth %s: %s", reference, err) return nil } @@ -136,7 +135,7 @@ func (e *ulReqEncoder) encodeDepth() stateFn { } func (e *ulReqEncoder) encodeFlush() stateFn { - if err := e.pe.Flush(); err != nil { + if err := pktline.WriteFlush(e.w); err != nil { e.err = fmt.Errorf("encoding flush-pkt: %s", err) return nil } diff --git a/plumbing/protocol/packp/ulreq_encode_test.go b/plumbing/protocol/packp/ulreq_encode_test.go index ba6df1a6a..e060274c1 100644 --- a/plumbing/protocol/packp/ulreq_encode_test.go +++ b/plumbing/protocol/packp/ulreq_encode_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" . 
"gopkg.in/check.v1" @@ -52,7 +51,7 @@ func (s *UlReqEncodeSuite) TestOneWant(c *C) { expected := []string{ "want 1111111111111111111111111111111111111111\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -69,7 +68,7 @@ func (s *UlReqEncodeSuite) TestOneWantWithCapabilities(c *C) { expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack ofs-delta side-band symref=HEAD:/refs/heads/master thin-pack\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -91,7 +90,7 @@ func (s *UlReqEncodeSuite) TestWants(c *C) { "want 3333333333333333333333333333333333333333\n", "want 4444444444444444444444444444444444444444\n", "want 5555555555555555555555555555555555555555\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -113,7 +112,7 @@ func (s *UlReqEncodeSuite) TestWantsDuplicates(c *C) { "want 2222222222222222222222222222222222222222\n", "want 3333333333333333333333333333333333333333\n", "want 4444444444444444444444444444444444444444\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -141,7 +140,7 @@ func (s *UlReqEncodeSuite) TestWantsWithCapabilities(c *C) { "want 3333333333333333333333333333333333333333\n", "want 4444444444444444444444444444444444444444\n", "want 5555555555555555555555555555555555555555\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -156,7 +155,7 @@ func (s *UlReqEncodeSuite) TestShallow(c *C) { expected := []string{ "want 1111111111111111111111111111111111111111 multi_ack\n", "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -179,7 +178,7 @@ func (s *UlReqEncodeSuite) TestManyShallows(c *C) { "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n", "shallow cccccccccccccccccccccccccccccccccccccccc\n", "shallow dddddddddddddddddddddddddddddddddddddddd\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -201,7 +200,7 @@ func (s *UlReqEncodeSuite) 
TestShallowsDuplicate(c *C) { "shallow aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n", "shallow bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n", "shallow cccccccccccccccccccccccccccccccccccccccc\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -215,7 +214,7 @@ func (s *UlReqEncodeSuite) TestDepthCommits(c *C) { expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen 1234\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -230,7 +229,7 @@ func (s *UlReqEncodeSuite) TestDepthSinceUTC(c *C) { expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen-since 1420167845\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -253,7 +252,7 @@ func (s *UlReqEncodeSuite) TestDepthSinceNonUTC(c *C) { expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen-since 1420164245\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -267,7 +266,7 @@ func (s *UlReqEncodeSuite) TestDepthReference(c *C) { expected := []string{ "want 1111111111111111111111111111111111111111\n", "deepen-not refs/heads/feature-foo\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) @@ -308,7 +307,7 @@ func (s *UlReqEncodeSuite) TestAll(c *C) { "shallow cccccccccccccccccccccccccccccccccccccccc\n", "shallow dddddddddddddddddddddddddddddddddddddddd\n", "deepen-since 1420167845\n", - pktline.FlushString, + "", } testUlReqEncode(c, ur, expected) diff --git a/plumbing/protocol/packp/updreq.go b/plumbing/protocol/packp/updreq.go index 8f39b39cb..6768103d3 100644 --- a/plumbing/protocol/packp/updreq.go +++ b/plumbing/protocol/packp/updreq.go @@ -16,6 +16,9 @@ var ( // ReferenceUpdateRequest values represent reference upload requests. // Values from this type are not zero-value safe, use the New function instead. +// TODO: remove the Packfile and Progress fields to make this 1-1 with the +// wire protocol. 
+// See https://git-scm.com/docs/pack-protocol#_reference_update_request_and_packfile_transfer type ReferenceUpdateRequest struct { Capabilities *capability.List Commands []*Command @@ -48,6 +51,7 @@ func NewReferenceUpdateRequest() *ReferenceUpdateRequest { // - ofs-delta // - ref-delta // - delete-refs +// // It leaves up to the user to add the following capabilities later: // - atomic // - ofs-delta diff --git a/plumbing/protocol/packp/updreq_decode.go b/plumbing/protocol/packp/updreq_decode.go index 076de545f..a40413d77 100644 --- a/plumbing/protocol/packp/updreq_decode.go +++ b/plumbing/protocol/packp/updreq_decode.go @@ -83,14 +83,16 @@ func (req *ReferenceUpdateRequest) Decode(r io.Reader) error { rc = io.NopCloser(r) } - d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)} + d := &updReqDecoder{r: rc, s: r} return d.Decode(req) } type updReqDecoder struct { r io.ReadCloser - s *pktline.Scanner + s io.Reader req *ReferenceUpdateRequest + + payload []byte } func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error { @@ -113,16 +115,26 @@ func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error { return nil } -func (d *updReqDecoder) scanLine() error { - if ok := d.s.Scan(); !ok { - return d.scanErrorOr(ErrEmpty) +func (d *updReqDecoder) readLine(e error) error { + _, p, err := pktline.ReadLine(d.s) + if err == io.EOF { + return e + } + if err != nil { + return err } + d.payload = p + return nil } +func (d *updReqDecoder) scanLine() error { + return d.readLine(ErrEmpty) +} + func (d *updReqDecoder) decodeShallow() error { - b := d.s.Bytes() + b := d.payload if !bytes.HasPrefix(b, shallowNoSp) { return nil @@ -137,8 +149,8 @@ func (d *updReqDecoder) decodeShallow() error { return errInvalidShallowObjId(err) } - if ok := d.s.Scan(); !ok { - return d.scanErrorOr(errNoCommands) + if err := d.readLine(errNoCommands); err != nil { + return err } d.req.Shallow = &h @@ -148,8 +160,8 @@ func (d *updReqDecoder) decodeShallow() error { func (d 
*updReqDecoder) decodeCommands() error { for { - b := d.s.Bytes() - if bytes.Equal(b, pktline.Flush) { + b := d.payload + if len(b) == 0 { return nil } @@ -160,14 +172,14 @@ func (d *updReqDecoder) decodeCommands() error { d.req.Commands = append(d.req.Commands, c) - if ok := d.s.Scan(); !ok { - return d.s.Err() + if err := d.readLine(nil); err != nil { + return err } } } func (d *updReqDecoder) decodeCommandAndCapabilities() error { - b := d.s.Bytes() + b := d.payload i := bytes.IndexByte(b, 0) if i == -1 { return errMissingCapabilitiesDelimiter @@ -239,11 +251,3 @@ func parseHash(s string) (plumbing.Hash, error) { h := plumbing.NewHash(s) return h, nil } - -func (d *updReqDecoder) scanErrorOr(origErr error) error { - if err := d.s.Err(); err != nil { - return err - } - - return origErr -} diff --git a/plumbing/protocol/packp/updreq_decode_test.go b/plumbing/protocol/packp/updreq_decode_test.go index bdcbdf503..3a08655df 100644 --- a/plumbing/protocol/packp/updreq_decode_test.go +++ b/plumbing/protocol/packp/updreq_decode_test.go @@ -31,35 +31,35 @@ func (s *UpdReqDecodeSuite) TestInvalidShadow(c *C) { payloads := []string{ "shallow", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 48, got 7$") payloads = []string{ "shallow ", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 48, got 8$") payloads = []string{ "shallow 1ecf0ef2c2dffb796033e5a02219af86ec65", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 
48, got 44$") payloads = []string{ "shallow 1ecf0ef2c2dffb796033e5a02219af86ec6584e54", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow line length: expected 48, got 49$") payloads = []string{ "shallow 1ecf0ef2c2dffb796033e5a02219af86ec6584eu", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid shallow object id: invalid hash: .*") } @@ -67,14 +67,14 @@ func (s *UpdReqDecodeSuite) TestInvalidShadow(c *C) { func (s *UpdReqDecodeSuite) TestMalformedCommand(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5x2ecf0ef2c2dffb796033e5a02219af86ec6584e5xmyref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: malformed command: EOF$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5x2ecf0ef2c2dffb796033e5a02219af86ec6584e5xmyref", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: malformed command: EOF$") } @@ -82,31 +82,31 @@ func (s *UpdReqDecodeSuite) TestMalformedCommand(c *C) { func (s *UpdReqDecodeSuite) TestInvalidCommandInvalidHash(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid old object id: invalid hash size: expected 40, got 39$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, 
toPktLines(c, payloads), "^malformed request: invalid new object id: invalid hash size: expected 40, got 39$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86e 2ecf0ef2c2dffb796033e5a02219af86ec6 m\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command and capabilities line length: expected at least 84, got 72$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584eu 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid old object id: invalid hash: .*$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584eu myref\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid new object id: invalid hash: .*$") } @@ -114,7 +114,7 @@ func (s *UpdReqDecodeSuite) TestInvalidCommandInvalidHash(c *C) { func (s *UpdReqDecodeSuite) TestInvalidCommandMissingNullDelimiter(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "capabilities delimiter not found") } @@ -122,27 +122,27 @@ func (s *UpdReqDecodeSuite) TestInvalidCommandMissingNullDelimiter(c *C) { func (s *UpdReqDecodeSuite) TestInvalidCommandMissingName(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5\x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command and capabilities line length: expected at least 84, got 82$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 \x00", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: 
invalid command and capabilities line length: expected at least 84, got 83$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command line length: expected at least 83, got 81$") payloads = []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 ", - pktline.FlushString, + "", } s.testDecoderErrorMatches(c, toPktLines(c, payloads), "^malformed request: invalid command line length: expected at least 83, got 82$") } @@ -160,7 +160,7 @@ func (s *UpdReqDecodeSuite) TestOneUpdateCommand(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } s.testDecodeOkExpected(c, expected, payloads) @@ -182,7 +182,7 @@ func (s *UpdReqDecodeSuite) TestMultipleCommands(c *C) { "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", - pktline.FlushString, + "", } s.testDecodeOkExpected(c, expected, payloads) @@ -205,7 +205,7 @@ func (s *UpdReqDecodeSuite) TestMultipleCommandsAndCapabilities(c *C) { "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", - pktline.FlushString, + "", } s.testDecodeOkExpected(c, expected, payloads) @@ -230,7 +230,7 @@ 
func (s *UpdReqDecodeSuite) TestMultipleCommandsAndCapabilitiesShallow(c *C) { "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", - pktline.FlushString, + "", } s.testDecodeOkExpected(c, expected, payloads) @@ -250,11 +250,17 @@ func (s *UpdReqDecodeSuite) TestWithPackfile(c *C) { payloads := []string{ "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", } var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - c.Assert(e.EncodeString(payloads...), IsNil) + for _, p := range payloads { + if p == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil) + } else { + _, err := pktline.WriteString(&buf, p) + c.Assert(err, IsNil) + } + } buf.Write(packfileContent) s.testDecodeOkRaw(c, expected, buf.Bytes()) @@ -267,9 +273,14 @@ func (s *UpdReqDecodeSuite) testDecoderErrorMatches(c *C, input io.Reader, patte func (s *UpdReqDecodeSuite) testDecodeOK(c *C, payloads []string) *ReferenceUpdateRequest { var buf bytes.Buffer - e := pktline.NewEncoder(&buf) - err := e.EncodeString(payloads...) 
- c.Assert(err, IsNil) + for _, p := range payloads { + if p == "" { + c.Assert(pktline.WriteFlush(&buf), IsNil) + } else { + _, err := pktline.WriteString(&buf, p) + c.Assert(err, IsNil) + } + } r := NewReferenceUpdateRequest() c.Assert(r.Decode(&buf), IsNil) diff --git a/plumbing/protocol/packp/updreq_encode.go b/plumbing/protocol/packp/updreq_encode.go index 1205cfaf1..e3401d668 100644 --- a/plumbing/protocol/packp/updreq_encode.go +++ b/plumbing/protocol/packp/updreq_encode.go @@ -15,18 +15,16 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error { return err } - e := pktline.NewEncoder(w) - - if err := req.encodeShallow(e, req.Shallow); err != nil { + if err := req.encodeShallow(w, req.Shallow); err != nil { return err } - if err := req.encodeCommands(e, req.Commands, req.Capabilities); err != nil { + if err := req.encodeCommands(w, req.Commands, req.Capabilities); err != nil { return err } if req.Capabilities.Supports(capability.PushOptions) { - if err := req.encodeOptions(e, req.Options); err != nil { + if err := req.encodeOptions(w, req.Options); err != nil { return err } } @@ -42,32 +40,33 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error { return nil } -func (req *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder, - h *plumbing.Hash) error { - +func (req *ReferenceUpdateRequest) encodeShallow(w io.Writer, + h *plumbing.Hash, +) error { if h == nil { return nil } objId := []byte(h.String()) - return e.Encodef("%s%s", shallow, objId) + _, err := pktline.Writef(w, "%s%s", shallow, objId) + return err } -func (req *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder, - cmds []*Command, cap *capability.List) error { - - if err := e.Encodef("%s\x00%s", +func (req *ReferenceUpdateRequest) encodeCommands(w io.Writer, + cmds []*Command, cap *capability.List, +) error { + if _, err := pktline.Writef(w, "%s\x00%s", formatCommand(cmds[0]), cap.String()); err != nil { return err } for _, cmd := range cmds[1:] { - if err := 
e.Encodef(formatCommand(cmd)); err != nil { + if _, err := pktline.Writef(w, formatCommand(cmd)); err != nil { return err } } - return e.Flush() + return pktline.WriteFlush(w) } func formatCommand(cmd *Command) string { @@ -76,14 +75,14 @@ func formatCommand(cmd *Command) string { return fmt.Sprintf("%s %s %s", o, n, cmd.Name) } -func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder, - opts []*Option) error { - +func (req *ReferenceUpdateRequest) encodeOptions(w io.Writer, + opts []*Option, +) error { for _, opt := range opts { - if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil { + if _, err := pktline.Writef(w, "%s=%s", opt.Key, opt.Value); err != nil { return err } } - return e.Flush() + return pktline.WriteFlush(w) } diff --git a/plumbing/protocol/packp/updreq_encode_test.go b/plumbing/protocol/packp/updreq_encode_test.go index 97868bd64..ad02c73e8 100644 --- a/plumbing/protocol/packp/updreq_encode_test.go +++ b/plumbing/protocol/packp/updreq_encode_test.go @@ -5,7 +5,6 @@ import ( "io" "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" . 
"gopkg.in/check.v1" @@ -47,7 +46,7 @@ func (s *UpdReqEncodeSuite) TestOneUpdateCommand(c *C) { expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", ) s.testEncode(c, r, expected) @@ -68,7 +67,7 @@ func (s *UpdReqEncodeSuite) TestMultipleCommands(c *C) { "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", - pktline.FlushString, + "", ) s.testEncode(c, r, expected) @@ -90,7 +89,7 @@ func (s *UpdReqEncodeSuite) TestMultipleCommandsAndCapabilities(c *C) { "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", - pktline.FlushString, + "", ) s.testEncode(c, r, expected) @@ -114,7 +113,7 @@ func (s *UpdReqEncodeSuite) TestMultipleCommandsAndCapabilitiesShallow(c *C) { "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref1\x00shallow", "0000000000000000000000000000000000000000 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref2", "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 0000000000000000000000000000000000000000 myref3", - pktline.FlushString, + "", ) s.testEncode(c, r, expected) @@ -137,7 +136,7 @@ func (s *UpdReqEncodeSuite) TestWithPackfile(c *C) { expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00", - pktline.FlushString, + "", ) expected = append(expected, packfileContent...) 
@@ -161,10 +160,10 @@ func (s *UpdReqEncodeSuite) TestPushOptions(c *C) { expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00push-options", - pktline.FlushString, + "", "SomeKey=SomeValue", "AnotherKey=AnotherValue", - pktline.FlushString, + "", ) s.testEncode(c, r, expected) @@ -183,7 +182,7 @@ func (s *UpdReqEncodeSuite) TestPushAtomic(c *C) { expected := pktlines(c, "1ecf0ef2c2dffb796033e5a02219af86ec6584e5 2ecf0ef2c2dffb796033e5a02219af86ec6584e5 myref\x00atomic", - pktline.FlushString, + "", ) s.testEncode(c, r, expected) diff --git a/plumbing/protocol/packp/uppackreq.go b/plumbing/protocol/packp/uppackreq.go index 48f443856..9f7f071e5 100644 --- a/plumbing/protocol/packp/uppackreq.go +++ b/plumbing/protocol/packp/uppackreq.go @@ -71,8 +71,6 @@ type UploadHaves struct { // Encode encodes the UploadHaves into the Writer. If flush is true, a flush // command will be encoded at the end of the writer content. func (u *UploadHaves) Encode(w io.Writer, flush bool) error { - e := pktline.NewEncoder(w) - plumbing.HashesSort(u.Haves) var last plumbing.Hash @@ -81,7 +79,7 @@ func (u *UploadHaves) Encode(w io.Writer, flush bool) error { continue } - if err := e.Encodef("have %s\n", have); err != nil { + if _, err := pktline.Writef(w, "have %s\n", have); err != nil { return fmt.Errorf("sending haves for %q: %s", have, err) } @@ -89,7 +87,7 @@ func (u *UploadHaves) Encode(w io.Writer, flush bool) error { } if flush && len(u.Haves) != 0 { - if err := e.Flush(); err != nil { + if err := pktline.WriteFlush(w); err != nil { return fmt.Errorf("sending flush-pkt after haves: %s", err) } } diff --git a/plumbing/protocol/packp/uppackresp.go b/plumbing/protocol/packp/uppackresp.go index a485cb7b2..4a5fb05d7 100644 --- a/plumbing/protocol/packp/uppackresp.go +++ b/plumbing/protocol/packp/uppackresp.go @@ -1,11 +1,10 @@ package packp import ( + "bufio" "errors" "io" - "bufio" - 
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" "github.com/go-git/go-git/v5/utils/ioutil" ) @@ -17,6 +16,7 @@ var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be // UploadPackResponse contains all the information responded by the upload-pack // service, the response implements io.ReadCloser that allows to read the // packfile directly from it. +// TODO: v6, to be removed type UploadPackResponse struct { ShallowUpdate ServerResponse diff --git a/plumbing/transport/common.go b/plumbing/transport/common.go index 93d3fba0f..972341a63 100644 --- a/plumbing/transport/common.go +++ b/plumbing/transport/common.go @@ -87,15 +87,15 @@ func NewClient(runner Commander) Transport { // NewUploadPackSession creates a new UploadPackSession. func (c *client) NewUploadPackSession(ep *Endpoint, auth AuthMethod) ( - UploadPackSession, error) { - + UploadPackSession, error, +) { return c.newSession(UploadPackServiceName, ep, auth) } // NewReceivePackSession creates a new ReceivePackSession. func (c *client) NewReceivePackSession(ep *Endpoint, auth AuthMethod) ( - ReceivePackSession, error) { - + ReceivePackSession, error, +) { return c.newSession(ReceivePackServiceName, ep, auth) } @@ -381,8 +381,7 @@ func (s *session) finish() error { // gracefully by sending a flush packet to the server. If the server // operates correctly, it will exit with status 0. 
if !s.packRun { - _, err := s.Stdin.Write(pktline.FlushPkt) - return err + return pktline.WriteFlush(s.Stdin) } return nil @@ -472,9 +471,8 @@ func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) err } func sendDone(w io.Writer) error { - e := pktline.NewEncoder(w) - - return e.Encodef("done\n") + _, err := pktline.Writef(w, "done\n") + return err } // DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse diff --git a/plumbing/transport/http/upload_pack.go b/plumbing/transport/http/upload_pack.go index 1ab1713a1..90eb89d9c 100644 --- a/plumbing/transport/http/upload_pack.go +++ b/plumbing/transport/http/upload_pack.go @@ -34,7 +34,6 @@ func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.Adv func (s *upSession) UploadPack( ctx context.Context, req *packp.UploadPackRequest, ) (*packp.UploadPackResponse, error) { - if req.IsEmpty() { return nil, transport.ErrEmptyUploadPackRequest } @@ -79,7 +78,6 @@ func (s *upSession) Close() error { func (s *upSession) doRequest( ctx context.Context, method, url string, content *bytes.Buffer, ) (*http.Response, error) { - var body io.Reader if content != nil { body = content @@ -107,8 +105,6 @@ func (s *upSession) doRequest( func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) { buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - if err := req.UploadRequest.Encode(buf); err != nil { return nil, fmt.Errorf("sending upload-req message: %s", err) } @@ -117,7 +113,7 @@ func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, err return nil, fmt.Errorf("sending haves message: %s", err) } - if err := e.EncodeString("done\n"); err != nil { + if _, err := pktline.Writef(buf, "done\n"); err != nil { return nil, err } diff --git a/utils/ioutil/common.go b/utils/ioutil/common.go index 235af717b..a6f391940 100644 --- a/utils/ioutil/common.go +++ b/utils/ioutil/common.go @@ -10,11 +10,17 @@ import ( ctxio 
"github.com/jbenet/go-context/io" ) -type readPeeker interface { - io.Reader +// Peeker is an interface for types that can peek at the next bytes. +type Peeker interface { Peek(int) ([]byte, error) } +// ReadPeeker is an interface that groups the basic Read and Peek methods. +type ReadPeeker interface { + io.Reader + Peeker +} + var ( ErrEmptyReader = errors.New("reader is empty") ) @@ -23,7 +29,7 @@ var ( // `ErrEmptyReader` if it is empty. If there is an error when reading the first // byte of the given reader, it will be propagated. func NonEmptyReader(r io.Reader) (io.Reader, error) { - pr, ok := r.(readPeeker) + pr, ok := r.(ReadPeeker) if !ok { pr = bufio.NewReader(r) }