)
type Decoder struct {
- R *bufio.Reader
+ R *bufio.Reader
+ // Pool, when set, must supply *[]byte values whose slices have capacity for the piece
+ // payloads this decoder reads. Storing *[]byte (rather than []byte) presumably avoids an
+ // extra allocation each time the slice header is put back into the pool — confirm. The
+ // chunk size is assumed not to change for the life of the decoder.
Pool *sync.Pool
MaxLength Integer // TODO: Should this include the length header or not?
}
length--
return d.R.ReadByte()
}
+ // From this point onwards, EOF is unexpected
+ defer func() {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ }()
c, err := readByte()
if err != nil {
return
}
msg.Type = MessageType(c)
+ // Can return directly in cases when err is not nil, or length is known to be zero.
switch msg.Type {
case Choke, Unchoke, Interested, NotInterested, HaveAll, HaveNone:
case Have, AllowedFast, Suggest:
_, err = io.ReadFull(r, b)
length = 0
msg.Bitfield = unmarshalBitfield(b)
+ return
case Piece:
for _, pi := range []*Integer{&msg.Index, &msg.Begin} {
err := pi.Read(r)
}
length -= 8
dataLen := int64(length)
- msg.Piece = *d.Pool.Get().(*[]byte)
- if int64(cap(msg.Piece)) < dataLen {
- return errors.New("piece data longer than expected")
- }
- msg.Piece = msg.Piece[:dataLen]
- _, err := io.ReadFull(r, msg.Piece)
- if err != nil {
- return fmt.Errorf("reading piece data: %w", err)
+ if d.Pool == nil {
+ msg.Piece = make([]byte, dataLen)
+ } else {
+ msg.Piece = *d.Pool.Get().(*[]byte)
+ if int64(cap(msg.Piece)) < dataLen {
+ return errors.New("piece data longer than expected")
+ }
+ msg.Piece = msg.Piece[:dataLen]
}
+ _, err = io.ReadFull(r, msg.Piece)
length = 0
+ return
case Extended:
var b byte
b, err = readByte()
msg.ExtendedID = ExtensionNumber(b)
msg.ExtendedPayload = make([]byte, length)
_, err = io.ReadFull(r, msg.ExtendedPayload)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
length = 0
+ return
case Port:
err = binary.Read(r, binary.BigEndian, &msg.Port)
length -= 2
f.Add([]byte("\x00\x00\x00\x00"))
f.Add([]byte("\x00\x00\x00\x01\x00"))
f.Add([]byte("\x00\x00\x00\x03\x14\x00"))
+ f.Add([]byte("\x00\x00\x00\x01\x07"))
f.Fuzz(func(t *testing.T, b []byte) {
+ t.Logf("%q", b)
c := qt.New(t)
d := Decoder{
R: bufio.NewReader(bytes.NewReader(b)),
for _, m := range ms {
buf.Write(m.MustMarshalBinary())
}
- c.Assert(buf.Bytes(), qt.DeepEquals, b)
+ if len(b) == 0 {
+ c.Assert(buf.Bytes(), qt.HasLen, 0)
+ } else {
+ c.Assert(buf.Bytes(), qt.DeepEquals, b)
+ }
})
}
}
switch msg.Type {
case Choke, Unchoke, Interested, NotInterested, HaveAll, HaveNone:
- case Have:
+ case Have, AllowedFast, Suggest:
err = binary.Write(&buf, binary.BigEndian, msg.Index)
case Request, Cancel, Reject:
for _, i := range []Integer{msg.Index, msg.Begin, msg.Length} {
cfg.Seed = true
cfg.DataDir = t.TempDir()
cfg.DisableUTP = disableUtp
+ // Make sure the leecher-leecher doesn't connect directly to the seeder. This is because I
+ // wanted to see if having the higher chunk-sized leecher-leecher would cause the leecher to
+ // error decoding. However it shouldn't because a client should only be receiving pieces sized
+ // to the chunk size it expects.
+ cfg.DisablePEX = true
//cfg.Debug = true
cfg.Logger = log.Default.WithContextText("leecher")
leecher, err := torrent.NewClient(cfg)