12 "github.com/frankban/quicktest"
13 qt "github.com/frankban/quicktest"
14 "github.com/stretchr/testify/require"
15 "golang.org/x/time/rate"
17 "github.com/anacrolix/torrent/metainfo"
18 pp "github.com/anacrolix/torrent/peer_protocol"
19 "github.com/anacrolix/torrent/storage"
22 // Ensure that no race exists between sending a bitfield, and a subsequent
23 // Have that would potentially alter it.
24 func TestSendBitfieldThenHave(t *testing.T) {
// NOTE(review): interior lines are missing from this excerpt; the visible
// statements are kept verbatim.
26 cl.init(TestingConfig(t))
// An in-process pipe connection; no real network is involved.
28 c := cl.newConnection(nil, newConnectionOpts{network: "io.Pipe"})
29 c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
// Three pieces' worth of hash bytes gives the torrent exactly 3 pieces.
30 if err := c.t.setInfo(&metainfo.Info{Pieces: make([]byte, metainfo.HashSize*3)}); err != nil {
36 c.startMessageWriter()
// Mark piece 1 complete before posting the bitfield, so the bitfield
// should carry piece 1 only; piece 2's completion races in afterwards.
38 c.t._completedPieces.Add(1)
39 c.postBitfield( /*[]bool{false, true, false}*/ )
45 n, err := io.ReadFull(r, b)
47 // This will cause connection.writer to terminate.
50 require.NoError(t, err)
// 15 bytes total: a 6-byte bitfield message (len=2, id=5, payload 0x40)
// followed by a 9-byte Have message (len=5, id=4, piece index 2) — see the
// literal asserted below.
51 require.EqualValues(t, 15, n)
52 // Here we see that the bitfield doesn't have piece 2 set, as that should
53 // arrive in the following Have message.
54 require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
// torrentStorage is a stub storage implementation used by
// BenchmarkConnectionMainReadLoop: reads are forbidden, completion is never
// reported, and writes are only checked for size.
// NOTE(review): several method bodies are truncated in this excerpt; visible
// lines are kept verbatim.
57 type torrentStorage struct {
// Close is a no-op for the stub.
61 func (me *torrentStorage) Close() error { return nil }
63 func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
// Completion always returns the zero value (not complete, not known).
67 func (me *torrentStorage) Completion() storage.Completion {
68 return storage.Completion{}
71 func (me *torrentStorage) MarkComplete() error {
75 func (me *torrentStorage) MarkNotComplete() error {
// ReadAt must never happen in the benchmark's read-loop scenario.
79 func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
80 panic("shouldn't be called")
// WriteAt expects every write to be exactly one default-sized chunk.
83 func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
84 if len(b) != defaultChunkSize {
// Benchmark the connection's main read loop processing Piece messages, with
// throughput measured in piece-payload bytes (see b.SetBytes below).
// NOTE(review): interior lines are missing from this excerpt; visible
// statements are kept verbatim.
91 func BenchmarkConnectionMainReadLoop(b *testing.B) {
94 cl.init(&ClientConfig{
// No download throttling, so the loop itself is what's measured.
95 DownloadRateLimiter: unlimited,
98 ts := &torrentStorage{}
99 t := cl.newTorrent(metainfo.Hash{}, nil)
100 t.initialPieceCheckDisabled = true
101 require.NoError(b, t.setInfo(&metainfo.Info{
102 Pieces: make([]byte, 20),
104 PieceLength: 1 << 20,
// Route storage through the stub torrentStorage defined above.
106 t.storage = &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}}
108 t._pendingPieces.Add(0)
110 cn := cl.newConnection(r, newConnectionOpts{
112 remoteAddr: r.RemoteAddr(),
113 network: r.RemoteAddr().Network(),
114 connString: regularNetConnPeerConnConnString(r),
// The read loop runs in the background; its exit error is collected below.
117 mrlErrChan := make(chan error)
120 Piece: make([]byte, defaultChunkSize),
124 err := cn.mainReadLoop()
// Marshal the message once; each iteration replays the same bytes.
130 wb := msg.MustMarshalBinary()
131 b.SetBytes(int64(len(msg.Piece)))
134 for i := 0; i < b.N; i += 1 {
136 // The chunk must be written to storage every time, to ensure the
137 // writeSem is unlocked.
138 t.pendAllChunkSpecs(0)
// Re-arm the expected-chunk accounting so the incoming Piece is valid.
139 cn.validReceiveChunks = map[RequestIndex]int{
140 t.requestIndexFromRequest(newRequestFromMessage(&msg)): 1,
143 n, err := w.Write(wb)
144 require.NoError(b, err)
145 require.EqualValues(b, len(wb), n)
148 if err := w.Close(); err != nil {
// EOF is the expected clean termination after closing the write end.
152 mrlErr := <-mrlErrChan
153 if mrlErr != nil && !errors.Is(mrlErr, io.EOF) {
// Every chunk sent must have been counted as useful data.
156 c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
// Verify the PEX peer flags derived from a connection's direction, encryption
// preference, and transport network.
// NOTE(review): some interior lines (struct field declarations) are missing
// from this excerpt; visible statements are kept verbatim.
159 func TestConnPexPeerFlags(t *testing.T) {
161 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
162 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
// Each case pairs a connection with the exact flag bits expected from
// pexPeerFlags().
164 testcases := []struct {
168 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
169 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
170 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
171 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
// UDP-backed connections additionally advertise uTP support.
172 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
173 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
174 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
175 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
177 for i, tc := range testcases {
178 f := tc.conn.pexPeerFlags()
// The index i is passed so a failure identifies the offending case.
179 require.EqualValues(t, tc.f, f, i)
// Verify the PEX event produced for a connection: the advertised address
// should be the dial (listen) address when a listen port is known, otherwise
// the remote address, with flags matching the transport and direction.
// NOTE(review): interior lines (struct field declarations and event-type
// inputs) are missing from this excerpt; visible statements are kept verbatim.
183 func TestConnPexEvent(t *testing.T) {
186 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
187 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
// The 4747 addresses model the peer's advertised listen port, distinct
// from the connection's remote port (4848).
188 dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
189 dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
191 testcases := []struct {
// No listen port known: the remote address itself is advertised.
198 &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
199 pexEvent{pexAdd, udpAddr.AddrPort(), pp.PexSupportsUtp, nil},
204 Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true},
205 PeerListenPort: dialTcpAddr.Port,
// NOTE(review): drop event still reports the remote address here, not the
// listen address — presumably intentional for outgoing conns; confirm.
207 pexEvent{pexDrop, tcpAddr.AddrPort(), pp.PexOutgoingConn, nil},
212 Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()},
213 PeerListenPort: dialTcpAddr.Port,
// Incoming conn with a known listen port: advertise the dial address.
215 pexEvent{pexAdd, dialTcpAddr.AddrPort(), 0, nil},
220 Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()},
221 PeerListenPort: dialUdpAddr.Port,
223 pexEvent{pexDrop, dialUdpAddr.AddrPort(), pp.PexSupportsUtp, nil},
226 for i, tc := range testcases {
// One subtest per case, named by its index.
227 c.Run(fmt.Sprintf("%v", i), func(c *qt.C) {
228 e, err := tc.c.pexEvent(tc.t)
229 c.Assert(err, qt.IsNil)
230 c.Check(e, qt.Equals, tc.e)
// Verify that a bitfield received after HaveAll supersedes it: the peer is
// removed from connsWithAllPieces and availability follows the bitfield.
// NOTE(review): interior lines are missing from this excerpt; visible
// statements are kept verbatim.
235 func TestHaveAllThenBitfield(t *testing.T) {
237 cl := newTestingClient(t)
238 tt := cl.newTorrentForTesting()
239 // cl.newConnection()
243 pc.initRequestState()
// Register the connection on the torrent directly, bypassing the usual path.
245 tt.conns[&pc] = struct{}{}
246 c.Assert(pc.onPeerSentHaveAll(), qt.IsNil)
// HaveAll puts the peer in the all-pieces set.
247 c.Check(pc.t.connsWithAllPieces, qt.DeepEquals, map[*Peer]struct{}{&pc.Peer: {}})
// Bitfield with pieces 2, 4, 5 set; highest set index 5 implies the peer
// has at least 6 pieces.
248 pc.peerSentBitfield([]bool{false, false, true, false, true, true, false, false})
249 c.Check(pc.peerMinPieces, qt.Equals, 6)
// The bitfield replaces HaveAll, so the all-pieces set is now empty.
250 c.Check(pc.t.connsWithAllPieces, qt.HasLen, 0)
// Seven pieces' worth of hash bytes: one more than peerMinPieces.
251 c.Assert(pc.t.setInfo(&metainfo.Info{
253 Pieces: make([]byte, pieceHash.Size()*7),
256 c.Check(tt.numPieces(), qt.Equals, 7)
// Runs over pieces 0..6 with pieces 2, 4, 5 available:
// {0,1}=0, {2}=1, {3}=0, {4,5}=1, {6}=0.
257 c.Check(tt.pieceAvailabilityRuns(), qt.DeepEquals, []pieceAvailabilityRun{
258 // The last element of the bitfield is irrelevant, as the Torrent actually only has 7
260 {2, 0}, {1, 1}, {1, 0}, {2, 1}, {1, 0},
// Sanity-check the wire-message size constants that bound how many local
// requests fit in the peer write buffer.
// NOTE(review): interior lines are missing from this excerpt; visible
// statements are kept verbatim.
264 func TestApplyRequestStateWriteBufferConstraints(t *testing.T) {
// Interested: 4-byte length prefix + 1-byte id.
266 c.Check(interestedMsgLen, qt.Equals, 5)
// Request: 4-byte length prefix + 1-byte id + three 4-byte integers.
267 c.Check(requestMsgLen, qt.Equals, 17)
268 c.Check(maxLocalToRemoteRequests >= 8, qt.IsTrue)
269 c.Logf("max local to remote requests: %v", maxLocalToRemoteRequests)
// peerConnForPreferredNetworkDirection builds a minimal PeerConn for
// hasPreferredNetworkOver comparisons: peer IDs are written big-endian into
// the ID fields, and ipv6 selects a TCP remote address family.
// NOTE(review): interior lines (including any handling of the utp flag) are
// missing from this excerpt; visible statements are kept verbatim.
272 func peerConnForPreferredNetworkDirection(
273 localPeerId, remotePeerId int,
274 outgoing, utp, ipv6 bool,
277 pc.outgoing = outgoing
// IPv6 branch uses an arbitrary v6 literal; v4 branch an arbitrary v4 one.
282 pc.RemoteAddr = &net.TCPAddr{IP: net.ParseIP(fmt.Sprintf("::420"))}
284 pc.RemoteAddr = &net.TCPAddr{IP: net.IPv4(1, 2, 3, 4)}
// Encode the numeric IDs into the fixed-size peer ID byte arrays.
286 binary.BigEndian.PutUint64(pc.PeerID[:], uint64(remotePeerId))
288 binary.BigEndian.PutUint64(cl.peerID[:], uint64(localPeerId))
// Attach a torrent so the comparison can reach the local client's ID.
289 pc.t = &Torrent{cl: &cl}
// Exercise hasPreferredNetworkOver across direction, transport, and address
// family, using the pc() builder above. Argument order:
// (localPeerId, remotePeerId, outgoing, utp, ipv6).
// NOTE(review): interior lines (assertion wrappers) are missing from this
// excerpt; visible statements are kept verbatim.
293 func TestPreferredNetworkDirection(t *testing.T) {
294 pc := peerConnForPreferredNetworkDirection
297 // Prefer outgoing to lower peer ID
300 pc(1, 2, true, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)),
304 pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, true, false, false)),
// Same comparison with the ID order reversed (local 2, remote 1).
308 pc(2, 1, false, false, false).hasPreferredNetworkOver(pc(2, 1, true, false, false)),
// uTP vs non-uTP.
314 pc(1, 2, false, true, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)),
// IPv4 vs IPv6.
319 pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, true)),
// Identical connections: neither should be preferred.
324 pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)),
// Exercise onReadRequest with chunk-sized and larger requests, with and
// without upload rate limiting.
// NOTE(review): interior lines are missing from this excerpt (including
// where req.Length is raised above defaultChunkSize); visible statements are
// kept verbatim.
329 func TestReceiveLargeRequest(t *testing.T) {
331 cl := newTestingClient(t)
332 pc := cl.newConnection(nil, newConnectionOpts{network: "test"})
333 tor := cl.newTorrentForTesting()
// Large pieces, so requests bigger than the default chunk size fit in one
// piece.
334 tor.info = &metainfo.Info{PieceLength: 3 << 20}
// Piece 0 is complete, so requests for it are serviceable.
336 tor._completedPieces.Add(0)
337 pc.PeerExtensionBytes.SetBit(pp.ExtensionBitFast, true)
339 pc.initMessageWriter()
341 req.Length = defaultChunkSize
342 c.Assert(pc.fastEnabled(), qt.IsTrue)
// A default-sized request is accepted and queued.
343 c.Check(pc.onReadRequest(req, false), qt.IsNil)
344 c.Check(pc.peerRequests, qt.HasLen, 1)
346 c.Check(pc.onReadRequest(req, false), qt.IsNil)
347 c.Check(pc.peerRequests, qt.HasLen, 2)
// Reset and retry under a rate limiter whose burst is one default chunk.
348 pc.peerRequests = nil
349 pc.t.cl.config.UploadRateLimiter = rate.NewLimiter(1, defaultChunkSize)
350 req.Length = defaultChunkSize
351 c.Check(pc.onReadRequest(req, false), qt.IsNil)
352 c.Check(pc.peerRequests, qt.HasLen, 1)
354 c.Check(pc.onReadRequest(req, false), qt.IsNil)
// 17 bytes matches requestMsgLen (asserted in
// TestApplyRequestStateWriteBufferConstraints) — presumably a single reject
// message was buffered instead of queueing the request; confirm.
355 c.Check(pc.messageWriter.writeBuffer.Len(), qt.Equals, 17)
358 func TestChunkOverflowsPiece(t *testing.T) {
360 check := func(begin, length, limit pp.Integer, expected bool) {
361 c.Check(chunkOverflowsPiece(ChunkSpec{begin, length}, limit), qt.Equals, expected)
364 check(2, pp.IntegerMax, 1, true)
365 check(2, pp.IntegerMax, 3, true)
366 check(2, pp.IntegerMax, pp.IntegerMax, true)
367 check(2, pp.IntegerMax-2, pp.IntegerMax, false)