12 "github.com/frankban/quicktest"
13 qt "github.com/frankban/quicktest"
14 "github.com/stretchr/testify/require"
16 "github.com/anacrolix/torrent/metainfo"
17 pp "github.com/anacrolix/torrent/peer_protocol"
18 "github.com/anacrolix/torrent/storage"
21 // Ensure that no race exists between sending a bitfield, and a subsequent
22 // Have that would potentially alter it.
23 func TestSendBitfieldThenHave(t *testing.T) {
25 cl.init(TestingConfig(t))
// NOTE(review): several original lines are elided in this view (client/pipe
// setup, error branches, goroutine wiring); comments below describe only the
// visible statements.
27 c := cl.newConnection(nil, false, nil, "io.Pipe", "")
// Attach a fresh torrent (zero infohash) and give it an info with 3 pieces.
28 c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
29 if err := c.t.setInfo(&metainfo.Info{Pieces: make([]byte, metainfo.HashSize*3)}); err != nil {
// Mark piece 1 complete before posting the bitfield, so the bitfield that is
// written should already include it.
37 c.t._completedPieces.Add(1)
38 c.postBitfield( /*[]bool{false, true, false}*/ )
44 n, err := io.ReadFull(r, b)
46 // This will cause connection.writer to terminate.
49 require.NoError(t, err)
50 require.EqualValues(t, 15, n)
51 // Here we see that the bitfield doesn't have piece 2 set, as that should
52 // arrive in the following Have message.
// Expected wire bytes: a 5-byte bitfield message (payload 0x40 = piece 1 set)
// immediately followed by a 5-byte Have message for piece 2.
53 require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
// torrentStorage is a stub storage implementation used by the benchmark
// below: reads panic, completion state is always unknown, and writes are
// validated for chunk size. NOTE(review): bodies are partially elided in
// this view.
56 type torrentStorage struct {
// Close is a no-op; there is nothing to release.
60 func (me *torrentStorage) Close() error { return nil }
// Piece returns this same stub as the PieceImpl for every piece (body elided).
62 func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
// Completion always reports the zero value, i.e. not known to be complete.
66 func (me *torrentStorage) Completion() storage.Completion {
67 return storage.Completion{}
70 func (me *torrentStorage) MarkComplete() error {
74 func (me *torrentStorage) MarkNotComplete() error {
// Reads are never expected during the benchmark; fail loudly if one happens.
78 func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
79 panic("shouldn't be called")
// WriteAt accepts only writes of exactly one default-sized chunk.
82 func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
83 if len(b) != defaultChunkSize {
// BenchmarkConnectionMainReadLoop measures PeerConn.mainReadLoop throughput
// by repeatedly writing a marshalled Piece message into one end of a pipe.
// NOTE(review): several original lines are elided in this view; comments
// describe only the visible statements.
90 func BenchmarkConnectionMainReadLoop(b *testing.B) {
93 cl.init(&ClientConfig{
94 DownloadRateLimiter: unlimited,
97 ts := &torrentStorage{}
98 t := cl.newTorrent(metainfo.Hash{}, nil)
// Skip the initial hash check so the benchmark exercises only the read loop.
99 t.initialPieceCheckDisabled = true
100 require.NoError(b, t.setInfo(&metainfo.Info{
101 Pieces: make([]byte, 20),
103 PieceLength: 1 << 20,
// Wire the stub storage in so received chunks have somewhere to go.
105 t.storage = &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}}
107 t._pendingPieces.Add(0)
109 cn := cl.newConnection(r, true, r.RemoteAddr(), r.RemoteAddr().Network(), regularNetConnPeerConnConnString(r))
// The read loop runs in its own goroutine; its terminal error is delivered on
// this channel so the benchmark can assert on it after closing the writer.
111 mrlErrChan := make(chan error)
114 Piece: make([]byte, defaultChunkSize),
118 err := cn.mainReadLoop()
// Marshal the Piece message once; every iteration writes the same bytes.
124 wb := msg.MustMarshalBinary()
125 b.SetBytes(int64(len(msg.Piece)))
128 for i := 0; i < b.N; i += 1 {
130 // The chunk must be written to storage every time, to ensure the
131 // writeSem is unlocked.
132 t.pendAllChunkSpecs(0)
// Pretend we requested this chunk so mainReadLoop accepts it as valid.
133 cn.validReceiveChunks = map[RequestIndex]int{
134 t.requestIndexFromRequest(newRequestFromMessage(&msg)): 1,
137 n, err := w.Write(wb)
138 require.NoError(b, err)
139 require.EqualValues(b, len(wb), n)
// Closing the writer terminates mainReadLoop; EOF is the expected outcome.
142 if err := w.Close(); err != nil {
146 mrlErr := <-mrlErrChan
147 if mrlErr != nil && !errors.Is(mrlErr, io.EOF) {
// Every written chunk should have been counted as useful.
// NOTE(review): this line uses the unaliased "quicktest" import while the
// rest of the file uses the qt alias — consider unifying on qt and dropping
// the duplicate import.
150 c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
// TestConnPexPeerFlags is a table-driven check that pexPeerFlags derives the
// correct PEX flag bits from a connection's direction, encryption preference,
// and transport network (utp vs tcp). NOTE(review): some lines (struct field
// declarations, closing braces) are elided in this view.
153 func TestConnPexPeerFlags(t *testing.T) {
155 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
156 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
158 testcases := []struct {
// Encryption preference and outgoing direction each map to a dedicated flag.
162 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
163 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
164 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
165 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
// A UDP network implies uTP support; TCP sets no transport flag.
166 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
167 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
168 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
169 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
171 for i, tc := range testcases {
172 f := tc.conn.pexPeerFlags()
// Pass the case index so a failure identifies the offending row.
173 require.EqualValues(t, tc.f, f, i)
// TestConnPexEvent is a table-driven check that pexEvent builds the correct
// event (type, address, flags) for a connection. When the peer advertised a
// listen port (PeerListenPort), the dialable address (dial*Addr, port 4747)
// is reported instead of the observed remote address. NOTE(review): some
// lines (struct fields, event-type inputs, closing braces) are elided in
// this view.
177 func TestConnPexEvent(t *testing.T) {
179 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
180 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
181 dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
182 dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
184 testcases := []struct {
// No listen port advertised: the observed remote address is used as-is.
191 &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
192 pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp, nil},
// Outgoing connection: the remote address is already dialable, keep it.
196 &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true, PeerListenPort: dialTcpAddr.Port}},
197 pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn, nil},
// Incoming connection with an advertised listen port: report the dial addr.
201 &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), PeerListenPort: dialTcpAddr.Port}},
202 pexEvent{pexAdd, dialTcpAddr, 0, nil},
206 &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), PeerListenPort: dialUdpAddr.Port}},
207 pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp, nil},
210 for i, tc := range testcases {
211 e := tc.c.pexEvent(tc.t)
// Pass the case index so a failure identifies the offending row.
212 require.EqualValues(t, tc.e, e, i)
// TestHaveAllThenBitfield verifies that a HaveAll message followed by a
// bitfield replaces the "peer has everything" state with the bitfield's
// per-piece availability. NOTE(review): some lines (the PeerConn setup and
// closing braces) are elided in this view.
216 func TestHaveAllThenBitfield(t *testing.T) {
218 cl := newTestingClient(t)
219 tt := cl.newTorrentForTesting()
220 // cl.newConnection()
224 pc.initRequestState()
// Register the connection on the torrent so availability bookkeeping applies.
226 tt.conns[&pc] = struct{}{}
// HaveAll marks the peer as having every piece...
227 c.Assert(pc.onPeerSentHaveAll(), qt.IsNil)
228 c.Check(pc.t.connsWithAllPieces, qt.DeepEquals, map[*Peer]struct{}{&pc.Peer: {}})
// ...and a subsequent bitfield must fully supersede that state.
229 pc.peerSentBitfield([]bool{false, false, true, false, true, true, false, false})
// Highest set bit is index 5, so the peer must have at least 6 pieces.
230 c.Check(pc.peerMinPieces, qt.Equals, 6)
231 c.Check(pc.t.connsWithAllPieces, qt.HasLen, 0)
// Only now give the torrent its info: 7 pieces.
232 c.Assert(pc.t.setInfo(&metainfo.Info{
234 Pieces: make([]byte, pieceHash.Size()*7),
237 c.Check(tt.numPieces(), qt.Equals, 7)
238 c.Check(tt.pieceAvailabilityRuns(), qt.DeepEquals, []pieceAvailabilityRun{
239 // The last element of the bitfield is irrelevant, as the Torrent actually only has 7
// Runs of (length, availability) matching the first 7 bitfield entries.
241 {2, 0}, {1, 1}, {1, 0}, {2, 1}, {1, 0},
// TestApplyRequestStateWriteBufferConstraints sanity-checks the wire-size
// constants used to budget the peer write buffer: an interested message is
// 5 bytes, a request message is 17 bytes, and at least 8 outstanding local
// requests must fit.
245 func TestApplyRequestStateWriteBufferConstraints(t *testing.T) {
247 c.Check(interestedMsgLen, qt.Equals, 5)
248 c.Check(requestMsgLen, qt.Equals, 17)
249 c.Check(maxLocalToRemoteRequests >= 8, qt.IsTrue)
250 c.Logf("max local to remote requests: %v", maxLocalToRemoteRequests)
// peerConnForPreferredNetworkDirection builds a minimal PeerConn for the
// hasPreferredNetworkOver tests below. Peer IDs are the big-endian encodings
// of the given ints; ipv6 selects an IPv6 vs IPv4 remote address.
// NOTE(review): some lines (PeerConn construction, utp branch, return) are
// elided in this view.
253 func peerConnForPreferredNetworkDirection(localPeerId, remotePeerId int, outgoing, utp, ipv6 bool) *PeerConn {
255 pc.outgoing = outgoing
// NOTE(review): fmt.Sprintf with no format args is redundant (staticcheck
// S1039) — net.ParseIP("::420") would do.
260 pc.RemoteAddr = &net.TCPAddr{IP: net.ParseIP(fmt.Sprintf("::420"))}
262 pc.RemoteAddr = &net.TCPAddr{IP: net.IPv4(1, 2, 3, 4)}
264 binary.BigEndian.PutUint64(pc.PeerID[:], uint64(remotePeerId))
266 binary.BigEndian.PutUint64(cl.peerID[:], uint64(localPeerId))
267 pc.t = &Torrent{cl: &cl}
// TestPreferredNetworkDirection checks hasPreferredNetworkOver's tie-breaking
// between two connections to the same peer. NOTE(review): some lines are
// elided in this view.
271 func TestPreferredNetworkDirection(t *testing.T) {
// Shorthand constructor: pc(localID, remoteID, outgoing, utp, ipv6).
272 pc := peerConnForPreferredNetworkDirection
274 // Prefer outgoing to higher peer ID
275 c.Assert(pc(1, 2, true, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)), qt.IsTrue)
276 c.Assert(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, true, false, false)), qt.IsFalse)
277 c.Assert(pc(2, 1, false, false, false).hasPreferredNetworkOver(pc(2, 1, true, false, false)), qt.IsTrue)
// uTP is not preferred over TCP here.
279 c.Assert(pc(1, 2, false, true, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)), qt.IsFalse)
// IPv4 is not preferred over IPv6.
281 c.Assert(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, true)), qt.IsFalse)
// Identical connections: neither is preferred over the other.
283 c.Assert(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false)), qt.IsFalse)