9 "github.com/anacrolix/missinggo/pubsub"
10 "github.com/bradfitz/iter"
11 "github.com/frankban/quicktest"
12 "github.com/stretchr/testify/require"
14 "github.com/anacrolix/torrent/metainfo"
15 pp "github.com/anacrolix/torrent/peer_protocol"
16 "github.com/anacrolix/torrent/storage"
19 // Ensure that no race exists between sending a bitfield, and a subsequent
20 // Have that would potentially alter it.
21 func TestSendBitfieldThenHave(t *testing.T) {
23 config: TestingConfig(t),
// NOTE(review): connection is built over an in-memory pipe ("io.Pipe"
// network string), so no real networking is involved.
26 c := cl.newConnection(nil, false, nil, "io.Pipe", "")
27 c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
// Info describes three pieces (HashSize bytes of pieces data per piece).
28 c.t.setInfo(&metainfo.Info{
29 Pieces: make([]byte, metainfo.HashSize*3),
// Mark only piece 1 complete before posting the bitfield; the commented-out
// argument documents the expected bitfield contents {false, true, false}.
36 c.t._completedPieces.Add(1)
37 c.postBitfield( /*[]bool{false, true, false}*/ )
// Read exactly len(b) bytes of wire output from the pipe's read end.
43 n, err := io.ReadFull(r, b)
45 // This will cause connection.writer to terminate.
48 require.NoError(t, err)
// 15 bytes total: a 5-byte-payload bitfield message plus a Have message.
49 require.EqualValues(t, 15, n)
50 // Here we see that the bitfield doesn't have piece 2 set, as that should
51 // arrive in the following Have message.
52 require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
55 type torrentStorage struct {
59 func (me *torrentStorage) Close() error { return nil }
61 func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
// Completion always reports the zero Completion value, i.e. the piece is
// never considered complete by the storage layer.
65 func (me *torrentStorage) Completion() storage.Completion {
66 return storage.Completion{}
69 func (me *torrentStorage) MarkComplete() error {
73 func (me *torrentStorage) MarkNotComplete() error {
// ReadAt panics unconditionally: the benchmark only writes received chunks,
// so any read is a bug in the code under test.
77 func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
78 panic("shouldn't be called")
// WriteAt guards that every write is exactly one default-sized chunk; the
// rest of the body (the failure branch and success return) is elided in this
// chunk.
81 func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
82 if len(b) != defaultChunkSize {
// BenchmarkConnectionMainReadLoop measures how fast mainReadLoop can ingest
// Piece messages from a peer, using the in-memory torrentStorage stub so no
// disk I/O is timed.
89 func BenchmarkConnectionMainReadLoop(b *testing.B) {
// Rate limiting disabled so the loop itself is what's measured.
92 config: &ClientConfig{
93 DownloadRateLimiter: unlimited,
97 ts := &torrentStorage{}
// Wire the stub in as the torrent's storage implementation.
100 storage: &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}},
101 pieceStateChanges: pubsub.NewPubSub(),
// Single 1 MiB piece; 20 bytes = one SHA-1 piece hash.
103 require.NoError(b, t.setInfo(&metainfo.Info{
104 Pieces: make([]byte, 20),
106 PieceLength: 1 << 20,
108 t.setChunkSize(defaultChunkSize)
// Pend piece 0 so incoming chunks for it are wanted.
109 t._pendingPieces.Set(0, PiecePriorityNormal.BitmapPriority())
111 cn := cl.newConnection(r, true, r.RemoteAddr(), r.RemoteAddr().Network(), regularNetConnPeerConnConnString(r))
// mainReadLoop runs in a goroutine (elided); its exit error arrives here.
113 mrlErr := make(chan error)
116 Piece: make([]byte, defaultChunkSize),
120 err := cn.mainReadLoop()
// Marshal the Piece message once; each iteration replays the same bytes.
126 wb := msg.MustMarshalBinary()
127 b.SetBytes(int64(len(msg.Piece)))
131 for range iter.N(b.N) {
133 // The chunk must be written to storage everytime, to ensure the
134 // writeSem is unlocked.
135 t.pieces[0]._dirtyChunks.Clear()
// Pre-register the request so the incoming chunk is treated as solicited.
136 cn.validReceiveChunks = map[Request]int{newRequestFromMessage(&msg): 1}
138 n, err := w.Write(wb)
139 require.NoError(b, err)
140 require.EqualValues(b, len(wb), n)
// The read loop should end with nil or io.EOF, and must have counted
// exactly b.N useful chunks.
144 c.Assert([]error{nil, io.EOF}, quicktest.Contains, <-mrlErr)
145 c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
// TestConnPexPeerFlags table-tests pexPeerFlags: outgoing connections set
// PexOutgoingConn, encryption preference sets PexPrefersEncryption, and a
// UDP network sets PexSupportsUtp.
148 func TestConnPexPeerFlags(t *testing.T) {
150 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
151 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
// Each case pairs a PeerConn with the flag set it should produce (struct
// field declarations elided in this chunk).
153 var testcases = []struct {
157 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
158 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
159 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
160 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
161 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
162 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
163 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
164 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
// The case index i is passed to EqualValues so a failure names its row.
166 for i, tc := range testcases {
167 f := tc.conn.pexPeerFlags()
168 require.EqualValues(t, tc.f, f, i)
// TestConnPexEvent table-tests pexEvent: the advertised address is the
// remote address for outgoing/UDP cases, but the dial address (remote IP +
// PeerListenPort) when a listen port is known, with flags matching
// pexPeerFlags semantics.
172 func TestConnPexEvent(t *testing.T) {
174 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
175 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
// 4747 is the peer's advertised listen port, distinct from the 4848
// connection port, so the two are distinguishable in expectations.
176 dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
177 dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
// Case struct fields (c, t, e — presumably conn, event type, expected
// event) are elided in this chunk.
179 var testcases = []struct {
186 &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
187 pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp},
191 &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true, PeerListenPort: dialTcpAddr.Port}},
192 pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn},
196 &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), PeerListenPort: dialTcpAddr.Port}},
// Incoming conn with a known listen port: advertise the dial address.
197 pexEvent{pexAdd, dialTcpAddr, 0},
201 &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), PeerListenPort: dialUdpAddr.Port}},
202 pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp},
205 for i, tc := range testcases {
206 e := tc.c.pexEvent(tc.t)
207 require.EqualValues(t, tc.e, e, i)