10 "github.com/anacrolix/missinggo/pubsub"
11 "github.com/bradfitz/iter"
12 "github.com/frankban/quicktest"
13 "github.com/stretchr/testify/require"
15 "github.com/anacrolix/torrent/metainfo"
16 pp "github.com/anacrolix/torrent/peer_protocol"
17 "github.com/anacrolix/torrent/storage"
20 // Ensure that no race exists between sending a bitfield, and a subsequent
21 // Have that would potentially alter it.
// NOTE(review): this chunk is a truncated view of the file — interior lines
// (client construction, pipe setup, the writes that trigger the Have, and
// closing braces) are missing. Comments below describe only what is visible.
22 func TestSendBitfieldThenHave(t *testing.T) {
// Client is built from the shared testing configuration (constructor call not
// visible here).
24 config: TestingConfig(),
// A connection with no underlying net.Conn; "io.Pipe" as the network string
// suggests the peer side is an in-memory pipe — confirm against the full file.
27 c := cl.newConnection(nil, false, nil, "io.Pipe", "")
// Attach a fresh torrent with a zero infohash, then give it an info with
// three pieces (Pieces holds one hash of metainfo.HashSize bytes per piece).
28 c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
29 c.t.setInfo(&metainfo.Info{
30 Pieces: make([]byte, metainfo.HashSize*3),
// Run the connection's writer goroutine so queued messages get flushed.
35 go c.writer(time.Minute)
// Mark piece 1 complete BEFORE posting the bitfield, so the bitfield should
// include it; piece 2 is presumably completed afterwards (not visible) and
// must arrive as a separate Have rather than mutating the queued bitfield.
37 c.t._completedPieces.Add(1)
38 c.postBitfield( /*[]bool{false, true, false}*/ )
// Read the exact wire bytes the writer produced: bitfield + Have = 15 bytes.
44 n, err := io.ReadFull(r, b)
46 // This will cause connection.writer to terminate.
49 require.NoError(t, err)
50 require.EqualValues(t, 15, n)
51 // Here we see that the bitfield doesn't have piece 2 set, as that should
52 // arrive in the following Have message.
// Expected bytes: 4-byte length 2, msg type 5 (bitfield), payload 0x40 (only
// piece 1 set); then length 5, msg type 4 (have), piece index 2.
53 require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
// torrentStorage is a minimal in-memory storage stub used by the benchmark
// below; it implements the torrent/piece storage interfaces with no-op or
// panicking methods. Field declarations are not visible in this truncated
// view of the file.
56 type torrentStorage struct {
// Close is a no-op: the stub holds no resources to release.
60 func (me *torrentStorage) Close() error { return nil }
// Piece returns the storage.PieceImpl for the given piece. Body not visible
// in this view — presumably it returns the stub itself; confirm against the
// full file.
62 func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
// Completion reports the zero storage.Completion value, i.e. the piece is
// not recorded as complete.
66 func (me *torrentStorage) Completion() storage.Completion {
67 return storage.Completion{}
// MarkComplete marks a piece complete. Body not visible in this view —
// likely a nil-returning no-op for the stub; confirm against the full file.
70 func (me *torrentStorage) MarkComplete() error {
// MarkNotComplete marks a piece incomplete. Body not visible in this view —
// likely a nil-returning no-op for the stub; confirm against the full file.
74 func (me *torrentStorage) MarkNotComplete() error {
// ReadAt always panics: the benchmark only ever writes chunks into storage,
// so any read indicates a bug in the code under test.
78 func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
79 panic("shouldn't be called")
// WriteAt asserts that every write is exactly one default-sized chunk; the
// handling of the mismatch case and the success return are not visible in
// this truncated view.
82 func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
83 if len(b) != defaultChunkSize {
// BenchmarkConnectionMainReadLoop measures throughput of the connection's
// main read loop by feeding it marshalled Piece messages over a pipe and
// counting useful chunk reads.
// NOTE(review): interior lines (client/torrent construction, pipe setup, the
// goroutine wrapper around mainReadLoop, the message literal, timer reset,
// and closing braces) are missing from this truncated view.
90 func BenchmarkConnectionMainReadLoop(b *testing.B) {
// Unlimited download rate so the limiter doesn't dominate the measurement.
93 config: &ClientConfig{
94 DownloadRateLimiter: unlimited,
// Torrent backed by the in-memory stub storage declared above.
98 ts := &torrentStorage{}
101 storage: &storage.Torrent{TorrentImpl: ts},
102 pieceStateChanges: pubsub.NewPubSub(),
// Single-piece info: 20 bytes of Pieces is one hash, with a 1 MiB piece.
104 require.NoError(b, t.setInfo(&metainfo.Info{
105 Pieces: make([]byte, 20),
107 PieceLength: 1 << 20,
109 t.setChunkSize(defaultChunkSize)
// Pend piece 0 at normal priority so incoming chunks for it are wanted.
110 t._pendingPieces.Set(0, PiecePriorityNormal.BitmapPriority())
// Incoming connection reading from one end of the pipe.
112 cn := cl.newConnection(r, true, r.RemoteAddr(), r.RemoteAddr().Network(), regularNetConnPeerConnConnString(r))
// mainReadLoop's terminal error is delivered here so the benchmark can join
// the reader goroutine and assert on how it exited.
114 mrlErr := make(chan error)
// A Piece message carrying one default-sized chunk (literal truncated here).
117 Piece: make([]byte, defaultChunkSize),
121 err := cn.mainReadLoop()
// Marshal once outside the loop; SetBytes makes the benchmark report
// per-chunk throughput.
127 wb := msg.MustMarshalBinary()
128 b.SetBytes(int64(len(msg.Piece)))
132 for range iter.N(b.N) {
134 // The chunk must be written to storage every time, to ensure the
135 // writeSem is unlocked.
136 t.pieces[0]._dirtyChunks.Clear()
// Re-arm the expected-chunk accounting so each iteration's write counts as a
// valid, requested chunk.
137 cn.validReceiveChunks = map[request]int{newRequestFromMessage(&msg): 1}
139 n, err := w.Write(wb)
140 require.NoError(b, err)
141 require.EqualValues(b, len(wb), n)
// The loop may end with nil or io.EOF depending on how the pipe closes; both
// are acceptable. Every iteration must have counted as a useful chunk.
145 c.Assert([]error{nil, io.EOF}, quicktest.Contains, <-mrlErr)
146 c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
// TestConnPexPeerFlags verifies pexPeerFlags: outgoing connections set
// PexOutgoingConn, encryption preference sets PexPrefersEncryption, and a
// UDP remote address sets PexSupportsUtp.
// NOTE(review): struct-literal field names and closing braces are missing
// from this truncated view.
149 func TestConnPexPeerFlags(t *testing.T) {
151 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
152 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
// Each case pairs a PeerConn with the PEX flag set it should report.
154 var testcases = []struct {
158 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
159 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
160 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
161 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
162 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
163 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
164 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
165 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
// The case index i is passed to EqualValues so a failure names the case.
167 for i, tc := range testcases {
168 f := tc.conn.pexPeerFlags()
169 require.EqualValues(t, tc.f, f, i)
// TestConnPexEvent verifies pexEvent construction: which address is
// advertised (the remote address, or the dial address built from
// PeerListenPort) and which flags accompany an add/drop event.
// NOTE(review): the testcase struct field names, the event-type argument
// wiring (tc.t), and closing braces are missing from this truncated view.
173 func TestConnPexEvent(t *testing.T) {
175 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
176 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
// Dial addresses use a different port (4747) than the remote addresses
// (4848) so the test can tell which one the event carries.
177 dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
178 dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
180 var testcases = []struct {
// No PeerListenPort: the event advertises the remote address as-is.
187 &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
188 pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp},
// Outgoing conn: the remote address is already the dialable one, so the
// event keeps it even though PeerListenPort is set.
192 &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true, PeerListenPort: dialTcpAddr.Port}},
193 pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn},
// Incoming conn with a listen port: the event substitutes the dial address.
197 &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), PeerListenPort: dialTcpAddr.Port}},
198 pexEvent{pexAdd, dialTcpAddr, 0},
202 &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), PeerListenPort: dialUdpAddr.Port}},
203 pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp},
206 for i, tc := range testcases {
207 e := tc.c.pexEvent(tc.t)
208 require.EqualValues(t, tc.e, e, i)