// Source: btrtrc.git / peerconn_test.go
// (commit: "Track dirty chunks in a single bitmap on Torrent")
1 package torrent
2
3 import (
4         "io"
5         "net"
6         "sync"
7         "testing"
8
9         "github.com/anacrolix/missinggo/pubsub"
10         "github.com/frankban/quicktest"
11         "github.com/stretchr/testify/require"
12
13         "github.com/anacrolix/torrent/metainfo"
14         pp "github.com/anacrolix/torrent/peer_protocol"
15         "github.com/anacrolix/torrent/storage"
16 )
17
// Ensure that no race exists between sending a bitfield, and a subsequent
// Have that would potentially alter it.
func TestSendBitfieldThenHave(t *testing.T) {
	var cl Client
	cl.init(TestingConfig(t))
	cl.initLogger()
	// No real net.Conn is needed; only the write path is exercised, via the
	// io.Pipe wired up below.
	c := cl.newConnection(nil, false, nil, "io.Pipe", "")
	c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
	// Three pieces (HashSize bytes per piece hash). setInfo may return an
	// error here; the test only needs the piece count, so log rather than fail.
	if err := c.t.setInfo(&metainfo.Info{Pieces: make([]byte, metainfo.HashSize*3)}); err != nil {
		t.Log(err)
	}
	r, w := io.Pipe()
	//c.r = r
	c.w = w
	c.startWriter()
	// Under the client lock: mark piece 1 complete and queue the bitfield.
	c.locker().Lock()
	c.t._completedPieces.Add(1)
	c.postBitfield( /*[]bool{false, true, false}*/ )
	c.locker().Unlock()
	// In a separate critical section, announce piece 2. The writer must emit
	// the bitfield exactly as queued, then a distinct Have message.
	c.locker().Lock()
	c.have(2)
	c.locker().Unlock()
	// 15 bytes total on the wire: a 6-byte bitfield message
	// (len=2, id=0x05, payload 0x40 = piece 1 set) followed by a 9-byte Have
	// (len=5, id=0x04, index=2).
	b := make([]byte, 15)
	n, err := io.ReadFull(r, b)
	c.locker().Lock()
	// This will cause connection.writer to terminate.
	c.closed.Set()
	c.locker().Unlock()
	require.NoError(t, err)
	require.EqualValues(t, 15, n)
	// Here we see that the bitfield doesn't have piece 2 set, as that should
	// arrive in the following Have message.
	require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
}
52
53 type torrentStorage struct {
54         writeSem sync.Mutex
55 }
56
57 func (me *torrentStorage) Close() error { return nil }
58
59 func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
60         return me
61 }
62
63 func (me *torrentStorage) Completion() storage.Completion {
64         return storage.Completion{}
65 }
66
67 func (me *torrentStorage) MarkComplete() error {
68         return nil
69 }
70
71 func (me *torrentStorage) MarkNotComplete() error {
72         return nil
73 }
74
75 func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
76         panic("shouldn't be called")
77 }
78
79 func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
80         if len(b) != defaultChunkSize {
81                 panic(len(b))
82         }
83         me.writeSem.Unlock()
84         return len(b), nil
85 }
86
87 func BenchmarkConnectionMainReadLoop(b *testing.B) {
88         c := quicktest.New(b)
89         var cl Client
90         cl.init(&ClientConfig{
91                 DownloadRateLimiter: unlimited,
92         })
93         cl.initLogger()
94         ts := &torrentStorage{}
95         t := &Torrent{
96                 cl:                &cl,
97                 storage:           &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}},
98                 pieceStateChanges: pubsub.NewPubSub(),
99         }
100         require.NoError(b, t.setInfo(&metainfo.Info{
101                 Pieces:      make([]byte, 20),
102                 Length:      1 << 20,
103                 PieceLength: 1 << 20,
104         }))
105         t.setChunkSize(defaultChunkSize)
106         t._pendingPieces.Set(0, PiecePriorityNormal.BitmapPriority())
107         r, w := net.Pipe()
108         cn := cl.newConnection(r, true, r.RemoteAddr(), r.RemoteAddr().Network(), regularNetConnPeerConnConnString(r))
109         cn.setTorrent(t)
110         mrlErr := make(chan error)
111         msg := pp.Message{
112                 Type:  pp.Piece,
113                 Piece: make([]byte, defaultChunkSize),
114         }
115         go func() {
116                 cl.lock()
117                 err := cn.mainReadLoop()
118                 if err != nil {
119                         mrlErr <- err
120                 }
121                 close(mrlErr)
122         }()
123         wb := msg.MustMarshalBinary()
124         b.SetBytes(int64(len(msg.Piece)))
125         go func() {
126                 ts.writeSem.Lock()
127                 for i := 0; i < b.N; i += 1 {
128                         cl.lock()
129                         // The chunk must be written to storage everytime, to ensure the
130                         // writeSem is unlocked.
131                         t.pendAllChunkSpecs(0)
132                         cn.validReceiveChunks = map[RequestIndex]int{
133                                 t.requestIndexFromRequest(newRequestFromMessage(&msg)): 1,
134                         }
135                         cl.unlock()
136                         n, err := w.Write(wb)
137                         require.NoError(b, err)
138                         require.EqualValues(b, len(wb), n)
139                         ts.writeSem.Lock()
140                 }
141                 if err := w.Close(); err != nil {
142                         panic(err)
143                 }
144         }()
145         c.Assert([]error{nil, io.EOF}, quicktest.Contains, <-mrlErr)
146         c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
147 }
148
149 func TestConnPexPeerFlags(t *testing.T) {
150         var (
151                 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
152                 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
153         )
154         var testcases = []struct {
155                 conn *PeerConn
156                 f    pp.PexPeerFlags
157         }{
158                 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
159                 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
160                 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
161                 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
162                 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
163                 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
164                 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
165                 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
166         }
167         for i, tc := range testcases {
168                 f := tc.conn.pexPeerFlags()
169                 require.EqualValues(t, tc.f, f, i)
170         }
171 }
172
173 func TestConnPexEvent(t *testing.T) {
174         var (
175                 udpAddr     = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
176                 tcpAddr     = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
177                 dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
178                 dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
179         )
180         var testcases = []struct {
181                 t pexEventType
182                 c *PeerConn
183                 e pexEvent
184         }{
185                 {
186                         pexAdd,
187                         &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
188                         pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp},
189                 },
190                 {
191                         pexDrop,
192                         &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true, PeerListenPort: dialTcpAddr.Port}},
193                         pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn},
194                 },
195                 {
196                         pexAdd,
197                         &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), PeerListenPort: dialTcpAddr.Port}},
198                         pexEvent{pexAdd, dialTcpAddr, 0},
199                 },
200                 {
201                         pexDrop,
202                         &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), PeerListenPort: dialUdpAddr.Port}},
203                         pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp},
204                 },
205         }
206         for i, tc := range testcases {
207                 e := tc.c.pexEvent(tc.t)
208                 require.EqualValues(t, tc.e, e, i)
209         }
210 }