package torrent

import (
        "io"
        "net"
        "sync"
        "testing"
        "time"

        "github.com/anacrolix/missinggo/pubsub"
        "github.com/bradfitz/iter"
        "github.com/stretchr/testify/require"

        "github.com/anacrolix/torrent/metainfo"
        pp "github.com/anacrolix/torrent/peer_protocol"
        "github.com/anacrolix/torrent/storage"
)

// Ensure that no race exists between sending a bitfield and a subsequent
// Have that would potentially alter it.
func TestSendBitfieldThenHave(t *testing.T) {
        cl := Client{
                config: TestingConfig(),
        }
        cl.initLogger()
        c := cl.newConnection(nil, false, nil, "", "")
        c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
        c.t.setInfo(&metainfo.Info{
                Pieces: make([]byte, metainfo.HashSize*3),
        })
        r, w := io.Pipe()
        c.r = r
        c.w = w
        go c.writer(time.Minute)
        c.locker().Lock()
        c.t._completedPieces.Add(1)
        c.postBitfield( /*[]bool{false, true, false}*/ )
        c.locker().Unlock()
        c.locker().Lock()
        c.have(2)
        c.locker().Unlock()
        b := make([]byte, 15)
        n, err := io.ReadFull(r, b)
        c.locker().Lock()
        // This will cause connection.writer to terminate.
        c.closed.Set()
        c.locker().Unlock()
        require.NoError(t, err)
        require.EqualValues(t, 15, n)
        // Here we see that the bitfield doesn't have piece 2 set, as that should
        // arrive in the following Have message.
        require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
}
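
// A minimal sketch (hypothetical helper, not used by the tests) making the
// expected 15 bytes above explicit: a Bitfield message (4-byte length prefix
// 2, message ID 5, payload 0x40, i.e. only piece 1 set) followed by a Have
// for piece 2 (length prefix 5, message ID 4, big-endian index 2).
func exampleBitfieldThenHaveBytes() []byte {
        bf := pp.Message{Type: pp.Bitfield, Bitfield: []bool{false, true, false}}
        have := pp.Message{Type: pp.Have, Index: 2}
        return append(bf.MustMarshalBinary(), have.MustMarshalBinary()...)
}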
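
// torrentStorage stubs out just enough of the storage interfaces for the
// benchmark below. writeSem acts as a hand-off semaphore rather than a plain
// mutex: WriteAt unlocks it to signal that a chunk has reached storage, which
// lets the benchmark's writer goroutine send the next one.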
type torrentStorage struct {
        writeSem sync.Mutex
}

func (me *torrentStorage) Close() error { return nil }

func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
        return me
}

func (me *torrentStorage) Completion() storage.Completion {
        return storage.Completion{}
}

func (me *torrentStorage) MarkComplete() error {
        return nil
}

func (me *torrentStorage) MarkNotComplete() error {
        return nil
}

func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
        panic("shouldn't be called")
}

func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
        if len(b) != defaultChunkSize {
                panic(len(b))
        }
        me.writeSem.Unlock()
        return len(b), nil
}

func BenchmarkConnectionMainReadLoop(b *testing.B) {
        cl := &Client{
                config: &ClientConfig{
                        DownloadRateLimiter: unlimited,
                },
        }
        cl.initLogger()
        ts := &torrentStorage{}
        t := &Torrent{
                cl:                cl,
                storage:           &storage.Torrent{TorrentImpl: ts},
                pieceStateChanges: pubsub.NewPubSub(),
        }
        require.NoError(b, t.setInfo(&metainfo.Info{
                Pieces:      make([]byte, 20),
                Length:      1 << 20,
                PieceLength: 1 << 20,
        }))
        t.setChunkSize(defaultChunkSize)
        t._pendingPieces.Set(0, PiecePriorityNormal.BitmapPriority())
        r, w := net.Pipe()
        cn := cl.newConnection(r, true, nil, "", "")
        cn.setTorrent(t)
        mrlErr := make(chan error)
        msg := pp.Message{
                Type:  pp.Piece,
                Piece: make([]byte, defaultChunkSize),
        }
        go func() {
                cl.lock()
                err := cn.mainReadLoop()
                if err != nil {
                        mrlErr <- err
                }
                close(mrlErr)
        }()
        wb := msg.MustMarshalBinary()
        b.SetBytes(int64(len(msg.Piece)))
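        // Writer goroutine: the next marshalled Piece message may only be
        // sent once WriteAt has released writeSem, so each iteration measures
        // a full read-parse-store round trip.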
        go func() {
                defer w.Close()
                ts.writeSem.Lock()
                for range iter.N(b.N) {
                        cl.lock()
                        // The chunk must be written to storage every time, to
                        // ensure the writeSem is unlocked.
                        t.pieces[0]._dirtyChunks.Clear()
                        cn.validReceiveChunks = map[request]struct{}{newRequestFromMessage(&msg): {}}
                        cl.unlock()
                        n, err := w.Write(wb)
                        require.NoError(b, err)
                        require.EqualValues(b, len(wb), n)
                        ts.writeSem.Lock()
                }
        }()
        require.NoError(b, <-mrlErr)
        require.EqualValues(b, b.N, cn._stats.ChunksReadUseful.Int64())
}
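
// The PEX flags advertised for a connection should reflect its direction,
// the peer's encryption preference, and uTP support for UDP remote
// addresses.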
func TestConnPexPeerFlags(t *testing.T) {
        var (
                tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
                udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
        )
        var testcases = []struct {
                conn *PeerConn
                f    pp.PexPeerFlags
        }{
                {&PeerConn{outgoing: false, PeerPrefersEncryption: false}, 0},
                {&PeerConn{outgoing: false, PeerPrefersEncryption: true}, pp.PexPrefersEncryption},
                {&PeerConn{outgoing: true, PeerPrefersEncryption: false}, pp.PexOutgoingConn},
                {&PeerConn{outgoing: true, PeerPrefersEncryption: true}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
                {&PeerConn{remoteAddr: udpAddr}, pp.PexSupportsUtp},
                {&PeerConn{remoteAddr: udpAddr, outgoing: true}, pp.PexOutgoingConn | pp.PexSupportsUtp},
                {&PeerConn{remoteAddr: tcpAddr, outgoing: true}, pp.PexOutgoingConn},
                {&PeerConn{remoteAddr: tcpAddr}, 0},
        }
        for i, tc := range testcases {
                f := tc.conn.pexPeerFlags()
                require.EqualValues(t, tc.f, f, i)
        }
}
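
// PEX events should carry a dialable address: an outgoing connection's
// remote address was dialed directly, while an incoming connection that
// advertises a listen port has its remote ephemeral port replaced by that
// listen port (the 4747 cases below).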
func TestConnPexEvent(t *testing.T) {
        var (
                udpAddr     = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
                tcpAddr     = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
                dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
                dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
        )
        var testcases = []struct {
                t pexEventType
                c *PeerConn
                e pexEvent
        }{
                {
                        pexAdd,
                        &PeerConn{remoteAddr: udpAddr},
                        pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp},
                },
                {
                        pexDrop,
                        &PeerConn{remoteAddr: tcpAddr, outgoing: true, PeerListenPort: dialTcpAddr.Port},
                        pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn},
                },
                {
                        pexAdd,
                        &PeerConn{remoteAddr: tcpAddr, PeerListenPort: dialTcpAddr.Port},
                        pexEvent{pexAdd, dialTcpAddr, 0},
                },
                {
                        pexDrop,
                        &PeerConn{remoteAddr: udpAddr, PeerListenPort: dialUdpAddr.Port},
                        pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp},
                },
        }
        for i, tc := range testcases {
                e := tc.c.pexEvent(tc.t)
                require.EqualValues(t, tc.e, e, i)
        }
}