package torrent

import (
	"errors"
	"io"
	"net"
	"sync"
	"testing"

	"github.com/anacrolix/missinggo/pubsub"
	"github.com/frankban/quicktest"
	"github.com/stretchr/testify/require"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// Ensure that no race exists between sending a bitfield, and a subsequent
// Have that would potentially alter it.
func TestSendBitfieldThenHave(t *testing.T) {
	var cl Client
	cl.init(TestingConfig(t))
	cl.initLogger()
	// A connection with no real socket: only the write path is exercised,
	// through an io.Pipe attached below.
	c := cl.newConnection(nil, false, nil, "io.Pipe", "")
	c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
	// Three pieces' worth of hash bytes. A setInfo error is only logged, not
	// fatal: the byte-exact assertion at the end would surface any fallout.
	if err := c.t.setInfo(&metainfo.Info{Pieces: make([]byte, metainfo.HashSize*3)}); err != nil {
		t.Log(err)
	}
	r, w := io.Pipe()
	//c.r = r
	c.w = w
	c.startWriter()
	// Mark piece 1 complete and queue the bitfield while holding the client
	// lock, so the writer goroutine cannot observe a half-updated state.
	c.locker().Lock()
	c.t._completedPieces.Add(1)
	c.postBitfield( /*[]bool{false, true, false}*/ )
	c.locker().Unlock()
	// Separately (after the bitfield is queued) announce piece 2 via Have.
	c.locker().Lock()
	c.have(2)
	c.locker().Unlock()
	// Read exactly the 6-byte bitfield message plus the 9-byte Have message
	// from the reader end of the pipe.
	b := make([]byte, 15)
	n, err := io.ReadFull(r, b)
	c.locker().Lock()
	// This will cause connection.writer to terminate.
	c.closed.Set()
	c.locker().Unlock()
	require.NoError(t, err)
	require.EqualValues(t, 15, n)
	// Here we see that the bitfield doesn't have piece 2 set, as that should
	// arrive in the following Have message.
	require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
}
53
54 type torrentStorage struct {
55         writeSem sync.Mutex
56 }
57
58 func (me *torrentStorage) Close() error { return nil }
59
60 func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
61         return me
62 }
63
64 func (me *torrentStorage) Completion() storage.Completion {
65         return storage.Completion{}
66 }
67
68 func (me *torrentStorage) MarkComplete() error {
69         return nil
70 }
71
72 func (me *torrentStorage) MarkNotComplete() error {
73         return nil
74 }
75
76 func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
77         panic("shouldn't be called")
78 }
79
80 func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
81         if len(b) != defaultChunkSize {
82                 panic(len(b))
83         }
84         me.writeSem.Unlock()
85         return len(b), nil
86 }
87
// BenchmarkConnectionMainReadLoop measures the peer connection read path by
// streaming Piece messages through a net.Pipe and asserting every chunk is
// counted as usefully received.
func BenchmarkConnectionMainReadLoop(b *testing.B) {
	c := quicktest.New(b)
	var cl Client
	cl.init(&ClientConfig{
		DownloadRateLimiter: unlimited,
	})
	cl.initLogger()
	ts := &torrentStorage{}
	t := &Torrent{
		cl:                &cl,
		storage:           &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}},
		pieceStateChanges: pubsub.NewPubSub(),
	}
	// Single 1 MiB piece (20 bytes of Pieces = one SHA-1 hash).
	require.NoError(b, t.setInfo(&metainfo.Info{
		Pieces:      make([]byte, 20),
		Length:      1 << 20,
		PieceLength: 1 << 20,
	}))
	t.setChunkSize(defaultChunkSize)
	t._pendingPieces.Set(0, PiecePriorityNormal.BitmapPriority())
	r, w := net.Pipe()
	cn := cl.newConnection(r, true, r.RemoteAddr(), r.RemoteAddr().Network(), regularNetConnPeerConnConnString(r))
	cn.setTorrent(t)
	mrlErrChan := make(chan error)
	msg := pp.Message{
		Type:  pp.Piece,
		Piece: make([]byte, defaultChunkSize),
	}
	// Reader goroutine: runs the loop under benchmark. NOTE(review):
	// mainReadLoop is entered with the client lock held — presumably it
	// manages the lock internally; confirm against its implementation.
	go func() {
		cl.lock()
		err := cn.mainReadLoop()
		if err != nil {
			mrlErrChan <- err
		}
		close(mrlErrChan)
	}()
	wb := msg.MustMarshalBinary()
	b.SetBytes(int64(len(msg.Piece)))
	// Writer goroutine: sends b.N copies of the same chunk. ts.writeSem acts
	// as a binary semaphore — torrentStorage.WriteAt unlocks it, so each
	// Lock below blocks until the previous chunk has reached storage.
	go func() {
		ts.writeSem.Lock()
		for i := 0; i < b.N; i += 1 {
			cl.lock()
			// The chunk must be written to storage everytime, to ensure the
			// writeSem is unlocked.
			t.pendAllChunkSpecs(0)
			// Re-arm expectation for exactly one valid receive of this chunk.
			cn.validReceiveChunks = map[RequestIndex]int{
				t.requestIndexFromRequest(newRequestFromMessage(&msg)): 1,
			}
			cl.unlock()
			n, err := w.Write(wb)
			require.NoError(b, err)
			require.EqualValues(b, len(wb), n)
			ts.writeSem.Lock()
		}
		// Closing the write end ends the read loop; EOF is tolerated below.
		if err := w.Close(); err != nil {
			panic(err)
		}
	}()
	mrlErr := <-mrlErrChan
	if mrlErr != nil && !errors.Is(mrlErr, io.EOF) {
		c.Fatal(mrlErr)
	}
	// Every one of the b.N chunks must have been counted as useful.
	c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
}
152
153 func TestConnPexPeerFlags(t *testing.T) {
154         var (
155                 tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
156                 udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
157         )
158         var testcases = []struct {
159                 conn *PeerConn
160                 f    pp.PexPeerFlags
161         }{
162                 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
163                 {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
164                 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
165                 {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
166                 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
167                 {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
168                 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
169                 {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
170         }
171         for i, tc := range testcases {
172                 f := tc.conn.pexPeerFlags()
173                 require.EqualValues(t, tc.f, f, i)
174         }
175 }
176
177 func TestConnPexEvent(t *testing.T) {
178         var (
179                 udpAddr     = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
180                 tcpAddr     = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
181                 dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
182                 dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
183         )
184         var testcases = []struct {
185                 t pexEventType
186                 c *PeerConn
187                 e pexEvent
188         }{
189                 {
190                         pexAdd,
191                         &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
192                         pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp},
193                 },
194                 {
195                         pexDrop,
196                         &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true, PeerListenPort: dialTcpAddr.Port}},
197                         pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn},
198                 },
199                 {
200                         pexAdd,
201                         &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), PeerListenPort: dialTcpAddr.Port}},
202                         pexEvent{pexAdd, dialTcpAddr, 0},
203                 },
204                 {
205                         pexDrop,
206                         &PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), PeerListenPort: dialUdpAddr.Port}},
207                         pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp},
208                 },
209         }
210         for i, tc := range testcases {
211                 e := tc.c.pexEvent(tc.t)
212                 require.EqualValues(t, tc.e, e, i)
213         }
214 }