peerconn_test.go
package torrent

import (
	"io"
	"net"
	"sync"
	"testing"

	"github.com/anacrolix/missinggo/pubsub"
	"github.com/frankban/quicktest"
	"github.com/stretchr/testify/require"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// Ensure that no race exists between sending a bitfield and a subsequent Have
// that would potentially alter it.
func TestSendBitfieldThenHave(t *testing.T) {
	var cl Client
	cl.init(TestingConfig(t))
	cl.initLogger()
	c := cl.newConnection(nil, false, nil, "io.Pipe", "")
	c.setTorrent(cl.newTorrent(metainfo.Hash{}, nil))
	if err := c.t.setInfo(&metainfo.Info{Pieces: make([]byte, metainfo.HashSize*3)}); err != nil {
		t.Log(err)
	}
	r, w := io.Pipe()
	//c.r = r
	c.w = w
	c.startWriter()
	c.locker().Lock()
	c.t._completedPieces.Add(1)
	c.postBitfield( /*[]bool{false, true, false}*/ )
	c.locker().Unlock()
	c.locker().Lock()
	c.have(2)
	c.locker().Unlock()
	b := make([]byte, 15)
	n, err := io.ReadFull(r, b)
	c.locker().Lock()
	// This will cause connection.writer to terminate.
	c.closed.Set()
	c.locker().Unlock()
	require.NoError(t, err)
	require.EqualValues(t, 15, n)
	// Here we see that the bitfield doesn't have piece 2 set, as that should
	// arrive in the following Have message.
	require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
}

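// The 15 expected bytes above are two wire messages back to back: a Bitfield
// ("\x00\x00\x00\x02\x05\x40": length 2, type 5, one payload byte with only
// piece 1 set) followed by a Have for piece 2 ("\x00\x00\x00\x05\x04" plus a
// big-endian index). The sketch below is illustrative and not part of the
// original file; it assumes pp.Message marshals Bitfield and Have messages in
// the standard BitTorrent framing described above.
func TestExpectedBitfieldThenHaveWireBytes(t *testing.T) {
	bf := pp.Message{Type: pp.Bitfield, Bitfield: []bool{false, true, false}}
	have := pp.Message{Type: pp.Have, Index: 2}
	wire := append(bf.MustMarshalBinary(), have.MustMarshalBinary()...)
	require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(wire))
}

// torrentStorage is a stub storage implementation whose writeSem mutex is
// used as a semaphore: WriteAt releases it so the benchmark below can tell
// when a chunk has reached storage.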
type torrentStorage struct {
	writeSem sync.Mutex
}

func (me *torrentStorage) Close() error { return nil }

func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
	return me
}

func (me *torrentStorage) Completion() storage.Completion {
	return storage.Completion{}
}

func (me *torrentStorage) MarkComplete() error {
	return nil
}

func (me *torrentStorage) MarkNotComplete() error {
	return nil
}

func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
	panic("shouldn't be called")
}

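// WriteAt asserts that each chunk arrives whole and chunk-sized, then
// releases writeSem to signal the benchmark's writer goroutine that the chunk
// has been "written" to storage.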
func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
	if len(b) != defaultChunkSize {
		panic(len(b))
	}
	me.writeSem.Unlock()
	return len(b), nil
}

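// BenchmarkConnectionMainReadLoop measures how quickly mainReadLoop can
// receive and store Piece messages pushed through one end of a net.Pipe.
// It can be run on its own with, for example:
//
//	go test -run '^$' -bench BenchmarkConnectionMainReadLoop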
func BenchmarkConnectionMainReadLoop(b *testing.B) {
	c := quicktest.New(b)
	var cl Client
	cl.init(&ClientConfig{
		DownloadRateLimiter: unlimited,
	})
	cl.initLogger()
	ts := &torrentStorage{}
	t := &Torrent{
		cl:                &cl,
		storage:           &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}},
		pieceStateChanges: pubsub.NewPubSub(),
	}
	require.NoError(b, t.setInfo(&metainfo.Info{
		Pieces:      make([]byte, 20),
		Length:      1 << 20,
		PieceLength: 1 << 20,
	}))
	t.setChunkSize(defaultChunkSize)
	t._pendingPieces.Set(0, PiecePriorityNormal.BitmapPriority())
	r, w := net.Pipe()
	cn := cl.newConnection(r, true, r.RemoteAddr(), r.RemoteAddr().Network(), regularNetConnPeerConnConnString(r))
	cn.setTorrent(t)
	mrlErr := make(chan error)
	msg := pp.Message{
		Type:  pp.Piece,
		Piece: make([]byte, defaultChunkSize),
	}
	go func() {
		cl.lock()
		err := cn.mainReadLoop()
		if err != nil {
			mrlErr <- err
		}
		close(mrlErr)
	}()
	wb := msg.MustMarshalBinary()
	b.SetBytes(int64(len(msg.Piece)))
	go func() {
		ts.writeSem.Lock()
		for i := 0; i < b.N; i++ {
			cl.lock()
			// The chunk must be written to storage every time, to ensure the
			// writeSem is unlocked.
			t.pieces[0]._dirtyChunks.Clear()
			cn.validReceiveChunks = map[Request]int{newRequestFromMessage(&msg): 1}
			cl.unlock()
			n, err := w.Write(wb)
			require.NoError(b, err)
			require.EqualValues(b, len(wb), n)
			ts.writeSem.Lock()
		}
		if err := w.Close(); err != nil {
			panic(err)
		}
	}()
	c.Assert([]error{nil, io.EOF}, quicktest.Contains, <-mrlErr)
	c.Assert(cn._stats.ChunksReadUseful.Int64(), quicktest.Equals, int64(b.N))
}

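// Check that pexPeerFlags reports the connection direction, the peer's
// encryption preference, and whether the transport is uTP.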
func TestConnPexPeerFlags(t *testing.T) {
	var (
		tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
		udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
	)
	var testcases = []struct {
		conn *PeerConn
		f    pp.PexPeerFlags
	}{
		{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
		{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
		{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
		{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
		{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
		{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
		{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
		{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
	}
	for i, tc := range testcases {
		f := tc.conn.pexPeerFlags()
		require.EqualValues(t, tc.f, f, i)
	}
}

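// Check the address and flags that pexEvent derives from a connection: an
// incoming connection that advertises a listen port has its event address
// rebuilt from the remote IP and PeerListenPort, otherwise the remote address
// is used as-is.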
func TestConnPexEvent(t *testing.T) {
	var (
		udpAddr     = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
		tcpAddr     = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
		dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
		dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
	)
	var testcases = []struct {
		t pexEventType
		c *PeerConn
		e pexEvent
	}{
		{
			pexAdd,
			&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
			pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp},
		},
		{
			pexDrop,
			&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true, PeerListenPort: dialTcpAddr.Port}},
			pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn},
		},
		{
			pexAdd,
			&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), PeerListenPort: dialTcpAddr.Port}},
			pexEvent{pexAdd, dialTcpAddr, 0},
		},
		{
			pexDrop,
			&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), PeerListenPort: dialUdpAddr.Port}},
			pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp},
		},
	}
	for i, tc := range testcases {
		e := tc.c.pexEvent(tc.t)
		require.EqualValues(t, tc.e, e, i)
	}
}