]> Sergey Matveev's repositories - btrtrc.git/blob - torrent_test.go
cmd/btrtrc client
[btrtrc.git] / torrent_test.go
1 package torrent
2
3 import (
4         "fmt"
5         "io"
6         "net"
7         "os"
8         "path/filepath"
9         "sync"
10         "testing"
11
12         g "github.com/anacrolix/generics"
13         "github.com/anacrolix/log"
14         "github.com/anacrolix/missinggo/v2"
15         "github.com/anacrolix/missinggo/v2/bitmap"
16         qt "github.com/frankban/quicktest"
17         "github.com/stretchr/testify/assert"
18         "github.com/stretchr/testify/require"
19
20         "github.com/anacrolix/torrent/bencode"
21         "github.com/anacrolix/torrent/internal/testutil"
22         "github.com/anacrolix/torrent/metainfo"
23         pp "github.com/anacrolix/torrent/peer_protocol"
24         "github.com/anacrolix/torrent/storage"
25 )
26
27 func r(i, b, l pp.Integer) Request {
28         return Request{i, ChunkSpec{b, l}}
29 }
30
31 // Check the given request is correct for various torrent offsets.
32 func TestTorrentRequest(t *testing.T) {
33         const s = 472183431 // Length of torrent.
34         for _, _case := range []struct {
35                 off int64   // An offset into the torrent.
36                 req Request // The expected request. The zero value means !ok.
37         }{
38                 // Invalid offset.
39                 {-1, Request{}},
40                 {0, r(0, 0, 16384)},
41                 // One before the end of a piece.
42                 {1<<18 - 1, r(0, 1<<18-16384, 16384)},
43                 // Offset beyond torrent length.
44                 {472 * 1 << 20, Request{}},
45                 // One before the end of the torrent. Complicates the chunk length.
46                 {s - 1, r((s-1)/(1<<18), (s-1)%(1<<18)/(16384)*(16384), 12935)},
47                 {1, r(0, 0, 16384)},
48                 // One before end of chunk.
49                 {16383, r(0, 0, 16384)},
50                 // Second chunk.
51                 {16384, r(0, 16384, 16384)},
52         } {
53                 req, ok := torrentOffsetRequest(472183431, 1<<18, 16384, _case.off)
54                 if (_case.req == Request{}) == ok {
55                         t.Fatalf("expected %v, got %v", _case.req, req)
56                 }
57                 if req != _case.req {
58                         t.Fatalf("expected %v, got %v", _case.req, req)
59                 }
60         }
61 }
62
// Verify that appending a slice's elements onto an empty slice yields an
// independent copy: mutating the duplicate must not touch the original.
func TestAppendToCopySlice(t *testing.T) {
	orig := []int{1, 2, 3}
	dupe := append([]int(nil), orig...)
	dupe[0] = 4
	if got := orig[0]; got != 1 {
		t.FailNow()
	}
}
71
72 func TestTorrentString(t *testing.T) {
73         tor := &Torrent{}
74         tor.infoHash.Ok = true
75         tor.infoHash.Value[0] = 1
76         s := tor.InfoHash().HexString()
77         if s != "0100000000000000000000000000000000000000" {
78                 t.FailNow()
79         }
80 }
81
82 // This benchmark is from the observation that a lot of overlapping Readers on
83 // a large torrent with small pieces had a lot of overhead in recalculating
84 // piece priorities everytime a reader (possibly in another Torrent) changed.
85 func BenchmarkUpdatePiecePriorities(b *testing.B) {
86         const (
87                 numPieces   = 13410
88                 pieceLength = 256 << 10
89         )
90         cl := &Client{config: TestingConfig(b)}
91         cl.initLogger()
92         t := cl.newTorrentForTesting()
93         require.NoError(b, t.setInfo(&metainfo.Info{
94                 Pieces:      make([]byte, metainfo.HashSize*numPieces),
95                 PieceLength: pieceLength,
96                 Length:      pieceLength * numPieces,
97         }))
98         t.onSetInfo()
99         assert.EqualValues(b, 13410, t.numPieces())
100         for i := 0; i < 7; i += 1 {
101                 r := t.NewReader()
102                 r.SetReadahead(32 << 20)
103                 r.Seek(3500000, io.SeekStart)
104         }
105         assert.Len(b, t.readers, 7)
106         for i := 0; i < t.numPieces(); i += 3 {
107                 t._completedPieces.Add(bitmap.BitIndex(i))
108         }
109         t.DownloadPieces(0, t.numPieces())
110         for i := 0; i < b.N; i += 1 {
111                 t.updateAllPiecePriorities("")
112         }
113 }
114
// Check that a torrent containing zero-length file(s) will start, and that
// they're created in the filesystem. The client storage is assumed to be
// file-based on the native filesystem.
func testEmptyFilesAndZeroPieceLength(t *testing.T, cfg *ClientConfig) {
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	// An info with zero length and zero piece length: nothing to download.
	ib, err := bencode.Marshal(metainfo.Info{
		Name:        "empty",
		Length:      0,
		PieceLength: 0,
	})
	require.NoError(t, err)
	fp := filepath.Join(cfg.DataDir, "empty")
	// Best-effort removal; the error is ignored deliberately in case the
	// file wasn't there to begin with.
	os.Remove(fp)
	assert.False(t, missinggo.FilePathExists(fp))
	tt, err := cl.AddTorrent(&metainfo.MetaInfo{
		InfoBytes: ib,
	})
	require.NoError(t, err)
	defer tt.Drop()
	tt.DownloadAll()
	// With zero bytes to fetch, the torrent should complete immediately...
	require.True(t, cl.WaitAll())
	assert.True(t, tt.Complete.Bool())
	// ...and the empty file should have been created on disk.
	assert.True(t, missinggo.FilePathExists(fp))
}
141
142 func TestEmptyFilesAndZeroPieceLengthWithFileStorage(t *testing.T) {
143         cfg := TestingConfig(t)
144         ci := storage.NewFile(cfg.DataDir)
145         defer ci.Close()
146         cfg.DefaultStorage = ci
147         testEmptyFilesAndZeroPieceLength(t, cfg)
148 }
149
// Check that when a piece fails its hash check, the piece's dirty-chunk
// state is cleared so the chunks can be requested again.
func TestPieceHashFailed(t *testing.T) {
	mi := testutil.GreetingMetaInfo()
	cl := newTestingClient(t)
	// badStorage presumably fails reads/writes so the hash can't pass —
	// NOTE(review): confirm against badStorage's definition elsewhere.
	tt := cl.newTorrent(mi.HashInfoBytes(), badStorage{})
	tt.setChunkSize(2)
	require.NoError(t, tt.setInfoBytesLocked(mi.InfoBytes))
	tt.cl.lock()
	// Mark the first three chunk request indices of piece 1 dirty.
	tt.dirtyChunks.AddRange(
		uint64(tt.pieceRequestIndexOffset(1)),
		uint64(tt.pieceRequestIndexOffset(1)+3))
	require.True(t, tt.pieceAllDirty(1))
	// Report piece 1 as having failed its hash.
	tt.pieceHashed(1, false, nil)
	// Dirty chunks should be cleared so we can try again.
	require.False(t, tt.pieceAllDirty(1))
	tt.cl.unlock()
}
166
167 // Check the behaviour of Torrent.Metainfo when metadata is not completed.
168 func TestTorrentMetainfoIncompleteMetadata(t *testing.T) {
169         cfg := TestingConfig(t)
170         cfg.Debug = true
171         // Disable this just because we manually initiate a connection without it.
172         cfg.MinPeerExtensions.SetBit(pp.ExtensionBitFast, false)
173         cl, err := NewClient(cfg)
174         require.NoError(t, err)
175         defer cl.Close()
176
177         mi := testutil.GreetingMetaInfo()
178         ih := mi.HashInfoBytes()
179
180         tt, _ := cl.AddTorrentInfoHash(ih)
181         assert.Nil(t, tt.Metainfo().InfoBytes)
182         assert.False(t, tt.haveAllMetadataPieces())
183
184         nc, err := net.Dial("tcp", fmt.Sprintf(":%d", cl.LocalPort()))
185         require.NoError(t, err)
186         defer nc.Close()
187
188         var pex PeerExtensionBits
189         pex.SetBit(pp.ExtensionBitLtep, true)
190         hr, err := pp.Handshake(nc, &ih, [20]byte{}, pex)
191         require.NoError(t, err)
192         assert.True(t, hr.PeerExtensionBits.GetBit(pp.ExtensionBitLtep))
193         assert.EqualValues(t, cl.PeerID(), hr.PeerID)
194         assert.EqualValues(t, ih, hr.Hash)
195
196         assert.EqualValues(t, 0, tt.metadataSize())
197
198         func() {
199                 cl.lock()
200                 defer cl.unlock()
201                 go func() {
202                         _, err = nc.Write(pp.Message{
203                                 Type:       pp.Extended,
204                                 ExtendedID: pp.HandshakeExtendedID,
205                                 ExtendedPayload: func() []byte {
206                                         d := map[string]interface{}{
207                                                 "metadata_size": len(mi.InfoBytes),
208                                         }
209                                         b, err := bencode.Marshal(d)
210                                         if err != nil {
211                                                 panic(err)
212                                         }
213                                         return b
214                                 }(),
215                         }.MustMarshalBinary())
216                         require.NoError(t, err)
217                 }()
218                 tt.metadataChanged.Wait()
219         }()
220         assert.Equal(t, make([]byte, len(mi.InfoBytes)), tt.metadataBytes)
221         assert.False(t, tt.haveAllMetadataPieces())
222         assert.Nil(t, tt.Metainfo().InfoBytes)
223 }
224
// Check that relative piece availability accounting ends balanced when a
// peer sends have(0) before the info is known and have-none after, across
// torrent info arrival and shutdown.
func TestRelativeAvailabilityHaveNone(t *testing.T) {
	c := qt.New(t)
	var err error
	cl := Client{
		config: TestingConfig(t),
	}
	tt := Torrent{
		cl:           &cl,
		logger:       log.Default,
		gotMetainfoC: make(chan struct{}),
	}
	tt.setChunkSize(2)
	g.MakeMapIfNil(&tt.conns)
	// Minimal peer connection wired directly onto the torrent, bypassing
	// the usual connection establishment.
	pc := PeerConn{}
	pc.t = &tt
	pc.peerImpl = &pc
	pc.initRequestState()
	g.InitNew(&pc.callbacks)
	tt.conns[&pc] = struct{}{}
	// Peer claims piece 0 while the torrent has no info yet.
	err = pc.peerSentHave(0)
	c.Assert(err, qt.IsNil)
	info := testutil.Greeting.Info(5)
	err = tt.setInfo(&info)
	c.Assert(err, qt.IsNil)
	tt.onSetInfo()
	// Peer then retracts everything with have-none.
	err = pc.peerSentHaveNone()
	c.Assert(err, qt.IsNil)
	var wg sync.WaitGroup
	tt.close(&wg)
	// After close, no piece may retain non-zero relative availability.
	tt.assertAllPiecesRelativeAvailabilityZero()
}