package torrent

import (
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"sync"
	"testing"

	g "github.com/anacrolix/generics"
	"github.com/anacrolix/log"
	"github.com/anacrolix/missinggo/v2"
	"github.com/anacrolix/missinggo/v2/bitmap"
	qt "github.com/frankban/quicktest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/anacrolix/torrent/bencode"
	"github.com/anacrolix/torrent/internal/testutil"
	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

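// r is shorthand for building a Request for piece index i, chunk offset b, and chunk length l.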
func r(i, b, l pp.Integer) Request {
	return Request{i, ChunkSpec{b, l}}
}

// Check the given request is correct for various torrent offsets.
func TestTorrentRequest(t *testing.T) {
	const s = 472183431 // Length of torrent.
	for _, _case := range []struct {
		off int64   // An offset into the torrent.
		req Request // The expected request. The zero value means !ok.
	}{
		// Invalid offset.
		{-1, Request{}},
		{0, r(0, 0, 16384)},
		// One before the end of a piece.
		{1<<18 - 1, r(0, 1<<18-16384, 16384)},
		// Offset beyond torrent length.
		{472 * 1 << 20, Request{}},
		// One before the end of the torrent. Complicates the chunk length.
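		// Offset s-1 lands in piece (s-1)/(1<<18) = 1801 at chunk offset 49152; only 12935
		// bytes of the torrent remain from there, so the final chunk is shorter than 16384.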
		{s - 1, r((s-1)/(1<<18), (s-1)%(1<<18)/(16384)*(16384), 12935)},
		{1, r(0, 0, 16384)},
		// One before end of chunk.
		{16383, r(0, 0, 16384)},
		// Second chunk.
		{16384, r(0, 16384, 16384)},
	} {
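		// torrentOffsetRequest maps an absolute torrent offset to the request for the chunk
		// containing it, given the torrent length, piece length, and chunk size.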
		req, ok := torrentOffsetRequest(s, 1<<18, 16384, _case.off)
		if (_case.req == Request{}) == ok {
			t.Fatalf("unexpected ok=%v for offset %d", ok, _case.off)
		}
		if req != _case.req {
			t.Fatalf("expected %v, got %v", _case.req, req)
		}
	}
}

func TestAppendToCopySlice(t *testing.T) {
	orig := []int{1, 2, 3}
	dupe := append([]int{}, orig...)
	dupe[0] = 4
	if orig[0] != 1 {
		t.FailNow()
	}
}

func TestTorrentString(t *testing.T) {
	tor := &Torrent{}
	s := tor.InfoHash().HexString()
	if s != "0000000000000000000000000000000000000000" {
		t.FailNow()
	}
}

// This benchmark is from the observation that a lot of overlapping Readers on
// a large torrent with small pieces had a lot of overhead in recalculating
// piece priorities every time a reader (possibly in another Torrent) changed.
func BenchmarkUpdatePiecePriorities(b *testing.B) {
	const (
		numPieces   = 13410
		pieceLength = 256 << 10
	)
	cl := &Client{config: TestingConfig(b)}
	cl.initLogger()
	t := cl.newTorrent(metainfo.Hash{}, nil)
	require.NoError(b, t.setInfo(&metainfo.Info{
		Pieces:      make([]byte, metainfo.HashSize*numPieces),
		PieceLength: pieceLength,
		Length:      pieceLength * numPieces,
	}))
	t.onSetInfo()
	assert.EqualValues(b, numPieces, t.numPieces())
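	// Open several overlapping readers so that each priority update has multiple reader
	// ranges to consider.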
	for i := 0; i < 7; i += 1 {
		r := t.NewReader()
		r.SetReadahead(32 << 20)
		r.Seek(3500000, io.SeekStart)
	}
	assert.Len(b, t.readers, 7)
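	// Mark every third piece completed so priority updates see a mix of completed and
	// pending pieces.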
	for i := 0; i < t.numPieces(); i += 3 {
		t._completedPieces.Add(bitmap.BitIndex(i))
	}
	t.DownloadPieces(0, t.numPieces())
	for i := 0; i < b.N; i += 1 {
		t.updateAllPiecePriorities("")
	}
}

// Check that a torrent containing zero-length file(s) will start, and that
// they're created in the filesystem. The client storage is assumed to be
// file-based on the native filesystem.
func testEmptyFilesAndZeroPieceLength(t *testing.T, cfg *ClientConfig) {
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()
	ib, err := bencode.Marshal(metainfo.Info{
		Name:        "empty",
		Length:      0,
		PieceLength: 0,
	})
	require.NoError(t, err)
	fp := filepath.Join(cfg.DataDir, "empty")
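	// Ensure the zero-length file doesn't already exist, so the test observes it being created.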
	os.Remove(fp)
	assert.False(t, missinggo.FilePathExists(fp))
	tt, err := cl.AddTorrent(&metainfo.MetaInfo{
		InfoBytes: ib,
	})
	require.NoError(t, err)
	defer tt.Drop()
	tt.DownloadAll()
	require.True(t, cl.WaitAll())
	assert.True(t, tt.Complete.Bool())
	assert.True(t, missinggo.FilePathExists(fp))
}

func TestEmptyFilesAndZeroPieceLengthWithFileStorage(t *testing.T) {
	cfg := TestingConfig(t)
	ci := storage.NewFile(cfg.DataDir)
	defer ci.Close()
	cfg.DefaultStorage = ci
	testEmptyFilesAndZeroPieceLength(t, cfg)
}

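// Check that a failed piece hash clears the piece's dirty chunks so they can be requested again.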
func TestPieceHashFailed(t *testing.T) {
	mi := testutil.GreetingMetaInfo()
	cl := newTestingClient(t)
	tt := cl.newTorrent(mi.HashInfoBytes(), badStorage{})
	tt.setChunkSize(2)
	require.NoError(t, tt.setInfoBytesLocked(mi.InfoBytes))
	tt.cl.lock()
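	// Mark every chunk of piece 1 dirty, then report the piece as failing its hash check.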
	tt.dirtyChunks.AddRange(
		uint64(tt.pieceRequestIndexOffset(1)),
		uint64(tt.pieceRequestIndexOffset(1)+3))
	require.True(t, tt.pieceAllDirty(1))
	tt.pieceHashed(1, false, nil)
	// Dirty chunks should be cleared so we can try again.
	require.False(t, tt.pieceAllDirty(1))
	tt.cl.unlock()
}

// Check the behaviour of Torrent.Metainfo when metadata is not completed.
func TestTorrentMetainfoIncompleteMetadata(t *testing.T) {
	cfg := TestingConfig(t)
	cfg.Debug = true
	// Disable the Fast extension requirement, because we manually initiate a connection that
	// doesn't support it.
	cfg.MinPeerExtensions.SetBit(pp.ExtensionBitFast, false)
	cl, err := NewClient(cfg)
	require.NoError(t, err)
	defer cl.Close()

	mi := testutil.GreetingMetaInfo()
	ih := mi.HashInfoBytes()

	tt, _ := cl.AddTorrentInfoHash(ih)
	assert.Nil(t, tt.Metainfo().InfoBytes)
	assert.False(t, tt.haveAllMetadataPieces())

	nc, err := net.Dial("tcp", fmt.Sprintf(":%d", cl.LocalPort()))
	require.NoError(t, err)
	defer nc.Close()

	var pex PeerExtensionBits
	pex.SetBit(pp.ExtensionBitLtep, true)
	hr, err := pp.Handshake(nc, &ih, [20]byte{}, pex)
	require.NoError(t, err)
	assert.True(t, hr.PeerExtensionBits.GetBit(pp.ExtensionBitLtep))
	assert.EqualValues(t, cl.PeerID(), hr.PeerID)
	assert.EqualValues(t, ih, hr.Hash)

	assert.EqualValues(t, 0, tt.metadataSize())

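	// While holding the client lock, send an extended handshake advertising metadata_size, then
	// wait for the torrent to notice the metadata change.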
	func() {
		cl.lock()
		defer cl.unlock()
		go func() {
			_, err = nc.Write(pp.Message{
				Type:       pp.Extended,
				ExtendedID: pp.HandshakeExtendedID,
				ExtendedPayload: func() []byte {
					d := map[string]interface{}{
						"metadata_size": len(mi.InfoBytes),
					}
					b, err := bencode.Marshal(d)
					if err != nil {
						panic(err)
					}
					return b
				}(),
			}.MustMarshalBinary())
			require.NoError(t, err)
		}()
		tt.metadataChanged.Wait()
	}()
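	// The torrent should have allocated a zeroed metadata buffer of the advertised size, but it
	// still has no metadata pieces or info.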
	assert.Equal(t, make([]byte, len(mi.InfoBytes)), tt.metadataBytes)
	assert.False(t, tt.haveAllMetadataPieces())
	assert.Nil(t, tt.Metainfo().InfoBytes)
}

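// Check relative availability accounting when a peer sends Have before the info is known, then
// HaveNone afterwards, and the torrent is closed.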
func TestRelativeAvailabilityHaveNone(t *testing.T) {
	c := qt.New(t)
	var err error
	cl := Client{
		config: TestingConfig(t),
	}
	tt := Torrent{
		cl:           &cl,
		logger:       log.Default,
		gotMetainfoC: make(chan struct{}),
	}
	tt.setChunkSize(2)
	g.MakeMapIfNil(&tt.conns)
	pc := PeerConn{}
	pc.t = &tt
	pc.peerImpl = &pc
	pc.initRequestState()
	g.InitNew(&pc.callbacks)
	tt.conns[&pc] = struct{}{}
	err = pc.peerSentHave(0)
	c.Assert(err, qt.IsNil)
	info := testutil.Greeting.Info(5)
	err = tt.setInfo(&info)
	c.Assert(err, qt.IsNil)
	tt.onSetInfo()
	err = pc.peerSentHaveNone()
	c.Assert(err, qt.IsNil)
	var wg sync.WaitGroup
	tt.close(&wg)
	tt.assertAllPiecesRelativeAvailabilityZero()
}