]> Sergey Matveev's repositories - btrtrc.git/blob - request-strategy-impls_test.go
cmd/btrtrc client
[btrtrc.git] / request-strategy-impls_test.go
1 package torrent
2
3 import (
4         "io"
5         "runtime"
6         "testing"
7
8         g "github.com/anacrolix/generics"
9         "github.com/anacrolix/missinggo/v2/iter"
10         "github.com/davecgh/go-spew/spew"
11         qt "github.com/frankban/quicktest"
12
13         "github.com/anacrolix/torrent/metainfo"
14         request_strategy "github.com/anacrolix/torrent/request-strategy"
15         "github.com/anacrolix/torrent/storage"
16         infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
17 )
18
19 func makeRequestStrategyPiece(t request_strategy.Torrent) request_strategy.Piece {
20         return t.Piece(0)
21 }
22
// TestRequestStrategyPieceDoesntAlloc asserts that wrapping a Torrent piece
// for the request strategy does not heap-allocate, by comparing heap stats
// before and after the wrapping call.
func TestRequestStrategyPieceDoesntAlloc(t *testing.T) {
	c := qt.New(t)
	// A minimal Torrent with a single piece is enough to exercise the wrapper.
	akshalTorrent := &Torrent{pieces: make([]Piece, 1)}
	rst := requestStrategyTorrent{akshalTorrent}
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	p := makeRequestStrategyPiece(rst)
	runtime.ReadMemStats(&after)
	// Any allocation between the two ReadMemStats calls would change HeapAlloc.
	c.Assert(before.HeapAlloc, qt.Equals, after.HeapAlloc)
	// We have to use p, or it gets optimized away.
	spew.Fdump(io.Discard, p)
}
35
// storagePiece is a stub storage.PieceImpl whose completion state is fixed at
// construction. Its read/write/mark methods are unimplemented panic stubs;
// only Completion is exercised by the benchmark below.
type storagePiece struct {
	complete bool
}
39
// ReadAt is an unimplemented stub; the benchmark never reads piece data.
func (s storagePiece) ReadAt(p []byte, off int64) (n int, err error) {
	//TODO implement me
	panic("implement me")
}
44
// WriteAt is an unimplemented stub; the benchmark never writes piece data.
func (s storagePiece) WriteAt(p []byte, off int64) (n int, err error) {
	//TODO implement me
	panic("implement me")
}
49
// MarkComplete is an unimplemented stub; completion is driven externally via
// storageClient.completed, not by marking individual pieces.
func (s storagePiece) MarkComplete() error {
	//TODO implement me
	panic("implement me")
}
54
// MarkNotComplete is an unimplemented stub, like MarkComplete.
func (s storagePiece) MarkNotComplete() error {
	//TODO implement me
	panic("implement me")
}
59
60 func (s storagePiece) Completion() storage.Completion {
61         return storage.Completion{Ok: true, Complete: s.complete}
62 }
63
// Compile-time check that storagePiece satisfies storage.PieceImpl.
var _ storage.PieceImpl = storagePiece{}
65
// storageClient is a stub storage client used by the benchmark. Pieces with
// index below completed report as complete; the benchmark advances completed
// to simulate download progress.
type storageClient struct {
	completed int
}
69
70 func (s *storageClient) OpenTorrent(
71         info *metainfo.Info,
72         infoHash metainfo.Hash,
73 ) (storage.TorrentImpl, error) {
74         return storage.TorrentImpl{
75                 Piece: func(p metainfo.Piece) storage.PieceImpl {
76                         return storagePiece{complete: p.Index() < s.completed}
77                 },
78         }, nil
79 }
80
81 func BenchmarkRequestStrategy(b *testing.B) {
82         c := qt.New(b)
83         cl := newTestingClient(b)
84         storageClient := storageClient{}
85         tor, new := cl.AddTorrentOpt(AddTorrentOpts{
86                 InfoHashV2: g.Some(infohash_v2.FromHexString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")),
87                 Storage:    &storageClient,
88         })
89         tor.disableTriggers = true
90         c.Assert(new, qt.IsTrue)
91         const pieceLength = 1 << 8 << 10
92         const numPieces = 30_000
93         err := tor.setInfo(&metainfo.Info{
94                 Pieces:      make([]byte, numPieces*metainfo.HashSize),
95                 PieceLength: pieceLength,
96                 Length:      pieceLength * numPieces,
97         })
98         c.Assert(err, qt.IsNil)
99         tor.onSetInfo()
100         peer := cl.newConnection(nil, newConnectionOpts{
101                 network: "test",
102         })
103         peer.setTorrent(tor)
104         c.Assert(tor.storage, qt.IsNotNil)
105         const chunkSize = defaultChunkSize
106         peer.onPeerHasAllPiecesNoTriggers()
107         for i := 0; i < tor.numPieces(); i++ {
108                 tor.pieces[i].priority.Raise(PiecePriorityNormal)
109                 tor.updatePiecePriorityNoTriggers(i)
110         }
111         peer.peerChoking = false
112         //b.StopTimer()
113         b.ResetTimer()
114         //b.ReportAllocs()
115         for _ = range iter.N(b.N) {
116                 storageClient.completed = 0
117                 for pieceIndex := range iter.N(numPieces) {
118                         tor.updatePieceCompletion(pieceIndex)
119                 }
120                 for completed := 0; completed <= numPieces; completed += 1 {
121                         storageClient.completed = completed
122                         if completed > 0 {
123                                 tor.updatePieceCompletion(completed - 1)
124                         }
125                         // Starting and stopping timers around this part causes lots of GC overhead.
126                         rs := peer.getDesiredRequestState()
127                         tor.cacheNextRequestIndexesForReuse(rs.Requests.requestIndexes)
128                         // End of part that should be timed.
129                         remainingChunks := (numPieces - completed) * (pieceLength / chunkSize)
130                         c.Assert(rs.Requests.requestIndexes, qt.HasLen, minInt(
131                                 remainingChunks,
132                                 int(cl.config.MaxUnverifiedBytes/chunkSize)))
133                 }
134         }
135 }