// Source: Sergey Matveev's repositories - btrtrc.git / request-strategy-impls_test.go
// blob ad15166259fe962e8063f397d1b5275788a0e6ca
1 package torrent
2
3 import (
4         "io"
5         "runtime"
6         "testing"
7
8         "github.com/anacrolix/missinggo/v2/iter"
9         "github.com/davecgh/go-spew/spew"
10         qt "github.com/frankban/quicktest"
11
12         "github.com/anacrolix/torrent/metainfo"
13         request_strategy "github.com/anacrolix/torrent/request-strategy"
14         "github.com/anacrolix/torrent/storage"
15 )
16
17 func makeRequestStrategyPiece(t request_strategy.Torrent) request_strategy.Piece {
18         return t.Piece(0)
19 }
20
// TestRequestStrategyPieceDoesntAlloc checks that fetching a
// request_strategy.Piece via requestStrategyTorrent causes no heap
// allocation, by comparing HeapAlloc before and after the call. Any code
// added between the two ReadMemStats calls could itself allocate and break
// the test, so this body must stay minimal.
func TestRequestStrategyPieceDoesntAlloc(t *testing.T) {
	c := qt.New(t)
	akshalTorrent := &Torrent{pieces: make([]Piece, 1)}
	rst := requestStrategyTorrent{akshalTorrent}
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	p := makeRequestStrategyPiece(rst)
	runtime.ReadMemStats(&after)
	c.Assert(before.HeapAlloc, qt.Equals, after.HeapAlloc)
	// We have to use p, or it gets optimized away.
	spew.Fdump(io.Discard, p)
}
33
34 type storagePiece struct {
35         complete bool
36 }
37
38 func (s storagePiece) ReadAt(p []byte, off int64) (n int, err error) {
39         //TODO implement me
40         panic("implement me")
41 }
42
43 func (s storagePiece) WriteAt(p []byte, off int64) (n int, err error) {
44         //TODO implement me
45         panic("implement me")
46 }
47
48 func (s storagePiece) MarkComplete() error {
49         //TODO implement me
50         panic("implement me")
51 }
52
53 func (s storagePiece) MarkNotComplete() error {
54         //TODO implement me
55         panic("implement me")
56 }
57
58 func (s storagePiece) Completion() storage.Completion {
59         return storage.Completion{Ok: true, Complete: s.complete}
60 }
61
62 var _ storage.PieceImpl = storagePiece{}
63
64 type storageClient struct {
65         completed int
66 }
67
68 func (s *storageClient) OpenTorrent(
69         info *metainfo.Info,
70         infoHash metainfo.Hash,
71 ) (storage.TorrentImpl, error) {
72         return storage.TorrentImpl{
73                 Piece: func(p metainfo.Piece) storage.PieceImpl {
74                         return storagePiece{complete: p.Index() < s.completed}
75                 },
76         }, nil
77 }
78
79 func BenchmarkRequestStrategy(b *testing.B) {
80         c := qt.New(b)
81         cl := newTestingClient(b)
82         storageClient := storageClient{}
83         tor, new := cl.AddTorrentOpt(AddTorrentOpts{
84                 Storage: &storageClient,
85         })
86         tor.disableTriggers = true
87         c.Assert(new, qt.IsTrue)
88         const pieceLength = 1 << 8 << 10
89         const numPieces = 30_000
90         err := tor.setInfo(&metainfo.Info{
91                 Pieces:      make([]byte, numPieces*metainfo.HashSize),
92                 PieceLength: pieceLength,
93                 Length:      pieceLength * numPieces,
94         })
95         c.Assert(err, qt.IsNil)
96         tor.onSetInfo()
97         peer := cl.newConnection(nil, newConnectionOpts{
98                 network: "test",
99         })
100         peer.setTorrent(tor)
101         c.Assert(tor.storage, qt.IsNotNil)
102         const chunkSize = defaultChunkSize
103         peer.onPeerHasAllPiecesNoTriggers()
104         for i := 0; i < tor.numPieces(); i++ {
105                 tor.pieces[i].priority.Raise(PiecePriorityNormal)
106                 tor.updatePiecePriorityNoTriggers(i)
107         }
108         peer.peerChoking = false
109         //b.StopTimer()
110         b.ResetTimer()
111         //b.ReportAllocs()
112         for _ = range iter.N(b.N) {
113                 storageClient.completed = 0
114                 for pieceIndex := range iter.N(numPieces) {
115                         tor.updatePieceCompletion(pieceIndex)
116                 }
117                 for completed := 0; completed <= numPieces; completed += 1 {
118                         storageClient.completed = completed
119                         if completed > 0 {
120                                 tor.updatePieceCompletion(completed - 1)
121                         }
122                         // Starting and stopping timers around this part causes lots of GC overhead.
123                         rs := peer.getDesiredRequestState()
124                         tor.cacheNextRequestIndexesForReuse(rs.Requests.requestIndexes)
125                         // End of part that should be timed.
126                         remainingChunks := (numPieces - completed) * (pieceLength / chunkSize)
127                         c.Assert(rs.Requests.requestIndexes, qt.HasLen, minInt(
128                                 remainingChunks,
129                                 int(cl.config.MaxUnverifiedBytes/chunkSize)))
130                 }
131         }
132 }