8 g "github.com/anacrolix/generics"
9 "github.com/anacrolix/missinggo/v2/iter"
10 "github.com/davecgh/go-spew/spew"
11 qt "github.com/frankban/quicktest"
13 "github.com/anacrolix/torrent/metainfo"
14 request_strategy "github.com/anacrolix/torrent/request-strategy"
15 "github.com/anacrolix/torrent/storage"
16 infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
// makeRequestStrategyPiece adapts a request_strategy.Torrent into a
// request_strategy.Piece for use in the allocation test below.
// NOTE(review): body elided in this view — presumably returns t.Piece(0)
// or similar; confirm against the full file.
19 func makeRequestStrategyPiece(t request_strategy.Torrent) request_strategy.Piece {
// TestRequestStrategyPieceDoesntAlloc verifies that wrapping a Torrent as a
// requestStrategyTorrent and extracting a request-strategy piece performs no
// heap allocation, by comparing runtime.MemStats.HeapAlloc before and after.
// NOTE(review): `c` is presumably a *qt.C created by qt.New(t) on an elided
// line — confirm against the full file.
23 func TestRequestStrategyPieceDoesntAlloc(t *testing.T) {
// A minimal real Torrent with a single piece, so the wrapper has something to index.
25 akshalTorrent := &Torrent{pieces: make([]Piece, 1)}
26 rst := requestStrategyTorrent{akshalTorrent}
27 var before, after runtime.MemStats
28 runtime.ReadMemStats(&before)
// The operation under test: must not allocate on the heap.
29 p := makeRequestStrategyPiece(rst)
30 runtime.ReadMemStats(&after)
// Equal HeapAlloc before and after means zero bytes were allocated in between.
31 c.Assert(before.HeapAlloc, qt.Equals, after.HeapAlloc)
32 // We have to use p, or it gets optimized away.
33 spew.Fdump(io.Discard, p)
// storagePiece is a stub storage.PieceImpl used by the benchmark below. Its
// completion state is fixed at construction (see the `complete` field read in
// Completion). Fields are elided in this view — NOTE(review): confirm the
// struct declares `complete bool`.
36 type storagePiece struct {
// ReadAt is part of storage.PieceImpl. Body elided in this view —
// NOTE(review): presumably a stub; confirm it does no real I/O.
40 func (s storagePiece) ReadAt(p []byte, off int64) (n int, err error) {
// WriteAt is part of storage.PieceImpl. Body elided in this view.
45 func (s storagePiece) WriteAt(p []byte, off int64) (n int, err error) {
// MarkComplete is part of storage.PieceImpl. Body elided in this view.
50 func (s storagePiece) MarkComplete() error {
// MarkNotComplete is part of storage.PieceImpl. Body elided in this view.
55 func (s storagePiece) MarkNotComplete() error {
// Completion reports the fixed completion state decided when the piece was
// constructed; Ok is always true so callers treat the answer as authoritative.
60 func (s storagePiece) Completion() storage.Completion {
61 return storage.Completion{Ok: true, Complete: s.complete}
// Compile-time check that storagePiece satisfies storage.PieceImpl.
64 var _ storage.PieceImpl = storagePiece{}
// storageClient is a stub storage client for the benchmark: pieces with index
// below `completed` report as complete, the rest as incomplete. Fields are
// elided in this view — NOTE(review): confirm it declares `completed int`.
66 type storageClient struct {
// OpenTorrent returns a TorrentImpl whose Piece constructor yields
// storagePiece stubs; a piece is complete iff its index is below the
// client's current `completed` watermark.
70 func (s *storageClient) OpenTorrent(
72 infoHash metainfo.Hash,
73 ) (storage.TorrentImpl, error) {
74 return storage.TorrentImpl{
75 Piece: func(p metainfo.Piece) storage.PieceImpl {
76 return storagePiece{complete: p.Index() < s.completed}
// BenchmarkRequestStrategy measures getDesiredRequestState on a synthetic
// 30k-piece torrent as storage completion sweeps from 0 to numPieces. The
// storageClient stub above supplies per-piece completion; a single peer that
// has all pieces and is unchoked drives the request generation.
// NOTE(review): `c` is presumably qt.New(b) on an elided line — confirm.
81 func BenchmarkRequestStrategy(b *testing.B) {
83 cl := newTestingClient(b)
84 storageClient := storageClient{}
// NOTE(review): `new` shadows the builtin of the same name within this
// function; consider renaming (e.g. `isNew`) in a follow-up.
85 tor, new := cl.AddTorrentOpt(AddTorrentOpts{
86 InfoHashV2: g.Some(infohash_v2.FromHexString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")),
87 Storage: &storageClient,
// Triggers are disabled so benchmark iterations drive updates explicitly.
89 tor.disableTriggers = true
90 c.Assert(new, qt.IsTrue)
// 256 KiB pieces, 30k pieces => pieceLength * numPieces total length below.
91 const pieceLength = 1 << 8 << 10
92 const numPieces = 30_000
// Fabricated info: only piece count/length matter, so hashes are zero bytes.
93 err := tor.setInfo(&metainfo.Info{
94 Pieces: make([]byte, numPieces*metainfo.HashSize),
95 PieceLength: pieceLength,
96 Length: pieceLength * numPieces,
98 c.Assert(err, qt.IsNil)
100 peer := cl.newConnection(nil, newConnectionOpts{
104 c.Assert(tor.storage, qt.IsNotNil)
105 const chunkSize = defaultChunkSize
// Peer advertises every piece; priorities raised without firing triggers.
106 peer.onPeerHasAllPiecesNoTriggers()
107 for i := 0; i < tor.numPieces(); i++ {
108 tor.pieces[i].priority.Raise(PiecePriorityNormal)
109 tor.updatePiecePriorityNoTriggers(i)
111 peer.peerChoking = false
// NOTE(review): `for _ = range` is non-idiomatic; `for range iter.N(b.N)`
// (or a plain counted loop over b.N) is the preferred form — staticcheck
// flags the unused blank assignment.
115 for _ = range iter.N(b.N) {
// Reset completion to zero and resync every piece's cached completion state.
116 storageClient.completed = 0
117 for pieceIndex := range iter.N(numPieces) {
118 tor.updatePieceCompletion(pieceIndex)
// Sweep the completion watermark upward one piece at a time.
120 for completed := 0; completed <= numPieces; completed += 1 {
121 storageClient.completed = completed
// NOTE(review): an elided line (orig 122) presumably guards completed > 0
// before refreshing the just-completed piece — confirm against the full file.
123 tor.updatePieceCompletion(completed - 1)
125 // Starting and stopping timers around this part causes lots of GC overhead.
126 rs := peer.getDesiredRequestState()
127 tor.cacheNextRequestIndexesForReuse(rs.Requests.requestIndexes)
128 // End of part that should be timed.
// Sanity: the number of desired requests is the remaining chunk count,
// capped by the client's MaxUnverifiedBytes budget.
129 remainingChunks := (numPieces - completed) * (pieceLength / chunkSize)
130 c.Assert(rs.Requests.requestIndexes, qt.HasLen, minInt(
132 int(cl.config.MaxUnverifiedBytes/chunkSize)))