8 "github.com/anacrolix/missinggo/v2/iter"
9 "github.com/davecgh/go-spew/spew"
10 qt "github.com/frankban/quicktest"
12 "github.com/anacrolix/torrent/metainfo"
13 request_strategy "github.com/anacrolix/torrent/request-strategy"
14 "github.com/anacrolix/torrent/storage"
// makeRequestStrategyPiece obtains a request_strategy.Piece from the given
// request_strategy.Torrent. Body is elided from this view — presumably it
// returns t.Piece(0) or similar; confirm against the full file. Used below to
// verify the conversion does not heap-allocate.
17 func makeRequestStrategyPiece(t request_strategy.Torrent) request_strategy.Piece {
21 func TestRequestStrategyPieceDoesntAlloc(t *testing.T) {
// NOTE(review): `c` used below is declared on a line elided from this view —
// presumably c := qt.New(t); confirm against the full file.
23 akshalTorrent := &Torrent{pieces: make([]Piece, 1)}
24 rst := requestStrategyTorrent{akshalTorrent}
// Snapshot heap stats immediately around the piece construction so the
// assertion below can prove it performs zero heap allocations.
25 var before, after runtime.MemStats
26 runtime.ReadMemStats(&before)
27 p := makeRequestStrategyPiece(rst)
28 runtime.ReadMemStats(&after)
// Equal HeapAlloc before and after means nothing escaped to the heap.
29 c.Assert(before.HeapAlloc, qt.Equals, after.HeapAlloc)
30 // We have to use p, or it gets optimized away.
31 spew.Fdump(io.Discard, p)
// storagePiece is a minimal storage.PieceImpl stub used by the benchmark
// below. Its fields are elided from this view, but a `complete` field backs
// Completion — see that method.
34 type storagePiece struct {
// ReadAt satisfies the read side of storage.PieceImpl (io.ReaderAt shape).
// Body is elided from this view — presumably a stub; confirm in the full file.
38 func (s storagePiece) ReadAt(p []byte, off int64) (n int, err error) {
// WriteAt satisfies the write side of storage.PieceImpl (io.WriterAt shape).
// Body is elided from this view — presumably a stub; confirm in the full file.
43 func (s storagePiece) WriteAt(p []byte, off int64) (n int, err error) {
// MarkComplete satisfies storage.PieceImpl. Body is elided from this view —
// presumably a no-op stub, since completion here is driven by
// storageClient.completed; confirm in the full file.
48 func (s storagePiece) MarkComplete() error {
// MarkNotComplete satisfies storage.PieceImpl. Body is elided from this view —
// presumably a no-op stub; confirm in the full file.
53 func (s storagePiece) MarkNotComplete() error {
// Completion reports the piece's completion state as always known (Ok: true),
// with Complete taken directly from the stub's flag set at construction time
// in storageClient.OpenTorrent.
58 func (s storagePiece) Completion() storage.Completion {
59 return storage.Completion{Ok: true, Complete: s.complete}
// Compile-time assertion that storagePiece satisfies storage.PieceImpl.
62 var _ storage.PieceImpl = storagePiece{}
// storageClient is a stub storage client for BenchmarkRequestStrategy. Its
// fields are elided from this view, but a `completed` field acts as a
// watermark: pieces with index below it report complete (see OpenTorrent).
64 type storageClient struct {
// OpenTorrent returns a storage.TorrentImpl whose pieces report complete iff
// their index is below the client's `completed` watermark. One parameter
// (original line 69) and the trailing closing lines are elided from this view.
68 func (s *storageClient) OpenTorrent(
70 infoHash metainfo.Hash,
71 ) (storage.TorrentImpl, error) {
72 return storage.TorrentImpl{
// Each requested piece is a fresh value stub; `complete` is fixed at call time
// from the current watermark.
73 Piece: func(p metainfo.Piece) storage.PieceImpl {
74 return storagePiece{complete: p.Index() < s.completed}
79 func BenchmarkRequestStrategy(b *testing.B) {
// NOTE(review): `c` used below is declared on a line elided from this view —
// presumably c := qt.New(b); confirm against the full file.
81 cl := newTestingClient(b)
82 storageClient := storageClient{}
83 tor, new := cl.AddTorrentOpt(AddTorrentOpts{
84 Storage: &storageClient,
// Triggers are disabled so the bulk priority/completion updates below don't
// fan out extra work outside the timed region.
86 tor.disableTriggers = true
87 c.Assert(new, qt.IsTrue)
// 256 KiB pieces (1<<18 bytes), 30k pieces.
88 const pieceLength = 1 << 8 << 10
89 const numPieces = 30_000
90 err := tor.setInfo(&metainfo.Info{
91 Pieces: make([]byte, numPieces*metainfo.HashSize),
92 PieceLength: pieceLength,
93 Length: pieceLength * numPieces,
95 c.Assert(err, qt.IsNil)
97 peer := cl.newConnection(nil, newConnectionOpts{
101 c.Assert(tor.storage, qt.IsNotNil)
102 const chunkSize = defaultChunkSize
// Pretend the peer has every piece, then want every piece at normal priority.
103 peer.onPeerHasAllPiecesNoTriggers()
104 for i := 0; i < tor.numPieces(); i++ {
105 tor.pieces[i].priority.Raise(PiecePriorityNormal)
106 tor.updatePiecePriorityNoTriggers(i)
// Unchoked so the request strategy will actually produce requests.
108 peer.peerChoking = false
// NOTE(review): `for _ = range` is non-idiomatic — `for range iter.N(b.N)`
// is equivalent and gofmt/staticcheck-clean. Left as-is here (doc-only pass).
112 for _ = range iter.N(b.N) {
// Reset all pieces to incomplete at the start of each benchmark iteration.
113 storageClient.completed = 0
114 for pieceIndex := range iter.N(numPieces) {
115 tor.updatePieceCompletion(pieceIndex)
// Walk completion forward one piece at a time, recomputing the desired
// request state at each step.
117 for completed := 0; completed <= numPieces; completed += 1 {
118 storageClient.completed = completed
// NOTE(review): a line is elided before this call — presumably a
// `if completed > 0` guard, since index -1 would be out of range; confirm.
120 tor.updatePieceCompletion(completed - 1)
122 // Starting and stopping timers around this part causes lots of GC overhead.
123 rs := peer.getDesiredRequestState()
124 tor.cacheNextRequestIndexesForReuse(rs.Requests.requestIndexes)
125 // End of part that should be timed.
// Expected request count: every chunk of every still-incomplete piece,
// capped by the client's unverified-bytes budget.
126 remainingChunks := (numPieces - completed) * (pieceLength / chunkSize)
127 c.Assert(rs.Requests.requestIndexes, qt.HasLen, minInt(
129 int(cl.config.MaxUnverifiedBytes/chunkSize)))