storage/test/bench-piece-mark-complete.go
package test_storage

import (
        "bytes"
        "math/rand"
        "sync"
        "testing"

        qt "github.com/frankban/quicktest"

        "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/storage"
)
14
15 const (
16         ChunkSize        = 1 << 14
17         DefaultPieceSize = 2 << 20
18         DefaultNumPieces = 16
19 )

// BenchmarkPieceMarkComplete writes chunks to the storage concurrently and waits for them all to
// complete, mirroring the behaviour of the peer connection read loop. It then marks each piece
// complete and reads it back.
func BenchmarkPieceMarkComplete(
        b *testing.B, ci storage.ClientImpl,
        pieceSize int64, numPieces int,
        // capacity drives any special handling that may be configured into the storage
        // implementation: pass the storage's configured capacity, or zero if it is unbounded.
        capacity int64,
) {
        c := qt.New(b)
        info := &metainfo.Info{
                Pieces:      make([]byte, numPieces*metainfo.HashSize),
                PieceLength: pieceSize,
                Length:      pieceSize * int64(numPieces),
                Name:        "TorrentName",
        }
        ti, err := ci.OpenTorrent(info, metainfo.Hash{})
        c.Assert(err, qt.IsNil)
        tw := storage.Torrent{ti}
        defer tw.Close()
        // The piece hashes are random; the data written below is never verified against them.
        rand.Read(info.Pieces)
        data := make([]byte, pieceSize)
        readData := make([]byte, pieceSize)
        // Each benchmark iteration processes the whole torrent's worth of data.
        b.SetBytes(int64(numPieces) * pieceSize)
        oneIter := func() {
                for pieceIndex := 0; pieceIndex < numPieces; pieceIndex += 1 {
                        pi := tw.Piece(info.Piece(pieceIndex))
                        rand.Read(data)
                        b.StartTimer()
                        // Write the piece as concurrent chunk-sized writes, as the peer
                        // connection read loop does.
                        var wg sync.WaitGroup
                        for off := int64(0); off < int64(len(data)); off += ChunkSize {
                                wg.Add(1)
                                go func(off int64) {
                                        defer wg.Done()
                                        n, err := pi.WriteAt(data[off:off+ChunkSize], off)
                                        if err != nil {
                                                panic(err)
                                        }
                                        if n != ChunkSize {
                                                panic(n)
                                        }
                                }(off)
                        }
                        wg.Wait()
                        if capacity == 0 {
                                pi.MarkNotComplete()
                        }
                        // This assertion may not hold if users of this benchmark don't configure
                        // their storage cache with the expected capacity.
                        c.Assert(pi.Completion(), qt.Equals, storage.Completion{Complete: false, Ok: true})
                        c.Assert(pi.MarkComplete(), qt.IsNil)
                        c.Assert(pi.Completion(), qt.Equals, storage.Completion{Complete: true, Ok: true})
                        // Read the piece back and check it round-trips intact.
                        n, err := pi.WriteTo(bytes.NewBuffer(readData[:0]))
                        b.StopTimer()
                        c.Assert(err, qt.IsNil)
                        c.Assert(n, qt.Equals, int64(len(data)))
                        c.Assert(bytes.Equal(readData[:n], data), qt.IsTrue)
                }
        }
        // Prime the cache to capacity so the timed iterations run against a full cache.
        if capacity > 0 {
                iterN := int((capacity + info.TotalLength() - 1) / info.TotalLength())
                for i := 0; i < iterN; i += 1 {
                        oneIter()
                }
        }
        b.StopTimer()
        b.ResetTimer()
        for i := 0; i < b.N; i += 1 {
                oneIter()
        }
}
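
// What follows is a minimal usage sketch, not part of the original file: a storage
// implementation's tests can wrap the helper above in a standard benchmark. The wrapper name is
// hypothetical; storage.NewFile and testing.B's TempDir are real APIs from this module and the
// standard library. Zero capacity is passed because file storage is unbounded; a capped
// implementation would pass its configured capacity instead.
func BenchmarkMarkCompleteFileStorage(b *testing.B) {
        // Back the benchmark with plain file storage rooted in a per-benchmark temp dir.
        ci := storage.NewFile(b.TempDir())
        defer ci.Close()
        BenchmarkPieceMarkComplete(b, ci, DefaultPieceSize, DefaultNumPieces, 0)
}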