Sergey Matveev's repositories - btrtrc.git/commitdiff
Add MarkComplete benchmark for sqlite
authorMatt Joiner <anacrolix@gmail.com>
Mon, 18 Jan 2021 03:48:24 +0000 (14:48 +1100)
committerMatt Joiner <anacrolix@gmail.com>
Mon, 25 Jan 2021 04:54:37 +0000 (15:54 +1100)
metainfo/piece.go
storage/bench-resource-pieces.go [new file with mode: 0644]
storage/sqlite/sqlite-storage_test.go

index 8f50fa456c3911424779cba31cf09a0a595429b9..14cae2a3d37c50e714e603aec6f8daab8abfcb64 100644 (file)
@@ -5,7 +5,7 @@ import (
 )
 
 type Piece struct {
-       Info *Info
+       Info *Info // Can we embed the fields here instead, or is it something to do with saving memory?
        i    pieceIndex
 }
 
diff --git a/storage/bench-resource-pieces.go b/storage/bench-resource-pieces.go
new file mode 100644 (file)
index 0000000..b72f25c
--- /dev/null
@@ -0,0 +1,38 @@
+package storage
+
+import (
+       "bytes"
+       "io"
+       "io/ioutil"
+       "sync"
+       "testing"
+
+       qt "github.com/frankban/quicktest"
+)
+
+// BenchmarkPieceMarkComplete fills pi with data using concurrent
+// chunk-sized writes, checks the piece reports incomplete, marks it
+// complete, and verifies the stored bytes read back intact. It is
+// exported so individual storage implementations can share one
+// MarkComplete benchmark body.
+// NOTE(review): the loop assumes len(data) is an exact multiple of
+// chunkSize — otherwise data[off:off+chunkSize] panics on the final
+// chunk. Confirm against callers.
+func BenchmarkPieceMarkComplete(tb testing.TB, pi PieceImpl, data []byte) {
+       c := qt.New(tb)
+       var wg sync.WaitGroup
+       // Write every chunk concurrently to exercise the storage
+       // implementation under parallel WriteAt calls.
+       for off := int64(0); off < int64(len(data)); off += chunkSize {
+               wg.Add(1)
+               go func(off int64) {
+                       defer wg.Done()
+                       // panic rather than tb.Fatal: FailNow may only be
+                       // called from the goroutine running the test or
+                       // benchmark, not from spawned writers.
+                       n, err := pi.WriteAt(data[off:off+chunkSize], off)
+                       if err != nil {
+                               panic(err)
+                       }
+                       if n != chunkSize {
+                               panic(n)
+                       }
+               }(off)
+       }
+       wg.Wait()
+       // This might not apply if users of this benchmark don't cache with the expected capacity.
+       c.Assert(pi.Completion(), qt.Equals, Completion{Complete: false, Ok: true})
+       c.Assert(pi.MarkComplete(), qt.IsNil)
+       c.Assert(pi.Completion(), qt.Equals, Completion{true, true})
+       // Read the whole piece back and confirm it survived MarkComplete.
+       readData, err := ioutil.ReadAll(io.NewSectionReader(pi, 0, int64(len(data))))
+       c.Assert(err, qt.IsNil)
+       c.Assert(len(readData), qt.Equals, len(data))
+       c.Assert(bytes.Equal(readData, data), qt.IsTrue)
+}
index bb71d7febdf0a9161993e065087490f462180a7c..836cc385efa4d7b2fa8ecd9c0d11dc6b576ade80 100644 (file)
@@ -4,11 +4,16 @@ import (
        "bytes"
        "io"
        "io/ioutil"
+       "math/rand"
        "path/filepath"
        "sync"
        "testing"
 
        _ "github.com/anacrolix/envpprof"
+       "github.com/anacrolix/missinggo/iter"
+       "github.com/anacrolix/torrent/metainfo"
+       "github.com/anacrolix/torrent/storage"
+       qt "github.com/frankban/quicktest"
        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
 )
@@ -28,8 +33,10 @@ func newConnsAndProv(t *testing.T, opts NewPoolOpts) (ConnPool, *provider) {
 func TestTextBlobSize(t *testing.T) {
        _, prov := newConnsAndProv(t, NewPoolOpts{})
        a, _ := prov.NewInstance("a")
-       a.Put(bytes.NewBufferString("\x00hello"))
-       fi, _ := a.Stat()
+       err := a.Put(bytes.NewBufferString("\x00hello"))
+       qt.Assert(t, err, qt.IsNil)
+       fi, err := a.Stat()
+       qt.Assert(t, err, qt.IsNil)
        assert.EqualValues(t, 6, fi.Size())
 }
 
@@ -61,3 +68,29 @@ func TestSimultaneousIncrementalBlob(t *testing.T) {
        go doRead(&b1, &e1, rc1, 1)
        wg.Wait()
 }
+
+// BenchmarkMarkComplete benchmarks the sqlite storage's MarkComplete
+// path: it opens a pieces storage backed by an on-disk database, opens
+// a torrent with a single piece, and runs the shared
+// storage.BenchmarkPieceMarkComplete body b.N times over 8 MiB of
+// random data.
+func BenchmarkMarkComplete(b *testing.B) {
+       const pieceSize = 8 << 20
+       c := qt.New(b)
+       data := make([]byte, pieceSize)
+       rand.Read(data)
+       dbPath := filepath.Join(b.TempDir(), "storage.db")
+       b.Logf("storage db path: %q", dbPath)
+       // Capacity matches the piece size; the shared benchmark asserts
+       // the piece is incomplete before MarkComplete, which depends on
+       // the cache being sized with this expected capacity.
+       ci, err := NewPiecesStorage(NewPoolOpts{Path: dbPath, Capacity: pieceSize})
+       c.Assert(err, qt.IsNil)
+       defer ci.Close()
+       ti, err := ci.OpenTorrent(nil, metainfo.Hash{})
+       c.Assert(err, qt.IsNil)
+       defer ti.Close()
+       // Single-piece torrent: one hash entry, piece length == total length.
+       pi := ti.Piece(metainfo.Piece{
+               Info: &metainfo.Info{
+                       Pieces:      make([]byte, metainfo.HashSize),
+                       PieceLength: pieceSize,
+                       Length:      pieceSize,
+               },
+       })
+       // Exclude setup (storage creation, random data) from the timing.
+       b.ResetTimer()
+       for range iter.N(b.N) {
+               storage.BenchmarkPieceMarkComplete(b, pi, data)
+       }
+}