From 434dfdf6e0bddd6c904bb08738d2cf0a5a85c2ae Mon Sep 17 00:00:00 2001
From: Matt Joiner
Date: Mon, 18 Jan 2021 14:48:24 +1100
Subject: [PATCH] Add MarkComplete benchmark for sqlite

---
 metainfo/piece.go                     |  2 +-
 storage/bench-resource-pieces.go      | 38 +++++++++++++++++++++++++++
 storage/sqlite/sqlite-storage_test.go | 37 ++++++++++++++++++++++++--
 3 files changed, 74 insertions(+), 3 deletions(-)
 create mode 100644 storage/bench-resource-pieces.go

diff --git a/metainfo/piece.go b/metainfo/piece.go
index 8f50fa45..14cae2a3 100644
--- a/metainfo/piece.go
+++ b/metainfo/piece.go
@@ -5,7 +5,7 @@ import (
 )
 
 type Piece struct {
-	Info *Info
+	Info *Info // Can we embed the fields here instead, or is it something to do with saving memory?
 	i    pieceIndex
 }
 
diff --git a/storage/bench-resource-pieces.go b/storage/bench-resource-pieces.go
new file mode 100644
index 00000000..b72f25c0
--- /dev/null
+++ b/storage/bench-resource-pieces.go
@@ -0,0 +1,38 @@
+package storage
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"sync"
+	"testing"
+
+	qt "github.com/frankban/quicktest"
+)
+
+func BenchmarkPieceMarkComplete(tb testing.TB, pi PieceImpl, data []byte) {
+	c := qt.New(tb)
+	var wg sync.WaitGroup
+	for off := int64(0); off < int64(len(data)); off += chunkSize {
+		wg.Add(1)
+		go func(off int64) {
+			defer wg.Done()
+			n, err := pi.WriteAt(data[off:off+chunkSize], off)
+			if err != nil {
+				panic(err)
+			}
+			if n != chunkSize {
+				panic(n)
+			}
+		}(off)
+	}
+	wg.Wait()
+	// This might not apply if users of this benchmark don't cache with the expected capacity.
+	c.Assert(pi.Completion(), qt.Equals, Completion{Complete: false, Ok: true})
+	c.Assert(pi.MarkComplete(), qt.IsNil)
+	c.Assert(pi.Completion(), qt.Equals, Completion{true, true})
+	readData, err := ioutil.ReadAll(io.NewSectionReader(pi, 0, int64(len(data))))
+	c.Assert(err, qt.IsNil)
+	c.Assert(len(readData), qt.Equals, len(data))
+	c.Assert(bytes.Equal(readData, data), qt.IsTrue)
+}
diff --git a/storage/sqlite/sqlite-storage_test.go b/storage/sqlite/sqlite-storage_test.go
index bb71d7fe..836cc385 100644
--- a/storage/sqlite/sqlite-storage_test.go
+++ b/storage/sqlite/sqlite-storage_test.go
@@ -4,11 +4,16 @@ import (
 	"bytes"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"path/filepath"
 	"sync"
 	"testing"
 
 	_ "github.com/anacrolix/envpprof"
+	"github.com/anacrolix/missinggo/iter"
+	"github.com/anacrolix/torrent/metainfo"
+	"github.com/anacrolix/torrent/storage"
+	qt "github.com/frankban/quicktest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -28,8 +33,10 @@ func newConnsAndProv(t *testing.T, opts NewPoolOpts) (ConnPool, *provider) {
 func TestTextBlobSize(t *testing.T) {
 	_, prov := newConnsAndProv(t, NewPoolOpts{})
 	a, _ := prov.NewInstance("a")
-	a.Put(bytes.NewBufferString("\x00hello"))
-	fi, _ := a.Stat()
+	err := a.Put(bytes.NewBufferString("\x00hello"))
+	qt.Assert(t, err, qt.IsNil)
+	fi, err := a.Stat()
+	qt.Assert(t, err, qt.IsNil)
 	assert.EqualValues(t, 6, fi.Size())
 }
 
@@ -61,3 +68,29 @@ func TestSimultaneousIncrementalBlob(t *testing.T) {
 	go doRead(&b1, &e1, rc1, 1)
 	wg.Wait()
 }
+
+func BenchmarkMarkComplete(b *testing.B) {
+	const pieceSize = 8 << 20
+	c := qt.New(b)
+	data := make([]byte, pieceSize)
+	rand.Read(data)
+	dbPath := filepath.Join(b.TempDir(), "storage.db")
+	b.Logf("storage db path: %q", dbPath)
+	ci, err := NewPiecesStorage(NewPoolOpts{Path: dbPath, Capacity: pieceSize})
+	c.Assert(err, qt.IsNil)
+	defer ci.Close()
+	ti, err := ci.OpenTorrent(nil, metainfo.Hash{})
+	c.Assert(err, qt.IsNil)
+	defer ti.Close()
+	pi := ti.Piece(metainfo.Piece{
+		Info: &metainfo.Info{
+			Pieces:      make([]byte, metainfo.HashSize),
+			PieceLength: pieceSize,
+			Length:      pieceSize,
+		},
+	})
+	b.ResetTimer()
+	for range iter.N(b.N) {
+		storage.BenchmarkPieceMarkComplete(b, pi, data)
+	}
+}
-- 
2.48.1