Improve configurability and add PutSized to sqlite storage
[btrtrc.git] / storage / sqlite / sqlite-storage_test.go
package sqliteStorage

import (
        "bytes"
        "io"
        "io/ioutil"
        "math/rand"
        "path/filepath"
        "sync"
        "testing"

        _ "github.com/anacrolix/envpprof"
        "github.com/anacrolix/missinggo/iter"
        "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/storage"
        qt "github.com/frankban/quicktest"
        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
)

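// newConnsAndProv opens a pool of connections to a fresh temporary database
// file and wraps it in a provider. The provider is closed when the test ends;
// the pool is deliberately not (see the comment below).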
func newConnsAndProv(t *testing.T, opts NewPoolOpts) (ConnPool, *provider) {
        opts.Path = filepath.Join(t.TempDir(), "sqlite3.db")
        conns, provOpts, err := NewPool(opts)
        require.NoError(t, err)
        // sqlitex.Pool.Close doesn't like being called more than once. Let it slide for now.
        //t.Cleanup(func() { conns.Close() })
        prov, err := NewProvider(conns, provOpts)
        require.NoError(t, err)
        t.Cleanup(func() { prov.Close() })
        return conns, prov
}

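// TestTextBlobSize stores a value that begins with a NUL byte and checks that
// Stat reports the full 6-byte size, i.e. the value is not truncated at the
// NUL as text would be.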
func TestTextBlobSize(t *testing.T) {
        _, prov := newConnsAndProv(t, NewPoolOpts{})
        a, err := prov.NewInstance("a")
        qt.Assert(t, err, qt.IsNil)
        err = a.Put(bytes.NewBufferString("\x00hello"))
        qt.Assert(t, err, qt.IsNil)
        fi, err := a.Stat()
        qt.Assert(t, err, qt.IsNil)
        assert.EqualValues(t, 6, fi.Size())
}

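// TestSimultaneousIncrementalBlob reads the same stored value from two
// goroutines at once, exercising the ConcurrentBlobReads option on a
// multi-connection pool.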
func TestSimultaneousIncrementalBlob(t *testing.T) {
        _, p := newConnsAndProv(t, NewPoolOpts{
                NumConns:            3,
                ConcurrentBlobReads: true,
        })
        a, err := p.NewInstance("a")
        require.NoError(t, err)
        const contents = "hello, world"
        require.NoError(t, a.Put(bytes.NewReader([]byte(contents))))
        rc1, err := a.Get()
        require.NoError(t, err)
        rc2, err := a.Get()
        require.NoError(t, err)
        var b1, b2 []byte
        var e1, e2 error
        var wg sync.WaitGroup
        doRead := func(b *[]byte, e *error, rc io.ReadCloser) {
                defer wg.Done()
                defer rc.Close()
                *b, *e = ioutil.ReadAll(rc)
        }
        wg.Add(2)
        go doRead(&b2, &e2, rc2)
        go doRead(&b1, &e1, rc1)
        wg.Wait()
        // Assert on the test goroutine: require's FailNow must not be called from
        // goroutines spawned by the test.
        require.NoError(t, e1)
        require.NoError(t, e2)
        assert.EqualValues(t, contents, b1)
        assert.EqualValues(t, contents, b2)
}

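// BenchmarkMarkComplete times storage.BenchmarkPieceMarkComplete against the
// sqlite pieces storage for a single 8 MiB piece of random data, priming the
// cache with one untimed run first.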
func BenchmarkMarkComplete(b *testing.B) {
        const pieceSize = 8 << 20
        c := qt.New(b)
        data := make([]byte, pieceSize)
        rand.Read(data)
        dbPath := filepath.Join(b.TempDir(), "storage.db")
        b.Logf("storage db path: %q", dbPath)
        ci, err := NewPiecesStorage(NewPoolOpts{
                Path:                dbPath,
                Capacity:            pieceSize,
                ConcurrentBlobReads: true,
        })
        c.Assert(err, qt.IsNil)
        defer ci.Close()
        ti, err := ci.OpenTorrent(nil, metainfo.Hash{})
        c.Assert(err, qt.IsNil)
        defer ti.Close()
        pi := ti.Piece(metainfo.Piece{
                Info: &metainfo.Info{
                        Pieces:      make([]byte, metainfo.HashSize),
                        PieceLength: pieceSize,
                        Length:      pieceSize,
                },
        })
        // Do it untimed the first time to prime the cache.
        storage.BenchmarkPieceMarkComplete(b, pi, data)
        b.ResetTimer()
        for range iter.N(b.N) {
                storage.BenchmarkPieceMarkComplete(b, pi, data)
        }
}