12 _ "github.com/anacrolix/envpprof"
13 "github.com/anacrolix/missinggo/iter"
14 "github.com/anacrolix/torrent/metainfo"
15 "github.com/anacrolix/torrent/storage"
16 qt "github.com/frankban/quicktest"
17 "github.com/stretchr/testify/assert"
18 "github.com/stretchr/testify/require"
// newConnsAndProv builds the test fixture shared by the tests below: a sqlite
// connection pool backed by a database file in a per-test temp dir, plus a
// storage provider over that pool. The provider is closed via t.Cleanup; the
// pool deliberately is not (see the comment inline).
// NOTE(review): this excerpt elides lines after the final Cleanup — the
// `return conns, prov` (or similar) and closing brace are not visible here.
21 func newConnsAndProv(t *testing.T, opts NewPoolOpts) (ConnPool, *provider) {
22 opts.Path = filepath.Join(t.TempDir(), "sqlite3.db")
23 conns, provOpts, err := NewPool(opts)
24 require.NoError(t, err)
25 // sqlitex.Pool.Close doesn't like being called more than once. Let it slide for now.
26 //t.Cleanup(func() { conns.Close() })
27 prov, err := NewProvider(conns, provOpts)
28 require.NoError(t, err)
29 t.Cleanup(func() { prov.Close() })
// TestTextBlobSize stores a payload that begins with a NUL byte ("\x00hello")
// and asserts the reported size is the full 6 bytes — i.e. the blob is not
// truncated or coerced by sqlite text handling.
// NOTE(review): the line defining `fi` (presumably a Stat call on instance
// `a`, whose error is checked on the following line) is elided from this
// excerpt — confirm against the full file.
33 func TestTextBlobSize(t *testing.T) {
34 _, prov := newConnsAndProv(t, NewPoolOpts{})
35 a, _ := prov.NewInstance("a")
36 err := a.Put(bytes.NewBufferString("\x00hello"))
37 qt.Assert(t, err, qt.IsNil)
39 qt.Assert(t, err, qt.IsNil)
40 assert.EqualValues(t, 6, fi.Size())
// TestSimultaneousIncrementalBlob writes a payload once, then reads it back
// through two concurrently-open readers (rc1, rc2), asserting each goroutine
// sees the complete contents. Exercises the ConcurrentBlobReads pool option.
// NOTE(review): several lines are elided from this excerpt — the creation of
// rc1/rc2 (whose errors are checked below), the declarations of b1/b2/e1/e2,
// and whatever synchronization waits for the two goroutines to finish.
43 func TestSimultaneousIncrementalBlob(t *testing.T) {
44 _, p := newConnsAndProv(t, NewPoolOpts{
46 ConcurrentBlobReads: true,
48 a, err := p.NewInstance("a")
49 require.NoError(t, err)
50 const contents = "hello, world"
51 require.NoError(t, a.Put(bytes.NewReader([]byte("hello, world"))))
53 require.NoError(t, err)
55 require.NoError(t, err)
	// doRead drains one reader fully and checks it yields the whole payload;
	// n only labels which reader failed in the error message.
59 doRead := func(b *[]byte, e *error, rc io.ReadCloser, n int) {
62 *b, *e = ioutil.ReadAll(rc)
63 require.NoError(t, *e, n)
64 assert.EqualValues(t, contents, *b)
	// Kick off both reads concurrently against the same stored blob.
67 go doRead(&b2, &e2, rc2, 2)
68 go doRead(&b1, &e1, rc1, 1)
// BenchmarkMarkComplete measures marking an 8 MiB piece complete against the
// sqlite-backed piece storage. One untimed warm-up call primes the cache
// before the timed b.N loop.
// NOTE(review): this excerpt elides lines including the quicktest checker `c`
// (presumably qt.New(b)), the data fill, remaining NewPoolOpts fields, and
// any b.ResetTimer/closing braces — confirm against the full file.
72 func BenchmarkMarkComplete(b *testing.B) {
73 const pieceSize = 8 << 20
75 data := make([]byte, pieceSize)
77 dbPath := filepath.Join(b.TempDir(), "storage.db")
78 b.Logf("storage db path: %q", dbPath)
79 ci, err := NewPiecesStorage(NewPoolOpts{
82 ConcurrentBlobReads: true,
84 c.Assert(err, qt.IsNil)
86 ti, err := ci.OpenTorrent(nil, metainfo.Hash{})
87 c.Assert(err, qt.IsNil)
	// A single synthetic piece: zero hash, full piece length.
89 pi := ti.Piece(metainfo.Piece{
91 Pieces: make([]byte, metainfo.HashSize),
92 PieceLength: pieceSize,
96 // Do it untimed the first time to prime the cache.
97 storage.BenchmarkPieceMarkComplete(b, pi, data)
99 for range iter.N(b.N) {
100 storage.BenchmarkPieceMarkComplete(b, pi, data)