14 _ "github.com/anacrolix/envpprof"
15 "github.com/anacrolix/torrent/storage"
16 test_storage "github.com/anacrolix/torrent/storage/test"
17 "github.com/dustin/go-humanize"
18 qt "github.com/frankban/quicktest"
19 "github.com/stretchr/testify/assert"
20 "github.com/stretchr/testify/require"
func newConnsAndProv(t *testing.T, opts NewPoolOpts) (ConnPool, *provider) {
	opts.Path = filepath.Join(t.TempDir(), "sqlite3.db")
	pool, err := NewPool(opts)
	qt.Assert(t, err, qt.IsNil)
	// sqlitex.Pool.Close doesn't like being called more than once. Let it slide for now.
	//t.Cleanup(func() { pool.Close() })
	qt.Assert(t, initPoolDatabase(pool, InitDbOpts{}), qt.IsNil)
	prov, err := NewProvider(pool, ProviderOpts{BatchWrites: pool.NumConns() > 1})
	require.NoError(t, err)
	t.Cleanup(func() { prov.Close() })
	return pool, prov
}
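
// TestTextBlobSize stores a value that begins with a NUL byte and checks that
// Stat reports the full 6-byte length, i.e. the blob is not truncated or
// reinterpreted as text.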
func TestTextBlobSize(t *testing.T) {
	_, prov := newConnsAndProv(t, NewPoolOpts{})
	a, _ := prov.NewInstance("a")
	err := a.Put(bytes.NewBufferString("\x00hello"))
	qt.Assert(t, err, qt.IsNil)
	fi, err := a.Stat()
	qt.Assert(t, err, qt.IsNil)
	assert.EqualValues(t, 6, fi.Size())
}
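
// TestSimultaneousIncrementalBlob verifies that two readers can stream the
// same blob concurrently, each on its own connection from the pool.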
func TestSimultaneousIncrementalBlob(t *testing.T) {
	_, p := newConnsAndProv(t, NewPoolOpts{
		NumConns: 3, // Enough connections for two concurrent readers.
	})
	a, err := p.NewInstance("a")
	require.NoError(t, err)
	const contents = "hello, world"
	require.NoError(t, a.Put(bytes.NewReader([]byte(contents))))
	rc1, err := a.Get()
	require.NoError(t, err)
	rc2, err := a.Get()
	require.NoError(t, err)
	var b1, b2 []byte
	var e1, e2 error
	var wg sync.WaitGroup
	doRead := func(b *[]byte, e *error, rc io.ReadCloser, n int) {
		defer wg.Done()
		defer rc.Close()
		*b, *e = io.ReadAll(rc)
		require.NoError(t, *e, n)
		assert.EqualValues(t, contents, *b)
	}
	wg.Add(2)
	go doRead(&b2, &e2, rc2, 2)
	go doRead(&b1, &e1, rc1, 1)
	wg.Wait()
}
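
// BenchmarkMarkComplete measures piece-completion performance across a matrix
// of configurations: direct sqlite storage versus the resource-pieces
// implementation, file-backed versus in-memory databases, and, for direct
// storage, a range of journal modes and mmap sizes.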
func BenchmarkMarkComplete(b *testing.B) {
	const pieceSize = test_storage.DefaultPieceSize
	const noTriggers = false
	var capacity int64 = test_storage.DefaultNumPieces * pieceSize / 2
	if noTriggers {
		// Since we won't push out old pieces, we have to mark them incomplete manually.
		capacity = 0
	}
	runBench := func(b *testing.B, ci storage.ClientImpl) {
		test_storage.BenchmarkPieceMarkComplete(b, ci, pieceSize, test_storage.DefaultNumPieces, capacity)
	}
	c := qt.New(b)
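	// Exercise both the on-disk and in-memory variants of each implementation.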
	for _, memory := range []bool{false, true} {
		b.Run(fmt.Sprintf("Memory=%v", memory), func(b *testing.B) {
			b.Run("Direct", func(b *testing.B) {
				var opts NewDirectStorageOpts
				opts.Memory = memory
				opts.Path = filepath.Join(b.TempDir(), "storage.db")
				opts.Capacity = capacity
				opts.CacheBlobs = true
				opts.BlobFlushInterval = time.Second
				opts.NoTriggers = noTriggers
				directBench := func(b *testing.B) {
					ci, err := NewDirectStorage(opts)
					if errors.Is(err, UnexpectedJournalMode) {
						b.Skipf("setting journal mode %q: %v", opts.SetJournalMode, err)
					}
					c.Assert(err, qt.IsNil)
					defer ci.Close()
					runBench(b, ci)
				}
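				// Sweep the sqlite journal modes; the empty string leaves the
				// database's default mode in place. Modes the backend rejects
				// surface as UnexpectedJournalMode and skip the sub-benchmark.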
				for _, journalMode := range []string{"", "wal", "off", "truncate", "delete", "persist", "memory"} {
					opts.SetJournalMode = journalMode
					b.Run("JournalMode="+journalMode, func(b *testing.B) {
						for _, mmapSize := range []int64{-1, 0, 1 << 23, 1 << 24, 1 << 25} {
							if memory && mmapSize >= 0 {
								// mmap doesn't apply to in-memory databases, so
								// only benchmark the default setting there.
								continue
							}
							b.Run(fmt.Sprintf("MmapSize=%s", func() string {
								if mmapSize < 0 {
									return "default"
								}
								return humanize.IBytes(uint64(mmapSize))
							}()), func(b *testing.B) {
								opts.MmapSize = mmapSize
								opts.MmapSizeOk = true
								directBench(b)
							})
						}
					})
				}
			})
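			// The resource-pieces implementation layers piece storage over the
			// generic resource provider, with and without write batching.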
128 b.Run("ResourcePieces", func(b *testing.B) {
129 for _, batchWrites := range []bool{false, true} {
130 b.Run(fmt.Sprintf("BatchWrites=%v", batchWrites), func(b *testing.B) {
131 var opts NewPiecesStorageOpts
132 opts.Path = filepath.Join(b.TempDir(), "storage.db")
133 //b.Logf("storage db path: %q", dbPath)
134 opts.Capacity = capacity
136 opts.ProvOpts = func(opts *ProviderOpts) {
137 opts.BatchWrites = batchWrites
139 ci, err := NewPiecesStorage(opts)
140 c.Assert(err, qt.IsNil)