type ResourcePiecesOpts struct {
LeaveIncompleteChunks bool
- AllowSizedPuts bool
+ NoSizedPuts bool
}
func NewResourcePieces(p PieceProvider) ClientImpl {
}()
completedInstance := s.completed()
err := func() error {
- if sp, ok := completedInstance.(SizedPutter); ok && s.opts.AllowSizedPuts {
+ if sp, ok := completedInstance.(SizedPutter); ok && !s.opts.NoSizedPuts {
return sp.PutSized(r, s.mp.Length())
} else {
return completedInstance.Put(r)
Memory bool
NumConns int
// Forces WAL, disables shared caching.
- ConcurrentBlobReads bool
- DontInitSchema bool
- PageSize int
+ NoConcurrentBlobReads bool
+ DontInitSchema bool
+ PageSize int
// If non-zero, overrides the existing setting.
Capacity int64
}
type ProviderOpts struct {
NumConns int
// Concurrent blob reads require WAL.
- ConcurrentBlobRead bool
- BatchWrites bool
+ NoConcurrentBlobReads bool
+ BatchWrites bool
}
// Remove any capacity limits.
opts.Path = ":memory:"
}
values := make(url.Values)
- if !opts.ConcurrentBlobReads {
+ if opts.NoConcurrentBlobReads || opts.Memory {
values.Add("cache", "shared")
}
path := fmt.Sprintf("file:%s?%s", opts.Path, values.Encode())
}
}
return conns, ProviderOpts{
- NumConns: opts.NumConns,
- ConcurrentBlobRead: opts.ConcurrentBlobReads,
- BatchWrites: true,
+ NumConns: opts.NumConns,
+ NoConcurrentBlobReads: opts.NoConcurrentBlobReads || opts.Memory,
+ BatchWrites: true,
}, nil
}
// Needs the ConnPool size so it can initialize all the connections with pragmas. Takes ownership of
// the ConnPool (since it has to initialize all the connections anyway).
func NewProvider(pool ConnPool, opts ProviderOpts) (_ *provider, err error) {
- _, err = initPoolConns(context.TODO(), pool, opts.NumConns, true)
+ _, err = initPoolConns(context.TODO(), pool, opts.NumConns, !opts.NoConcurrentBlobReads)
if err != nil {
+ err = fmt.Errorf("initing pool conns: %w", err)
return
}
prov := &provider{pool: pool, opts: opts}
"testing"
_ "github.com/anacrolix/envpprof"
+ "github.com/anacrolix/torrent/storage"
test_storage "github.com/anacrolix/torrent/storage/test"
qt "github.com/frankban/quicktest"
"github.com/stretchr/testify/assert"
func TestSimultaneousIncrementalBlob(t *testing.T) {
_, p := newConnsAndProv(t, NewPoolOpts{
- NumConns: 3,
- ConcurrentBlobReads: true,
+ NumConns: 3,
})
a, err := p.NewInstance("a")
require.NoError(t, err)
//b.Logf("storage db path: %q", dbPath)
ci, err := NewPiecesStorage(NewPiecesStorageOpts{
NewPoolOpts: NewPoolOpts{
- Path: dbPath,
- //Capacity: 4*pieceSize - 1,
- ConcurrentBlobReads: false,
- PageSize: 1 << 14,
- Memory: memory,
+ Path: dbPath,
+ Capacity: 4*pieceSize - 1,
+ NoConcurrentBlobReads: false,
+ PageSize: 1 << 14,
+ Memory: memory,
},
ProvOpts: func(opts *ProviderOpts) {
opts.BatchWrites = true
},
+ ResourcePiecesOpts: storage.ResourcePiecesOpts{
+ NoSizedPuts: memory,
+ },
})
c.Assert(err, qt.IsNil)
defer ci.Close()