14 "crawshaw.io/sqlite/sqlitex"
16 "github.com/anacrolix/torrent/metainfo"
17 "github.com/anacrolix/torrent/storage"
// NewDirectStorageOpts configures NewDirectStorage. Other fields
// (NewConnOpts, InitDbOpts, InitConnOpts, PageSize, GcBlobs, NoCacheBlobs)
// are referenced elsewhere in this file but their declarations are elided
// from this excerpt.
20 type NewDirectStorageOpts struct {
// BlobFlushInterval is how often cached sqlite blobs are flushed (closed)
// so other connections can get a transaction through. When zero and
// GcBlobs is unset, NewDirectStorage defaults it to one second.
26 BlobFlushInterval time.Duration
29 // A convenience function that creates a connection pool, resource provider, and a pieces storage
30 // ClientImpl and returns them all with a Close attached.
31 func NewDirectStorage(opts NewDirectStorageOpts) (_ storage.ClientImplCloser, err error) {
32 conn, err := newConn(opts.NewConnOpts)
// NOTE(review): error-handling/early-return lines are elided in this excerpt.
36 if opts.PageSize == 0 {
37 // The largest size sqlite supports. I think we want this to be the smallest piece size we
38 // can expect, which is probably 1<<17.
39 opts.PageSize = 1 << 16
// Database-level init (schema etc.), then per-connection init.
41 err = initDatabase(conn, opts.InitDbOpts)
46 err = initConn(conn, opts.InitConnOpts)
// Default the flush interval only when blobs aren't reclaimed by GC
// finalizers; GcBlobs mode leaves it at zero (no timer).
51 if opts.BlobFlushInterval == 0 && !opts.GcBlobs {
52 // This is influenced by typical busy timeouts, of 5-10s. We want to give other connections
53 // a few chances at getting a transaction through.
54 opts.BlobFlushInterval = time.Second
// Blob cache, keyed by blob name (presumably the piece-hash hex used in
// torrent.Piece below — confirm against elided code).
58 blobs: make(map[string]*sqlite.Blob),
// Start the periodic blob flusher only when flushing is enabled.
61 if opts.BlobFlushInterval != 0 {
62 cl.blobFlusher = time.AfterFunc(opts.BlobFlushInterval, cl.blobFlusherFunc)
64 cl.capacity = cl.getCapacity
// getCapacity reads the configured capacity from the 'setting' table.
// A nil return presumably means no capacity row exists — the allocation of
// ret and the error/lock handling are elided in this excerpt.
68 func (cl *client) getCapacity() (ret *int64) {
71 err := sqlitex.Exec(cl.conn, "select value from setting where name='capacity'", func(stmt *sqlite.Stmt) error {
// ret must have been allocated on an elided line before this assignment.
73 *ret = stmt.ColumnInt64(0)
// Cache of open sqlite blobs, keyed by blob name.
85 blobs map[string]*sqlite.Blob
// Timer driving periodic blob flushes; nil when BlobFlushInterval is zero
// (see NewDirectStorage and Close, which both guard on that).
86 blobFlusher *time.Timer
87 opts NewDirectStorageOpts
// Capacity getter exposed to storage.TorrentImpl; wired to getCapacity.
89 capacity func() *int64
// blobFlusherFunc runs on the blobFlusher timer: it flushes cached blobs
// (the flushBlobs call and any locking are elided in this excerpt) and then
// reschedules itself at the configured interval.
92 func (c *client) blobFlusherFunc() {
97 c.blobFlusher.Reset(c.opts.BlobFlushInterval)
// flushBlobs closes every cached blob and clears the cache
// (the close/delete statements are elided in this excerpt).
101 func (c *client) flushBlobs() {
102 for key, b := range c.blobs {
103 // Need the lock to prevent racing with the GC finalizers.
// OpenTorrent implements storage.ClientImpl. The torrent value t is
// constructed on an elided line; the returned TorrentImpl shares the
// client's capacity getter.
109 func (c *client) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
111 return storage.TorrentImpl{Piece: t.Piece, Close: t.Close, Capacity: &c.capacity}, nil
// Close flushes state and closes the underlying sqlite connection.
// Blob cleanup and timer stop (inside the guarded branch) are elided in
// this excerpt; the guard mirrors the one in NewDirectStorage, since the
// timer only exists when BlobFlushInterval is non-zero.
114 func (c *client) Close() error {
119 if c.opts.BlobFlushInterval != 0 {
122 return c.conn.Close()
// torrent is the per-torrent storage handle returned by OpenTorrent.
// Its fields are elided in this excerpt; methods below reference a client
// back-pointer (t.c) — confirm against the full file.
125 type torrent struct {
// rowidForBlob returns the rowid of the named blob row, optionally creating
// a zero-filled row of the given length when it doesn't exist and create is
// true. Error-handling/early-return lines are elided in this excerpt.
129 func rowidForBlob(c conn, name string, length int64, create bool) (rowid int64, err error) {
131 err = sqlitex.Exec(c, "select rowid from blob where name=?", func(stmt *sqlite.Stmt) error {
// name is presumably unique in the blob table, so a second callback
// invocation indicates corruption or a schema violation.
133 panic("expected at most one row")
135 // TODO: How do we know if we got this wrong?
136 rowid = stmt.ColumnInt64(0)
// Reached when no row matched and create is false (guard elided).
147 err = errors.New("no existing row")
// zeroblob(?) allocates an incrementally-writable blob of `length` bytes.
150 err = sqlitex.Exec(c, "insert into blob(name, data) values(?, zeroblob(?))", nil, name, length)
154 rowid = c.LastInsertRowID()
// Piece implements storage.TorrentImpl. Pieces are addressed in the blob
// table by the hex string of their hash.
158 func (t torrent) Piece(p metainfo.Piece) storage.PieceImpl {
161 name := p.Hash().HexString()
// Close implements storage.TorrentImpl's Close hook (body elided in this
// excerpt).
169 func (t torrent) Close() error {
// doAtIoWithBlob runs a positioned read or write (atIo selects ReadAt or
// WriteAt on the blob) against the piece's cached sqlite blob, retrying
// exactly once with a freshly opened blob when the cached one has gone
// stale. Parameters b/off/create are declared on elided lines.
179 func (p piece) doAtIoWithBlob(
180 atIo func(*sqlite.Blob) func([]byte, int64) (int, error),
184 ) (n int, err error) {
// NoCacheBlobs path (body elided): presumably bypasses the blob cache.
187 if p.opts.NoCacheBlobs {
190 blob, err := p.getBlob(create)
192 err = fmt.Errorf("getting blob: %w", err)
// First attempt against the (possibly cached) blob.
195 n, err = atIo(blob)(b, off)
// Only sqlite errors are candidates for the retry below; se is declared
// on an elided line.
200 if !errors.As(err, &se) {
203 // "ABORT" occurs if the row the blob is on is modified elsewhere. "ERROR: invalid blob" occurs
204 // if the blob has been closed. We don't forget blobs that are closed by our GC finalizers,
205 // because they may be attached to names that have since moved on to another blob.
206 if se.Code != sqlite.SQLITE_ABORT && !(p.opts.GcBlobs && se.Code == sqlite.SQLITE_ERROR && se.Msg == "invalid blob") {
210 // Try again, this time we're guaranteed to get a fresh blob, and so errors are no excuse. It
211 // might be possible to skip to this version if we don't cache blobs.
212 blob, err = p.getBlob(create)
214 err = fmt.Errorf("getting blob: %w", err)
217 return atIo(blob)(b, off)
// ReadAt implements io.ReaderAt via doAtIoWithBlob; the returned accessor
// (elided) presumably selects blob.ReadAt.
220 func (p piece) ReadAt(b []byte, off int64) (n int, err error) {
221 return p.doAtIoWithBlob(func(blob *sqlite.Blob) func([]byte, int64) (int, error) {
// WriteAt implements io.WriterAt via doAtIoWithBlob; the returned accessor
// (elided) presumably selects blob.WriteAt.
226 func (p piece) WriteAt(b []byte, off int64) (n int, err error) {
227 return p.doAtIoWithBlob(func(blob *sqlite.Blob) func([]byte, int64) (int, error) {
// MarkComplete flags the piece's blob row as verified. Changes() is
// consulted afterwards, presumably to detect that the row existed — the
// check itself is elided in this excerpt.
232 func (p piece) MarkComplete() error {
235 err := sqlitex.Exec(p.conn, "update blob set verified=true where name=?", nil, p.name)
239 changes := p.conn.Changes()
// forgetBlob evicts this piece's blob from the cache (the close call and
// the !ok early-return between these lines are elided in this excerpt).
246 func (p piece) forgetBlob() {
247 blob, ok := p.blobs[p.name]
252 delete(p.blobs, p.name)
// MarkNotComplete clears the verified flag on the piece's blob row.
// Unlike MarkComplete, the row-count is not checked here.
255 func (p piece) MarkNotComplete() error {
258 return sqlitex.Exec(p.conn, "update blob set verified=false where name=?", nil, p.name)
// Completion reports whether the piece's blob row is marked verified.
// Handling of the query error and of a missing row (ret.Ok) is elided in
// this excerpt.
261 func (p piece) Completion() (ret storage.Completion) {
264 err := sqlitex.Exec(p.conn, "select verified from blob where name=?", func(stmt *sqlite.Stmt) error {
265 ret.Complete = stmt.ColumnInt(0) != 0
// getBlob returns the cached sqlite blob for this piece, opening (and,
// when create is true, creating) it on a cache miss. Error handling and
// the GcBlobs guard around the finalizer setup are elided in this excerpt.
275 func (p piece) getBlob(create bool) (*sqlite.Blob, error) {
276 blob, ok := p.blobs[p.name]
// Cache miss path (the !ok guard is elided): resolve the rowid, then open
// the blob read-write (final OpenBlob argument is true).
278 rowid, err := rowidForBlob(p.conn, p.name, p.length, create)
280 return nil, fmt.Errorf("getting rowid for blob: %w", err)
282 blob, err = p.conn.OpenBlob("main", "blob", "data", rowid, true)
// herp is a sentinel allocated on an elided line; its finalizer presumably
// closes the blob when it becomes unreachable (GcBlobs mode).
288 runtime.SetFinalizer(herp, func(*byte) {
291 // Note there's no guarantee that the finalizer fired while this blob is the same
292 // one in the blob cache. It might be possible to rework this so that we check, or
293 // strip finalizers as appropriate.
// Cache the freshly opened blob for subsequent reads/writes.
297 p.blobs[p.name] = blob