Sergey Matveev's repositories - btrtrc.git/commitdiff
Remove cast(data as blob) workaround
authorMatt Joiner <anacrolix@gmail.com>
Tue, 19 Jan 2021 06:54:17 +0000 (17:54 +1100)
committerMatt Joiner <anacrolix@gmail.com>
Mon, 25 Jan 2021 04:54:37 +0000 (15:54 +1100)
Upstream merged the fix. Good performance boost.

go.mod
storage/sqlite/sqlite-storage.go

diff --git a/go.mod b/go.mod
index 3ad748b28099e0cba50bd777b2d8894884859f9d..817c09dfa9adb321d7e879e601adc900046bbdac 100644 (file)
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/anacrolix/torrent
 
 require (
        bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512
-       crawshaw.io/sqlite v0.3.2
+       crawshaw.io/sqlite v0.3.3-0.20201116044518-95be3f88ee0f
        github.com/RoaringBitmap/roaring v0.5.5 // indirect
        github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75 // indirect
        github.com/alexflint/go-arg v1.3.0
index 0e1edd1b9db463a454b94287e29876d43afb6bdf..c53d9c67883e99376bff7ef01de4fec26d130607 100644 (file)
@@ -77,7 +77,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
                create index if not exists blob_last_used on blob(last_used);
                
                -- While sqlite *seems* to be faster to get sum(length(data)) instead of 
-               -- sum(length(cast(data as blob))), it may still require a large table scan at start-up or with a 
+               -- sum(length(data)), it may still require a large table scan at start-up or with a 
                -- cold-cache. With this we can be assured that it doesn't.
                insert or ignore into blob_meta values ('size', 0);
                
@@ -99,7 +99,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
                                        (select value from blob_meta where key='size') as usage_with,
                                        last_used,
                                        rowid,
-                                       length(cast(data as blob))
+                                       length(data)
                                from blob order by last_used, rowid limit 1
                        )
                        where usage_with > (select value from setting where name='capacity')
@@ -108,7 +108,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error {
                                usage_with-data_length as new_usage_with,
                                blob.last_used,
                                blob.rowid,
-                               length(cast(data as blob))
+                               length(data)
                        from excess join blob
                        on blob.rowid=(select rowid from blob where (last_used, rowid) > (excess.last_used, blob_rowid))
                        where new_usage_with > (select value from setting where name='capacity')
@@ -361,7 +361,7 @@ func (p *provider) WriteConsecutiveChunks(prefix string, w io.Writer) (written i
                err = io.EOF
                err = sqlitex.Exec(conn, `
                                select
-                                       cast(data as blob),
+                                       data,
                                        cast(substr(name, ?+1) as integer) as offset
                                from blob
                                where name like ?||'%'
@@ -712,7 +712,7 @@ func (i instance) ReadAt(p []byte, off int64) (n int, err error) {
                        gotRow := false
                        err = sqlitex.Exec(
                                conn,
-                               "select substr(cast(data as blob), ?, ?) from blob where name=?",
+                               "select substr(data, ?, ?) from blob where name=?",
                                func(stmt *sqlite.Stmt) error {
                                        if gotRow {
                                                panic("found multiple matching blobs")