From 8df24008ea652e4288cd90bd8eeee9fe11153986 Mon Sep 17 00:00:00 2001
From: Matt Joiner
Date: Tue, 24 Aug 2021 22:24:39 +1000
Subject: [PATCH] Embed SQL

---
 storage/sqlite/init-triggers.sql |  25 ++++++++
 storage/sqlite/init.sql          |  64 +++++++++++++++++++
 storage/sqlite/sqlite-storage.go | 104 +++----------------------------
 3 files changed, 99 insertions(+), 94 deletions(-)
 create mode 100644 storage/sqlite/init-triggers.sql
 create mode 100644 storage/sqlite/init.sql

diff --git a/storage/sqlite/init-triggers.sql b/storage/sqlite/init-triggers.sql
new file mode 100644
index 00000000..596486c8
--- /dev/null
+++ b/storage/sqlite/init-triggers.sql
@@ -0,0 +1,25 @@
+create trigger if not exists delete_blob_tags_before_blob_deleted
+before delete on blob
+begin
+    delete from tag where blob_name=old.name;
+end;
+
+create trigger if not exists after_insert_blob
+after insert on blob
+begin
+    update blob_meta set value=value+length(cast(new.data as blob)) where key='size';
+    delete from blob where rowid in (select blob_rowid from deletable_blob);
+end;
+
+create trigger if not exists after_update_blob
+after update of data on blob
+begin
+    update blob_meta set value=value+length(cast(new.data as blob))-length(cast(old.data as blob)) where key='size';
+    delete from blob where rowid in (select blob_rowid from deletable_blob);
+end;
+
+create trigger if not exists after_delete_blob
+after delete on blob
+begin
+    update blob_meta set value=value-length(cast(old.data as blob)) where key='size';
+end;
diff --git a/storage/sqlite/init.sql b/storage/sqlite/init.sql
new file mode 100644
index 00000000..c8e88b06
--- /dev/null
+++ b/storage/sqlite/init.sql
@@ -0,0 +1,64 @@
+-- We have to opt into this before creating any tables, or before a vacuum to enable it. It means we
+-- can trim the database file size with partial vacuums without having to do a full vacuum, which
+-- locks everything.
+pragma auto_vacuum=incremental;
+
+create table if not exists blob (
+    name text,
+    last_used timestamp default (datetime('now')),
+    data blob,
+    primary key (name)
+);
+
+create table if not exists blob_meta (
+    key text primary key,
+    value
+);
+
+create index if not exists blob_last_used on blob(last_used);
+
+-- While sqlite *seems* to be faster to get sum(length(data)) instead of
+-- sum(length(cast(data as blob))), it may still require a large table scan at start-up or with a
+-- cold-cache. With this we can be assured that it doesn't.
+insert or ignore into blob_meta values ('size', 0);
+
+create table if not exists setting (
+    name primary key on conflict replace,
+    value
+);
+
+create table if not exists tag (
+    blob_name references blob(name),
+    tag_name,
+    value,
+    primary key (blob_name, tag_name)
+);
+
+create view if not exists deletable_blob as
+with recursive excess (
+    usage_with,
+    last_used,
+    blob_rowid,
+    data_length
+) as (
+    select *
+    from (
+        select
+            (select value from blob_meta where key='size') as usage_with,
+            last_used,
+            rowid,
+            length(data)
+        from blob order by last_used, rowid limit 1
+    )
+    where usage_with > (select value from setting where name='capacity')
+    union all
+    select
+        usage_with-data_length as new_usage_with,
+        blob.last_used,
+        blob.rowid,
+        length(data)
+    from excess join blob
+    on blob.rowid=(select rowid from blob where (last_used, rowid) > (excess.last_used, blob_rowid))
+    where new_usage_with > (select value from setting where name='capacity')
+)
+select * from excess;
diff --git a/storage/sqlite/sqlite-storage.go b/storage/sqlite/sqlite-storage.go
index 85ffdc46..38a8afcc 100644
--- a/storage/sqlite/sqlite-storage.go
+++ b/storage/sqlite/sqlite-storage.go
@@ -6,6 +6,7 @@ package sqliteStorage
 import (
 	"bytes"
 	"context"
+	_ "embed"
 	"errors"
 	"expvar"
 	"fmt"
@@ -134,109 +135,24 @@ func setPageSize(conn conn, pageSize int) error {
 	return nil
 }
 
+var (
+	//go:embed init.sql
+	initScript string
+	//go:embed init-triggers.sql
+	initTriggers string
+)
+
 func InitSchema(conn conn, pageSize int, triggers bool) error {
 	err := setPageSize(conn, pageSize)
 	if err != nil {
 		return fmt.Errorf("setting page size: %w", err)
 	}
-	err = sqlitex.ExecScript(conn, `
-		-- We have to opt into this before creating any tables, or before a vacuum to enable it. It means we
-		-- can trim the database file size with partial vacuums without having to do a full vacuum, which
-		-- locks everything.
-		pragma auto_vacuum=incremental;
-
-		create table if not exists blob (
-			name text,
-			last_used timestamp default (datetime('now')),
-			data blob,
-			verified bool,
-			primary key (name)
-		);
-
-		create table if not exists blob_meta (
-			key text primary key,
-			value
-		);
-
-		create index if not exists blob_last_used on blob(last_used);
-
-		-- While sqlite *seems* to be faster to get sum(length(data)) instead of
-		-- sum(length(cast(data as blob))), it may still require a large table scan at start-up or with a
-		-- cold-cache. With this we can be assured that it doesn't.
-		insert or ignore into blob_meta values ('size', 0);
-
-		create table if not exists setting (
-			name primary key on conflict replace,
-			value
-		);
-
-		create table if not exists tag (
-			blob_name references blob(name),
-			tag_name,
-			value,
-			primary key (blob_name, tag_name)
-		);
-
-		create view if not exists deletable_blob as
-		with recursive excess (
-			usage_with,
-			last_used,
-			blob_rowid,
-			data_length
-		) as (
-			select *
-			from (
-				select
-					(select value from blob_meta where key='size') as usage_with,
-					last_used,
-					rowid,
-					length(data)
-				from blob order by last_used, rowid limit 1
-			)
-			where usage_with > (select value from setting where name='capacity')
-			union all
-			select
-				usage_with-data_length as new_usage_with,
-				blob.last_used,
-				blob.rowid,
-				length(data)
-			from excess join blob
-			on blob.rowid=(select rowid from blob where (last_used, rowid) > (excess.last_used, blob_rowid))
-			where new_usage_with > (select value from setting where name='capacity')
-		)
-		select * from excess;
-	`)
+	err = sqlitex.ExecScript(conn, initScript)
 	if err != nil {
 		return err
 	}
 	if triggers {
-		err := sqlitex.ExecScript(conn, `
-			create trigger if not exists delete_blob_tags_before_blob_deleted
-			before delete on blob
-			begin
-				delete from tag where blob_name=old.name;
-			end;
-
-			create trigger if not exists after_insert_blob
-			after insert on blob
-			begin
-				update blob_meta set value=value+length(cast(new.data as blob)) where key='size';
-				delete from blob where rowid in (select blob_rowid from deletable_blob);
-			end;
-
-			create trigger if not exists after_update_blob
-			after update of data on blob
-			begin
-				update blob_meta set value=value+length(cast(new.data as blob))-length(cast(old.data as blob)) where key='size';
-				delete from blob where rowid in (select blob_rowid from deletable_blob);
-			end;
-
-			create trigger if not exists after_delete_blob
-			after delete on blob
-			begin
-				update blob_meta set value=value-length(cast(old.data as blob)) where key='size';
-			end;
-		`)
+		err := sqlitex.ExecScript(conn, initTriggers)
 		if err != nil {
 			return err
 		}
-- 
2.48.1
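Note (not part of the patch): a minimal standalone sketch of the go:embed mechanism the change relies on. The schema.sql file name and the main package below are illustrative assumptions only; in the patch the embedded files are init.sql and init-triggers.sql, and the resulting strings are handed to sqlitex.ExecScript exactly as the old string literals were.

package main

import (
	// A blank import of "embed" is required when //go:embed fills a plain
	// string or []byte variable rather than an embed.FS.
	_ "embed"
	"fmt"
	"strings"
)

// The directive must sit immediately above the variable it fills. The compiler
// copies schema.sql (resolved relative to this source file's directory) into
// the string at build time, so there is no file I/O at run time.
//go:embed schema.sql
var schema string

func main() {
	// The embedded text behaves like any other string: it can be split,
	// logged, or passed to a SQL driver, e.g. sqlitex.ExecScript in the patch.
	stmts := strings.Split(schema, ";")
	fmt.Printf("schema.sql contains roughly %d statements\n", len(stmts)-1)
	fmt.Println(strings.TrimSpace(stmts[0]))
}

Run with "go run ." in a directory that also contains a schema.sql file. The practical benefit, which is the point of the commit, is that the schema lives in standalone .sql files (editable and syntax-highlighted on their own) while still being compiled into the binary.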