s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[0], outgoing: true}})
targ := &pexState{
ev: []pexEvent{
- pexEvent{pexAdd, addrs[0], pp.PexOutgoingConn},
+ {pexAdd, addrs[0], pp.PexOutgoingConn},
},
nc: 1,
}
t.Run("belowTarg", func(t *testing.T) {
s := &pexState{
hold: []pexEvent{
- pexEvent{pexDrop, addrs[1], 0},
+ {pexDrop, addrs[1], 0},
},
nc: 0,
}
s.Add(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}})
targ := &pexState{
hold: []pexEvent{
- pexEvent{pexDrop, addrs[1], 0},
+ {pexDrop, addrs[1], 0},
},
ev: []pexEvent{
- pexEvent{pexAdd, addrs[0], 0},
+ {pexAdd, addrs[0], 0},
},
nc: 1,
}
holdAddr := &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
s := &pexState{
hold: []pexEvent{
- pexEvent{pexDrop, holdAddr, 0},
+ {pexDrop, holdAddr, 0},
},
nc: pexTargAdded,
}
targ := &pexState{
hold: []pexEvent{},
ev: []pexEvent{
- pexEvent{pexDrop, holdAddr, 0},
- pexEvent{pexAdd, addrs[0], 0},
+ {pexDrop, holdAddr, 0},
+ {pexAdd, addrs[0], 0},
},
nc: pexTargAdded + 1,
}
s := &pexState{nc: 1}
s.Drop(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}, pex: pexConnState{Listed: true}})
targ := &pexState{
- hold: []pexEvent{pexEvent{pexDrop, addrs[0], 0}},
+ hold: []pexEvent{{pexDrop, addrs[0], 0}},
nc: 0,
}
require.EqualValues(t, targ, s)
s := &pexState{nc: pexTargAdded + 1}
s.Drop(&PeerConn{Peer: Peer{RemoteAddr: addrs[0]}, pex: pexConnState{Listed: true}})
targ := &pexState{
- ev: []pexEvent{pexEvent{pexDrop, addrs[0], 0}},
+ ev: []pexEvent{{pexDrop, addrs[0], 0}},
nc: pexTargAdded,
}
require.EqualValues(t, targ, s)
func TestPexReset(t *testing.T) {
s := &pexState{
- hold: []pexEvent{pexEvent{pexDrop, addrs[0], 0}},
- ev: []pexEvent{pexEvent{pexAdd, addrs[1], 0}},
+ hold: []pexEvent{{pexDrop, addrs[0], 0}},
+ ev: []pexEvent{{pexAdd, addrs[1], 0}},
nc: 1,
}
s.Reset()
name: "add4",
in: &pexState{
ev: []pexEvent{
- pexEvent{pexAdd, addrs[0], f},
- pexEvent{pexAdd, addrs[1], f},
- pexEvent{pexAdd, addrs[2], f},
- pexEvent{pexAdd, addrs[3], f},
+ {pexAdd, addrs[0], f},
+ {pexAdd, addrs[1], f},
+ {pexAdd, addrs[2], f},
+ {pexAdd, addrs[3], f},
},
},
arg: 0,
arg: 0,
in: &pexState{
ev: []pexEvent{
- pexEvent{pexDrop, addrs[0], f},
- pexEvent{pexDrop, addrs[2], f},
+ {pexDrop, addrs[0], f},
+ {pexDrop, addrs[2], f},
},
},
targM: pp.PexMsg{
arg: 0,
in: &pexState{
ev: []pexEvent{
- pexEvent{pexAdd, addrs[0], f},
- pexEvent{pexAdd, addrs[1], f},
- pexEvent{pexDrop, addrs[0], f},
+ {pexAdd, addrs[0], f},
+ {pexAdd, addrs[1], f},
+ {pexDrop, addrs[0], f},
},
},
targM: pp.PexMsg{
arg: 0,
in: &pexState{
ev: []pexEvent{
- pexEvent{pexAdd, addrs[0], f},
- pexEvent{pexAdd, addrs[1], f},
- pexEvent{pexAdd, addrs[2], f},
+ {pexAdd, addrs[0], f},
+ {pexAdd, addrs[1], f},
+ {pexAdd, addrs[2], f},
},
hold: []pexEvent{
- pexEvent{pexDrop, addrs[0], f},
- pexEvent{pexDrop, addrs[2], f},
- pexEvent{pexDrop, addrs[1], f},
+ {pexDrop, addrs[0], f},
+ {pexDrop, addrs[2], f},
+ {pexDrop, addrs[1], f},
},
},
targM: pp.PexMsg{
arg: 1,
in: &pexState{
ev: []pexEvent{
- pexEvent{pexAdd, addrs[0], f},
- pexEvent{pexAdd, addrs[1], f},
+ {pexAdd, addrs[0], f},
+ {pexAdd, addrs[1], f},
},
},
targM: pp.PexMsg{
}
err = sqlitex.ExecScript(conn, `
-- We have to opt into this before creating any tables, or before a vacuum to enable it. It means we
- -- can trim the database file size with partial vacuums without having to do a full vacuum, which
+ -- can trim the database file size with partial vacuums without having to do a full vacuum, which
-- locks everything.
pragma auto_vacuum=incremental;
-
+
create table if not exists blob (
name text,
last_used timestamp default (datetime('now')),
verified bool,
primary key (name)
);
-
+
create table if not exists blob_meta (
key text primary key,
value
);
create index if not exists blob_last_used on blob(last_used);
-
- -- While sqlite *seems* to be faster to get sum(length(data)) instead of
- -- sum(length(data)), it may still require a large table scan at start-up or with a
+
+ -- While sqlite *seems* to be faster to get sum(length(data)) instead of
+ -- sum(data_length), it may still require a large table scan at start-up or with a
-- cold-cache. With this we can be assured that it doesn't.
insert or ignore into blob_meta values ('size', 0);
-
+
create table if not exists setting (
name primary key on conflict replace,
value
);
-
+
create view if not exists deletable_blob as
with recursive excess (
usage_with,
blob_rowid,
data_length
) as (
- select *
+ select *
from (
- select
+ select
(select value from blob_meta where key='size') as usage_with,
last_used,
rowid,
)
where usage_with > (select value from setting where name='capacity')
union all
- select
+ select
usage_with-data_length as new_usage_with,
blob.last_used,
blob.rowid,
update blob_meta set value=value+length(cast(new.data as blob)) where key='size';
delete from blob where rowid in (select blob_rowid from deletable_blob);
end;
-
+
create trigger if not exists after_update_blob
after update of data on blob
begin
update blob_meta set value=value+length(cast(new.data as blob))-length(cast(old.data as blob)) where key='size';
delete from blob where rowid in (select blob_rowid from deletable_blob);
end;
-
+
create trigger if not exists after_delete_blob
after delete on blob
begin
return me.conn.Close()
}
-func (poolFromConn) NumConns() int { return 1 }
+func (me *poolFromConn) NumConns() int { return 1 }
type ProviderOpts struct {
BatchWrites bool