From 9d6bf7a4f06078c5890edbeaf975975c512e6cf5 Mon Sep 17 00:00:00 2001 From: Matt Joiner Date: Tue, 19 Jan 2021 17:54:17 +1100 Subject: [PATCH] Remove cast(data as blob) workaround Upstream merged the fix. Good performance boost. --- go.mod | 2 +- storage/sqlite/sqlite-storage.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 3ad748b2..817c09df 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/anacrolix/torrent require ( bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512 - crawshaw.io/sqlite v0.3.2 + crawshaw.io/sqlite v0.3.3-0.20201116044518-95be3f88ee0f github.com/RoaringBitmap/roaring v0.5.5 // indirect github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75 // indirect github.com/alexflint/go-arg v1.3.0 diff --git a/storage/sqlite/sqlite-storage.go b/storage/sqlite/sqlite-storage.go index 0e1edd1b..c53d9c67 100644 --- a/storage/sqlite/sqlite-storage.go +++ b/storage/sqlite/sqlite-storage.go @@ -77,7 +77,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error { create index if not exists blob_last_used on blob(last_used); -- While sqlite *seems* to be faster to get sum(length(data)) instead of - -- sum(length(cast(data as blob))), it may still require a large table scan at start-up or with a + -- sum(length(cast(data as blob))) (the old workaround), it may still require a large table scan at start-up or with a -- cold-cache. With this we can be assured that it doesn't. 
insert or ignore into blob_meta values ('size', 0); @@ -99,7 +99,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error { (select value from blob_meta where key='size') as usage_with, last_used, rowid, - length(cast(data as blob)) + length(data) from blob order by last_used, rowid limit 1 ) where usage_with > (select value from setting where name='capacity') @@ -108,7 +108,7 @@ func InitSchema(conn conn, pageSize int, triggers bool) error { usage_with-data_length as new_usage_with, blob.last_used, blob.rowid, - length(cast(data as blob)) + length(data) from excess join blob on blob.rowid=(select rowid from blob where (last_used, rowid) > (excess.last_used, blob_rowid)) where new_usage_with > (select value from setting where name='capacity') @@ -361,7 +361,7 @@ func (p *provider) WriteConsecutiveChunks(prefix string, w io.Writer) (written i err = io.EOF err = sqlitex.Exec(conn, ` select - cast(data as blob), + data, cast(substr(name, ?+1) as integer) as offset from blob where name like ?||'%' @@ -712,7 +712,7 @@ func (i instance) ReadAt(p []byte, off int64) (n int, err error) { gotRow := false err = sqlitex.Exec( conn, - "select substr(cast(data as blob), ?, ?) from blob where name=?", + "select substr(data, ?, ?) from blob where name=?", func(stmt *sqlite.Stmt) error { if gotRow { panic("found multiple matching blobs")