Made webext_storage use the open_database module

Removed support for a couple of things that didn't seem to be used:
    - Code to delete and recreate the database.  As far as I can tell,
      DatabaseUpgradeError was never raised.
    - init_sql_connection with is_writable=false
This commit is contained in:
Ben Dean-Kawamura 2021-06-14 14:53:37 -04:00 коммит произвёл bendk
Родитель b6903d97f9
Коммит fb8d360e0b
4 изменённых файлов: 147 добавлений и 165 удалений

Просмотреть файл

@ -0,0 +1,57 @@
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this
-- file, You can obtain one at http://mozilla.org/MPL/2.0/.

-- NOTE(review): this appears to be the *v1* schema snapshot used as a test
-- fixture for migration tests (it ends with PRAGMA user_version = 1 and
-- seeds test rows). Keep it byte-stable so upgrades are exercised against
-- the real historical schema.

-- This is a very simple schema for a chrome.storage.* implementation. At time
-- of writing, only chrome.storage.sync is supported, but this can be trivially
-- enhanced to support chrome.storage.local (the api is identical, it's just a
-- different "bucket" and doesn't sync).
--
-- Even though the spec allows for a single extension to have any number of
-- "keys", we've made the decision to store all keys for a given extension in a
-- single row as a JSON representation of all keys and values.
-- We've done this primarily due to:
-- * The shape of the API is very JSON, and it almost encourages multiple keys
-- to be fetched at one time.
-- * The defined max sizes that extensions are allowed to store using this API
-- is sufficiently small that we don't have many concerns around record sizes.
-- * We'd strongly prefer to keep one record per extension when syncing this
-- data, so having the local store in this shape makes syncing easier.

-- Local data: one row per extension.
CREATE TABLE IF NOT EXISTS storage_sync_data (
ext_id TEXT NOT NULL PRIMARY KEY,
/* The JSON payload. NULL means it's a tombstone */
data TEXT,
/* Same "sync change counter" strategy used by other components. */
sync_change_counter INTEGER NOT NULL DEFAULT 1
);

-- The sync mirror: server-side record state (see the ext_id comment below).
CREATE TABLE IF NOT EXISTS storage_sync_mirror (
guid TEXT NOT NULL PRIMARY KEY,
/* The extension_id is explicitly not the GUID used on the server.
It can't be a regular foreign-key relationship back to storage_sync_data
as items with no data on the server (ie, deleted items) will not appear
in storage_sync_data.
*/
-- NOTE(review): schema v2 drops this NOT NULL constraint (see
-- upgrade_from_1 / test_upgrade_2); it is present here because this file
-- captures the v1 shape.
ext_id TEXT NOT NULL UNIQUE,
/* The JSON payload. We *do* allow NULL here - it means "deleted" */
data TEXT
);

-- This table holds key-value metadata - primarily for sync.
CREATE TABLE IF NOT EXISTS meta (
key TEXT PRIMARY KEY,
/* No declared type: SQLite permits typeless columns, so any value type
   can be stored here. */
value NOT NULL
) WITHOUT ROWID;

-- Insert a couple of test rows so migration tests can verify that existing
-- data survives the upgrade.
INSERT INTO storage_sync_mirror(guid, ext_id, data)
VALUES
('guid-1', 'ext-id-1', 'data-1'),
('guid-2', 'ext-id-2', 'data-2');

PRAGMA user_version = 1;

Просмотреть файл

@ -7,8 +7,8 @@ use crate::schema;
use rusqlite::types::{FromSql, ToSql};
use rusqlite::Connection;
use rusqlite::OpenFlags;
use sql_support::open_database::open_database_with_flags;
use sql_support::{ConnExt, SqlInterruptHandle, SqlInterruptScope};
use std::fs;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::result;
@ -50,22 +50,11 @@ impl StorageDb {
| OpenFlags::SQLITE_OPEN_CREATE
| OpenFlags::SQLITE_OPEN_READ_WRITE;
let conn = Connection::open_with_flags(db_path.clone(), flags)?;
match init_sql_connection(&conn, true) {
Ok(()) => Ok(Self {
writer: conn,
interrupt_counter: Arc::new(AtomicUsize::new(0)),
}),
Err(e) => {
// like with places, failure to upgrade means "you lose your data"
if let ErrorKind::DatabaseUpgradeError = e.kind() {
fs::remove_file(&db_path)?;
Self::new_named(db_path)
} else {
Err(e)
}
}
}
let conn = open_database_with_flags(db_path, flags, &schema::WebExtMigrationLogin)?;
Ok(Self {
writer: conn,
interrupt_counter: Arc::new(AtomicUsize::new(0)),
})
}
/// Returns an interrupt handle for this database connection. This handle
@ -129,38 +118,6 @@ impl DerefMut for StorageDb {
}
}
/// One-time setup for a freshly opened connection: apply our standard
/// pragmas, register custom SQL functions and size the prepared-statement
/// cache. When `is_writable` is true, also create or upgrade the schema
/// inside a transaction.
fn init_sql_connection(conn: &Connection, is_writable: bool) -> Result<()> {
    let initial_pragmas = "
-- We don't care about temp tables being persisted to disk.
PRAGMA temp_store = 2;
-- we unconditionally want write-ahead-logging mode
PRAGMA journal_mode=WAL;
-- foreign keys seem worth enforcing!
PRAGMA foreign_keys = ON;
";
    conn.execute_batch(initial_pragmas)?;
    // `conn` is already a `&Connection` - re-borrowing it (`&conn`) trips
    // clippy::needless_borrow and adds nothing.
    define_functions(conn)?;
    conn.set_prepared_statement_cache_capacity(128);
    if is_writable {
        // Run schema creation/upgrade atomically so a failure can't leave
        // the database half-migrated.
        let tx = conn.unchecked_transaction()?;
        schema::init(conn)?;
        tx.commit()?;
    }
    Ok(())
}
/// Register our custom SQL functions on the given connection.
fn define_functions(conn: &Connection) -> Result<()> {
    use rusqlite::functions::FunctionFlags;
    // generate_guid(): a zero-argument scalar producing a fresh sync GUID.
    let flags = FunctionFlags::SQLITE_UTF8;
    conn.create_scalar_function("generate_guid", 0, flags, sql_fns::generate_guid)?;
    Ok(())
}
pub(crate) mod sql_fns {
use rusqlite::{functions::Context, Result};
use sync_guid::Guid as SyncGuid;

Просмотреть файл

@ -47,11 +47,8 @@ pub enum ErrorKind {
#[error("UTF8 Error: {0}")]
Utf8Error(#[from] std::str::Utf8Error),
#[error("Database cannot be upgraded")]
DatabaseUpgradeError,
#[error("Database version {0} is not supported")]
UnsupportedDatabaseVersion(i64),
#[error("Error opening database: {0}")]
OpenDatabaseError(#[from] sql_support::open_database::Error),
#[error("{0}")]
IncomingPayloadError(#[from] bridged_engine::PayloadError),
@ -64,6 +61,7 @@ error_support::define_error! {
(IoError, std::io::Error),
(InterruptedError, Interrupted),
(Utf8Error, std::str::Utf8Error),
(IncomingPayloadError, bridged_engine::PayloadError)
(IncomingPayloadError, bridged_engine::PayloadError),
(OpenDatabaseError, sql_support::open_database::Error),
}
}

Просмотреть файл

@ -2,83 +2,72 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// XXXXXX - This has been cloned from places/src/schema.rs, which has the
// comment:
// // This has been cloned from logins/src/schema.rs, on Thom's
// // wip-sync-sql-store branch.
// // We should work out how to turn this into something that can use a shared
// // db.rs.
//
// And we really should :) But not now.
use crate::db::sql_fns;
use crate::error::Result;
use rusqlite::{Connection, NO_PARAMS};
use sql_support::ConnExt;
const VERSION: i64 = 2;
use rusqlite::Connection;
use sql_support::open_database::{MigrationLogic, Result as MigrationResult};
const CREATE_SCHEMA_SQL: &str = include_str!("../sql/create_schema.sql");
const CREATE_SYNC_TEMP_TABLES_SQL: &str = include_str!("../sql/create_sync_temp_tables.sql");
/// Read the schema version recorded in SQLite's `user_version` pragma.
fn get_current_schema_version(db: &Connection) -> Result<i64> {
    let version = db.query_one::<i64>("PRAGMA user_version")?;
    Ok(version)
}
pub struct WebExtMigrationLogin;
pub fn init(db: &Connection) -> Result<()> {
let user_version = get_current_schema_version(db)?;
if user_version == 0 {
create(db)?;
} else if user_version != VERSION {
if user_version < VERSION {
upgrade(db, user_version)?;
} else {
log::warn!(
"Loaded future schema version {} (we only understand version {}). \
Optimistically ",
user_version,
VERSION
);
// Downgrade the schema version, so that anything added with our
// schema is migrated forward when the newer library reads our
// database.
db.execute_batch(&format!("PRAGMA user_version = {};", VERSION))?;
}
create(db)?;
impl MigrationLogic for WebExtMigrationLogin {
const NAME: &'static str = "webext storage db";
const END_VERSION: u32 = 2;
/// Connection-level setup: pragmas, custom SQL functions and the
/// prepared-statement cache.
/// NOTE(review): presumably invoked by `open_database` before the schema
/// version is checked - confirm against `sql_support::open_database` docs.
fn prepare(&self, conn: &Connection) -> MigrationResult<()> {
    let initial_pragmas = "
-- We don't care about temp tables being persisted to disk.
PRAGMA temp_store = 2;
-- we unconditionally want write-ahead-logging mode
PRAGMA journal_mode=WAL;
-- foreign keys seem worth enforcing!
PRAGMA foreign_keys = ON;
";
    conn.execute_batch(initial_pragmas)?;
    // Pass the reference straight through - `&conn` would be a needless
    // re-borrow (clippy::needless_borrow).
    define_functions(conn)?;
    conn.set_prepared_statement_cache_capacity(128);
    Ok(())
}
Ok(())
}
fn create(db: &Connection) -> Result<()> {
log::debug!("Creating schema");
db.execute_batch(CREATE_SCHEMA_SQL)?;
db.execute(
&format!("PRAGMA user_version = {version}", version = VERSION),
NO_PARAMS,
)?;
Ok(())
}
fn upgrade(db: &Connection, from: i64) -> Result<()> {
log::debug!("Upgrading schema from {} to {}", from, VERSION);
if from == VERSION {
return Ok(());
}
// Places has a cute `migration` helper we can consider using if we get
// a few complicated updates, but let's KISS for now.
if from == 1 {
// We changed a not null constraint
log::debug!("Upgrading schema from 1 to 2");
db.execute_batch("ALTER TABLE storage_sync_mirror RENAME TO old_mirror;")?;
// just re-run the full schema commands to recreate the able.
fn init(&self, db: &Connection) -> MigrationResult<()> {
log::debug!("Creating schema");
db.execute_batch(CREATE_SCHEMA_SQL)?;
db.execute_batch(
"INSERT OR IGNORE INTO storage_sync_mirror(guid, ext_id, data)
SELECT guid, ext_id, data FROM old_mirror;",
)?;
db.execute_batch("DROP TABLE old_mirror;")?;
db.execute_batch("PRAGMA user_version = 2;")?;
Ok(())
}
/// Run the upgrade step for a database whose on-disk schema is `version`.
fn upgrade_from(&self, db: &Connection, version: u32) -> MigrationResult<()> {
    if version == 1 {
        return upgrade_from_1(db);
    }
    // Any other version is a bug: the framework should only hand us
    // versions between the fixture's and END_VERSION.
    panic!("Unexpected upgrade version")
}
}
/// Register the custom SQL functions used by our schema and queries.
fn define_functions(conn: &Connection) -> MigrationResult<()> {
    use rusqlite::functions::FunctionFlags;
    // generate_guid(): zero-argument scalar returning a new sync GUID.
    let flags = FunctionFlags::SQLITE_UTF8;
    conn.create_scalar_function("generate_guid", 0, flags, sql_fns::generate_guid)?;
    Ok(())
}
/// Schema upgrade from v1 to v2.
fn upgrade_from_1(db: &Connection) -> MigrationResult<()> {
    // We changed a not null constraint: v1 declared `ext_id` as NOT NULL,
    // v2 must allow NULL (see test_upgrade_2). SQLite can't relax a
    // constraint in place, so rename the old table out of the way first...
    db.execute_batch("ALTER TABLE storage_sync_mirror RENAME TO old_mirror;")?;
    // ...just re-run the full schema commands to recreate the table in its
    // new shape...
    db.execute_batch(CREATE_SCHEMA_SQL)?;
    // ...copy the existing rows across...
    db.execute_batch(
        "INSERT OR IGNORE INTO storage_sync_mirror(guid, ext_id, data)
         SELECT guid, ext_id, data FROM old_mirror;",
    )?;
    // ...and drop the renamed copy.
    db.execute_batch("DROP TABLE old_mirror;")?;
    // Record the new schema version.
    db.execute_batch("PRAGMA user_version = 2;")?;
    Ok(())
}
@ -132,6 +121,10 @@ mod tests {
use super::*;
use crate::db::test::new_mem_db;
use rusqlite::Error;
use sql_support::open_database::test_utils::MigratedDatabaseFile;
use sql_support::ConnExt;
const CREATE_SCHEMA_V1_SQL: &str = include_str!("../sql/tests/create_schema_v1.sql");
#[test]
fn test_create_schema_twice() {
@ -174,48 +167,10 @@ mod tests {
}
#[test]
fn test_upgrade_1_2() -> Result<()> {
let _ = env_logger::try_init();
// This is the table definition of the table we are migrating away
// from - note the NOT NULL on ext_id, which no longer exists.
let old_create_table = "CREATE TABLE IF NOT EXISTS storage_sync_mirror (
guid TEXT NOT NULL PRIMARY KEY,
ext_id TEXT NOT NULL UNIQUE,
data TEXT
);";
// Create a named in-memory database.
let db = new_mem_db();
db.execute_batch("DROP TABLE storage_sync_mirror;")?;
db.execute_batch(old_create_table)?;
db.execute_batch(
"INSERT INTO storage_sync_mirror(guid, ext_id, data)
VALUES
('guid-1', 'ext-id-1', 'data-1'),
('guid-2', 'ext-id-2', 'data-2')",
)?;
db.execute_batch("PRAGMA user_version = 1")?;
// We are back to what a v1 database looks like. Make sure the NOT NULL
// constraint is honoured.
assert!(db
.execute_batch(
"INSERT INTO storage_sync_mirror(guid, ext_id, data)
VALUES ('guid-3', NULL, NULL);"
)
.is_err());
// Ugrade
upgrade(&db, 1)?;
assert_eq!(get_current_schema_version(&db)?, VERSION);
// Should be able to insert the new row.
db.execute_batch(
"INSERT INTO storage_sync_mirror(guid, ext_id, data)
VALUES ('guid-3', NULL, NULL);",
)?;
fn test_all_upgrades() -> Result<()> {
let db_file = MigratedDatabaseFile::new(WebExtMigrationLogin, CREATE_SCHEMA_V1_SQL);
db_file.run_all_upgrades();
let db = db_file.open();
let get_id_data = |guid: &str| -> Result<(Option<String>, Option<String>)> {
let (ext_id, data) = db
@ -236,7 +191,22 @@ mod tests {
get_id_data("guid-2")?,
(Some("ext-id-2".to_string()), Some("data-2".to_string()))
);
assert_eq!(get_id_data("guid-3")?, (None, None));
Ok(())
}
#[test]
fn test_upgrade_2() -> Result<()> {
    let _ = env_logger::try_init();
    // Start from the canned v1 database and migrate it up to schema v2.
    let fixture = MigratedDatabaseFile::new(WebExtMigrationLogin, CREATE_SCHEMA_V1_SQL);
    fixture.upgrade_to(2);
    let conn = fixture.open();
    // v2 dropped the NOT NULL constraint on ext_id, so inserting a row
    // with a NULL ext_id must now succeed.
    conn.execute_batch(
        "INSERT INTO storage_sync_mirror(guid, ext_id, data)
         VALUES ('guid-3', NULL, NULL);",
    )?;
    Ok(())
}
}