Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1820478 - Set a default Sqlite journal size limit, and persist auxiliary files. r=asuth
Set a default journal_size_limit, so journals are always truncated back to a sensible maximum size. Change existing consumers to just use the default, except Places, which keeps using a larger 4 MiB limit.

Make auxiliary files (-shm, -journal, -wal, ...) persist on disk, to avoid the cost of repeatedly creating and removing them. Since there is a journal_size_limit, they are truncated instead of deleted.

Differential Revision: https://phabricator.services.mozilla.com/D172185
This commit is contained in:
Parent: f0cfc85ed3
Commit: 4de92044b6
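For illustration only, a minimal sketch using the plain SQLite C API rather than Gecko's mozStorage wrapper (the helper names below are hypothetical): with SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT compiled in, a consumer gets journal truncation without issuing any pragma, while a consumer that needs more headroom, as Places does with its 4 MiB limit, can still override it per connection.

#include <sqlite3.h>

static int OpenWithDefaults(const char* aPath, sqlite3** aDb) {
  int rc = sqlite3_open(aPath, aDb);
  if (rc != SQLITE_OK) {
    return rc;
  }
  // No "PRAGMA journal_size_limit" here: the compiled-in default applies.
  return sqlite3_exec(*aDb, "PRAGMA journal_mode = WAL;", nullptr, nullptr,
                      nullptr);
}

static int OpenWithLargerLimit(const char* aPath, sqlite3** aDb) {
  int rc = OpenWithDefaults(aPath, aDb);
  if (rc != SQLITE_OK) {
    return rc;
  }
  // Explicit per-connection override, analogous to the larger limit Places
  // keeps using.
  return sqlite3_exec(*aDb, "PRAGMA journal_size_limit = 4194304;", nullptr,
                      nullptr, nullptr);
}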
@@ -581,14 +581,13 @@ nsresult InitializeConnection(mozIStorageConnection& aConn) {
 
   // Enable WAL journaling. This must be performed in a separate transaction
   // after changing the page_size and enabling auto_vacuum.
+  // Note there is a default journal_size_limit set by mozStorage.
   QM_TRY(MOZ_TO_RESULT(aConn.ExecuteSimpleSQL(nsPrintfCString(
       // WAL journal can grow to given number of *pages*
       "PRAGMA wal_autocheckpoint = %u; "
-      // Always truncate the journal back to given number of *bytes*
-      "PRAGMA journal_size_limit = %u; "
       // WAL must be enabled at the end to allow page size to be changed, etc.
       "PRAGMA journal_mode = WAL; ",
-      kWalAutoCheckpointPages, kWalAutoCheckpointSize))));
+      kWalAutoCheckpointPages))));
 
   // Verify that we successfully set the vacuum mode to incremental. It
   // is very easy to put the database in a state where the auto_vacuum
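A side note on the *pages* vs *bytes* comments in the hunk above, sketched with the plain SQLite C API (the constant values below are assumptions, not the real Gecko values): wal_autocheckpoint is expressed in pages, so a byte budget has to be divided by the page size before it is handed to SQLite.

#include <sqlite3.h>

// Hypothetical values standing in for kWalAutoCheckpointSize and the page
// size; only the pages = bytes / page_size conversion is the point here.
static int ConfigureWalCheckpoint(sqlite3* aDb) {
  const int kWalByteBudget = 512 * 1024;  // assumed byte budget
  const int kPageSize = 4096;             // assumed page size
  const int kWalPages = kWalByteBudget / kPageSize;
  // Ask SQLite to checkpoint once the WAL grows to this many pages; the
  // journal_size_limit (in bytes) then caps how large the file stays on disk.
  return sqlite3_wal_autocheckpoint(aDb, kWalPages);
}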
@@ -174,7 +174,8 @@ class IDBEncryptionPBM(MarionetteTestCase):
         """
         if not os.path.exists(self.getIDBStoragePath()):
             return False
-        return self.findDirObj(self.idbStoragePath, ".sqlite-wal", True) is None
+        walPath = self.findDirObj(self.idbStoragePath, ".sqlite-wal", True)
+        return walPath is None or os.stat(walPath).st_size == 0
 
     def ensureInvariantHolds(self, op):
         maxWaitTime = 60
@@ -334,8 +334,6 @@ const uint32_t kPreparedDatastoreTimeoutMs = 20000;
 // Shadow database Write Ahead Log's maximum size is 512KB
 const uint32_t kShadowMaxWALSize = 512 * 1024;
 
-const uint32_t kShadowJournalSizeLimit = kShadowMaxWALSize * 3;
-
 bool IsOnGlobalConnectionThread();
 
 void AssertIsOnGlobalConnectionThread();
@@ -813,15 +811,10 @@ nsresult SetShadowJournalMode(mozIStorageConnection* aConnection) {
 
     MOZ_ASSERT(pageSize >= 512 && pageSize <= 65536);
 
+    // Note there is a default journal_size_limit set by mozStorage.
     QM_TRY(MOZ_TO_RESULT(aConnection->ExecuteSimpleSQL(
         "PRAGMA wal_autocheckpoint = "_ns +
         IntToCString(static_cast<int32_t>(kShadowMaxWALSize / pageSize)))));
-
-    // Set the maximum WAL log size to reduce footprint on mobile (large empty
-    // WAL files will be truncated)
-    QM_TRY(MOZ_TO_RESULT(
-        aConnection->ExecuteSimpleSQL("PRAGMA journal_size_limit = "_ns +
-                                      IntToCString(kShadowJournalSizeLimit)))));
   } else {
     QM_TRY(MOZ_TO_RESULT(
         aConnection->ExecuteSimpleSQL(journalModeQueryStart + "truncate"_ns)));
@@ -701,6 +701,7 @@ nsresult StorageDBThread::ConfigureWALBehavior() {
 
   // Set the threshold for auto-checkpointing the WAL.
   // We don't want giant logs slowing down reads & shutdown.
+  // Note there is a default journal_size_limit set by mozStorage.
   int32_t thresholdInPages =
       static_cast<int32_t>(MAX_WAL_SIZE_BYTES / pageSize);
   nsAutoCString thresholdPragma("PRAGMA wal_autocheckpoint = ");
@@ -708,15 +709,6 @@ nsresult StorageDBThread::ConfigureWALBehavior() {
   rv = mWorkerConnection->ExecuteSimpleSQL(thresholdPragma);
   NS_ENSURE_SUCCESS(rv, rv);
 
-  // Set the maximum WAL log size to reduce footprint on mobile (large empty
-  // WAL files will be truncated)
-  nsAutoCString journalSizePragma("PRAGMA journal_size_limit = ");
-  // bug 600307: mak recommends setting this to 3 times the auto-checkpoint
-  // threshold
-  journalSizePragma.AppendInt(MAX_WAL_SIZE_BYTES * 3);
-  rv = mWorkerConnection->ExecuteSimpleSQL(journalSizePragma);
-  NS_ENSURE_SUCCESS(rv, rv);
-
   return NS_OK;
 }
 
@@ -84,6 +84,20 @@ int BaseCheckReservedLock(sqlite3_file* pFile, int* pResOut) {
 }
 
 int BaseFileControl(sqlite3_file* pFile, int op, void* pArg) {
+#ifdef EARLY_BETA_OR_EARLIER
+  // Persist auxiliary files (-shm and -wal) on disk, because creating and
+  // deleting them may be expensive on slow storage.
+  // Only do this when there is a journal size limit, so the journal is
+  // truncated instead of deleted on shutdown, that feels safer if the user
+  // moves a database file around without its auxiliary files.
+  MOZ_ASSERT(
+      ::sqlite3_compileoption_used("DEFAULT_JOURNAL_SIZE_LIMIT"),
+      "A journal size limit ensures the journal is truncated on shutdown");
+  if (op == SQLITE_FCNTL_PERSIST_WAL) {
+    *static_cast<int*>(pArg) = 1;
+    return SQLITE_OK;
+  }
+#endif
   BaseFile* p = (BaseFile*)pFile;
   return p->pReal->pMethods->xFileControl(p->pReal, op, pArg);
 }
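For readers unfamiliar with this file-control, a minimal sketch of how persistent WAL is normally requested through SQLite's public API, outside of the Gecko VFS above (the helper name is hypothetical). The VFS change answers SQLITE_FCNTL_PERSIST_WAL itself, so every connection built on this VFS behaves as if the application had opted in.

#include <sqlite3.h>

// SQLITE_FCNTL_PERSIST_WAL takes a pointer to an int: 1 enables persistence,
// 0 disables it, and -1 only queries the current setting.
static int EnablePersistentWal(sqlite3* aDb) {
  int persist = 1;  // keep the -wal (and -shm) files on disk after close
  return sqlite3_file_control(aDb, "main", SQLITE_FCNTL_PERSIST_WAL, &persist);
}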
@@ -10,6 +10,7 @@ var { AppConstants } = ChromeUtils.importESModule(
 );
 
 ChromeUtils.defineESModuleGetters(this, {
+  FileUtils: "resource://gre/modules/FileUtils.sys.mjs",
   Sqlite: "resource://gre/modules/Sqlite.sys.mjs",
   TelemetryTestUtils: "resource://testing-common/TelemetryTestUtils.sys.mjs",
   TestUtils: "resource://testing-common/TestUtils.sys.mjs",
@@ -0,0 +1,44 @@
+/* Any copyright is dedicated to the Public Domain.
+ * https://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Tests the default journal_size_limit.
+
+async function check_journal_size(db) {
+  let stmt = db.createAsyncStatement("PRAGMA journal_size_limit");
+  let value = await new Promise((resolve, reject) => {
+    stmt.executeAsync({
+      handleResult(resultSet) {
+        resolve(resultSet.getNextRow().getResultByIndex(0));
+      },
+      handleError(error) {
+        reject();
+      },
+      handleCompletion() {},
+    });
+  });
+  Assert.greater(value, 0, "There is a positive journal_size_limit");
+  stmt.finalize();
+  await new Promise(resolve => db.asyncClose(resolve));
+}
+
+async function getDbPath(name) {
+  let path = PathUtils.join(PathUtils.profileDir, name + ".sqlite");
+  Assert.ok(!(await IOUtils.exists(path)));
+  return path;
+}
+
+add_task(async function() {
+  await check_journal_size(
+    Services.storage.openDatabase(
+      new FileUtils.File(await getDbPath("journal"))
+    )
+  );
+  await check_journal_size(
+    Services.storage.openUnsharedDatabase(
+      new FileUtils.File(await getDbPath("journalUnshared"))
+    )
+  );
+  await check_journal_size(
+    await openAsyncDatabase(new FileUtils.File(await getDbPath("journalAsync")))
+  );
+});
@@ -0,0 +1,89 @@
+/* Any copyright is dedicated to the Public Domain.
+ * https://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Tests the journal persists on close.
+
+async function check_journal_persists(db, journal) {
+  let path = db.databaseFile.path;
+  info(`testing ${path}`);
+  await new Promise(resolve => {
+    db.executeSimpleSQLAsync(`PRAGMA journal_mode = ${journal}`, {
+      handleCompletion: resolve,
+    });
+  });
+
+  await new Promise(resolve => {
+    db.executeSimpleSQLAsync("CREATE TABLE test (id INTEGER PRIMARY KEY)", {
+      handleCompletion: resolve,
+    });
+  });
+
+  if (journal == "wal") {
+    Assert.ok(await IOUtils.exists(path + "-wal"), "-wal exists before close");
+    Assert.greater(
+      (await IOUtils.stat(path + "-wal")).size,
+      0,
+      "-wal size is non-zero"
+    );
+  } else {
+    Assert.ok(
+      await IOUtils.exists(path + "-journal"),
+      "-journal exists before close"
+    );
+    Assert.equal(
+      (await IOUtils.stat(path + "-journal")).size,
+      0,
+      "-journal is truncated after every transaction"
+    );
+  }
+
+  await new Promise(resolve => db.asyncClose(resolve));
+
+  if (journal == "wal") {
+    Assert.ok(await IOUtils.exists(path + "-wal"), "-wal persists after close");
+    Assert.equal(
+      (await IOUtils.stat(path + "-wal")).size,
+      0,
+      "-wal has been truncated"
+    );
+  } else {
+    Assert.ok(
+      await IOUtils.exists(path + "-journal"),
+      "-journal persists after close"
+    );
+    Assert.equal(
+      (await IOUtils.stat(path + "-journal")).size,
+      0,
+      "-journal has been truncated"
+    );
+  }
+}
+
+async function getDbPath(name) {
+  let path = PathUtils.join(PathUtils.profileDir, name + ".sqlite");
+  Assert.ok(!(await IOUtils.exists(path)), "database should not exist");
+  return path;
+}
+
+add_task(async function() {
+  for (let journal of ["truncate", "wal"]) {
+    await check_journal_persists(
+      Services.storage.openDatabase(
+        new FileUtils.File(await getDbPath(`shared-${journal}`))
+      ),
+      journal
+    );
+    await check_journal_persists(
+      Services.storage.openUnsharedDatabase(
+        new FileUtils.File(await getDbPath(`unshared-${journal}`))
+      ),
+      journal
+    );
+    await check_journal_persists(
+      await openAsyncDatabase(
+        new FileUtils.File(await getDbPath(`async-${journal}`))
+      ),
+      journal
+    );
+  }
+});
@@ -24,6 +24,7 @@ fail-if = os == "android"
 # on debug builds, so we can only test on non-debug builds.
 skip-if = debug
 [test_connection_interrupt.js]
+[test_default_journal_size_limit.js]
 [test_js_helpers.js]
 [test_levenshtein.js]
 [test_like.js]
@@ -31,6 +32,8 @@ skip-if = debug
 [test_locale_collation.js]
 [test_minimizeMemory.js]
 [test_page_size_is_32k.js]
+[test_persist_journal.js]
+skip-if = release_or_beta
 [test_readonly-immutable-nolock_vfs.js]
 [test_retry_on_busy.js]
 [test_sqlite_secure_delete.js]
@@ -62,6 +62,8 @@ if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'uikit':
 if CONFIG['MOZ_DEBUG']:
     DEFINES['SQLITE_DEBUG'] = 1
     DEFINES['SQLITE_ENABLE_API_ARMOR'] = True
+else:
+    DEFINES['SQLITE_OMIT_COMPILEOPTION_DIAGS'] = 1
 
 if CONFIG['OS_TARGET'] == 'Android':
     # default to user readable only to fit Android security model
@@ -107,3 +109,14 @@ if CONFIG['CC_TYPE'] in ('clang', 'gcc'):
         '-Wno-sign-compare',
         '-Wno-type-limits',
     ]
+
+# Set a default journal size limit. Note a hot journal can grow over this
+# limit, but if it does, SQLite will truncate it once it returns to idle.
+# Also note growing a journal has a cost, so too strict a limit may affect
+# performance.
+# Also note this is necessary to safely support SQLITE_FCNTL_PERSIST_WAL,
+# which our base VFS uses; when a journal limit is set, the journal will
+# be truncated to 0 on shutdown, reducing the likelihood of corruption if the
+# user doesn't move auxiliary files along with the main database.
+# This is in bytes.
+DEFINES['SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT'] = 1572864
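As a sanity check, the compiled-in default can be observed from any connection; a sketch with the plain SQLite C API (the helper is hypothetical). Note 1572864 bytes is 1.5 MiB, i.e. three times the 512 KiB WAL budgets used by the consumers above, and issuing PRAGMA journal_size_limit with no argument just reports the current limit.

#include <sqlite3.h>
#include <cstdio>

// Prints the connection's journal_size_limit; with the define above and no
// per-connection override this reports 1572864.
static void PrintJournalSizeLimit(sqlite3* aDb) {
  sqlite3_stmt* stmt = nullptr;
  if (sqlite3_prepare_v2(aDb, "PRAGMA journal_size_limit", -1, &stmt,
                         nullptr) == SQLITE_OK &&
      sqlite3_step(stmt) == SQLITE_ROW) {
    std::printf("journal_size_limit = %lld\n",
                static_cast<long long>(sqlite3_column_int64(stmt, 0)));
  }
  sqlite3_finalize(stmt);
}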
@@ -39,6 +39,9 @@ sqlite3_column_text16
 sqlite3_column_type
 sqlite3_column_value
 sqlite3_commit_hook
+#ifdef DEBUG
+sqlite3_compileoption_used
+#endif
 sqlite3_complete
 sqlite3_complete16
 sqlite3_config