Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1588329 - Use the new `variableLimit` getter to chunk binding params in Places. r=mak
Differential Revision: https://phabricator.services.mozilla.com/D49073

--HG--
extra : moz-landing-system : lando

Parent: a0d2e56628
Commit: 367e4411cc
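
Every call site below follows the same pattern: instead of chunking arrays of bound parameters by a hard-coded `SQLITE_MAX_VARIABLE_NUMBER = 999`, the code now asks the storage connection for its actual limit (`db.variableLimit` in JavaScript, `db.variable_limit()?` in Rust, `GetVariableLimit` in C++). A minimal JavaScript sketch of the pattern, assembled from pieces shown in the hunks below; the wrapper function is illustrative, not code from the tree:

// Illustrative composite of the pattern this commit applies throughout
// Places: chunk the GUID list by the connection's variable limit, then
// bind one "?" per GUID in each statement.
async function deleteTombstonesFor(db, guids) {
  for (let chunk of PlacesUtils.chunkArray(guids, db.variableLimit)) {
    await db.executeCached(
      `DELETE FROM moz_bookmarks_deleted
       WHERE guid IN (${new Array(chunk.length).fill("?").join(",")})`,
      chunk
    );
  }
}
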
@@ -103,7 +103,6 @@ async function promiseTagsFolderId() {
 const MATCH_ANYWHERE_UNMODIFIED =
   Ci.mozIPlacesAutoComplete.MATCH_ANYWHERE_UNMODIFIED;
 const BEHAVIOR_BOOKMARK = Ci.mozIPlacesAutoComplete.BEHAVIOR_BOOKMARK;
-const SQLITE_MAX_VARIABLE_NUMBER = 999;

 var Bookmarks = Object.freeze({
   /**
@@ -2217,16 +2216,10 @@ function insertBookmarkTree(items, source, parent, urls, lastAddedForParent) {
     );

     // Remove stale tombstones for new items.
-    for (let chunk of PlacesUtils.chunkArray(
-      items,
-      SQLITE_MAX_VARIABLE_NUMBER
-    )) {
+    for (let chunk of PlacesUtils.chunkArray(items, db.variableLimit)) {
       await db.executeCached(
-        `DELETE FROM moz_bookmarks_deleted WHERE guid IN (${new Array(
-          chunk.length
-        )
-          .fill("?")
-          .join(",")})`,
+        `DELETE FROM moz_bookmarks_deleted
+         WHERE guid IN (${sqlBindPlaceholders(chunk)})`,
         chunk.map(item => item.guid)
       );
     }
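
The new statement interpolates `sqlBindPlaceholders(chunk)` where the old code built the "?" list inline with `new Array(chunk.length).fill("?").join(",")`. A sketch of what such a helper presumably expands to; the real helper is not shown in this diff and may differ:

// Hypothetical sketch: expand a chunk of values into a comma-separated list
// of "?" placeholders, matching what the replaced inline expression produced.
function sqlBindPlaceholders(values) {
  return new Array(values.length).fill("?").join(",");
}

// Usage, as in the hunk above:
// `DELETE FROM moz_bookmarks_deleted WHERE guid IN (${sqlBindPlaceholders(chunk)})`
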
@@ -2628,10 +2621,7 @@ function removeBookmarks(items, options) {
       }
     }

-    for (let chunk of PlacesUtils.chunkArray(
-      items,
-      SQLITE_MAX_VARIABLE_NUMBER
-    )) {
+    for (let chunk of PlacesUtils.chunkArray(items, db.variableLimit)) {
       // We don't go through the annotations service for this cause otherwise
       // we'd get a pointless onItemChanged notification and it would also
       // set lastModified to an unexpected value.
@@ -2639,9 +2629,8 @@ function removeBookmarks(items, options) {

       // Remove the bookmarks.
       await db.executeCached(
-        `DELETE FROM moz_bookmarks WHERE guid IN (${new Array(chunk.length)
-          .fill("?")
-          .join(",")})`,
+        `DELETE FROM moz_bookmarks
+         WHERE guid IN (${sqlBindPlaceholders(chunk)})`,
         chunk.map(item => item.guid)
       );
     }

@@ -31,7 +31,6 @@ var PlacesSyncUtils = {};
 const { SOURCE_SYNC } = Ci.nsINavBookmarksService;

 const MICROSECONDS_PER_SECOND = 1000000;
-const SQLITE_MAX_VARIABLE_NUMBER = 999;

 const MOBILE_BOOKMARKS_PREF = "browser.bookmarks.showMobileBookmarks";

@@ -238,10 +237,7 @@ const HistorySyncUtils = (PlacesSyncUtils.history = Object.freeze({
     // aren't stored in the database.
     let db = await PlacesUtils.promiseDBConnection();
     let nonSyncableGuids = [];
-    for (let chunk of PlacesUtils.chunkArray(
-      guids,
-      SQLITE_MAX_VARIABLE_NUMBER
-    )) {
+    for (let chunk of PlacesUtils.chunkArray(guids, db.variableLimit)) {
       let rows = await db.execute(
         `
         SELECT DISTINCT p.guid FROM moz_places p

@@ -80,8 +80,6 @@ const SyncedBookmarksMerger = Components.Constructor(
 const DB_URL_LENGTH_MAX = 65536;
 const DB_TITLE_LENGTH_MAX = 4096;

-const SQLITE_MAX_VARIABLE_NUMBER = 999;
-
 // The current mirror database schema version. Bump for migrations, then add
 // migration code to `migrateMirrorSchema`.
 const MIRROR_SCHEMA_VERSION = 7;
@@ -1008,7 +1006,7 @@ class SyncedBookmarksMirror {
     let offset = 0;
     for (let chunk of PlacesUtils.chunkArray(
       children,
-      SQLITE_MAX_VARIABLE_NUMBER - 1
+      this.db.variableLimit - 1
     )) {
       if (signal.aborted) {
         throw new SyncedBookmarksMirror.InterruptedError(
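
Note the chunk size here is `variableLimit - 1`, carried over from the old `SQLITE_MAX_VARIABLE_NUMBER - 1`. The reason is not visible in this hunk; presumably the statement binds one extra parameter besides the chunked children, so one placeholder slot is reserved for it. A hedged sketch of that accounting, using an illustrative query rather than the mirror's actual statement:

// Hypothetical: one extra bound value (here a frecency threshold) plus one
// "?" per chunked GUID must stay within db.variableLimit, hence the "- 1".
async function guidsAboveFrecency(db, minFrecency, guids) {
  let matches = [];
  for (let chunk of PlacesUtils.chunkArray(guids, db.variableLimit - 1)) {
    let rows = await db.execute(
      `SELECT guid FROM moz_places
       WHERE frecency >= ? AND guid IN (${new Array(chunk.length)
        .fill("?")
        .join(",")})`,
      [minFrecency, ...chunk]
    );
    matches.push(...rows.map(row => row.getResultByName("guid")));
  }
  return matches;
}
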

@@ -17,8 +17,6 @@ use crate::error::{Error, Result};

 pub const LMANNO_FEEDURI: &'static str = "livemark/feedURI";

-const SQLITE_MAX_VARIABLE_NUMBER: usize = 999;
-
 extern "C" {
     fn NS_NavBookmarksTotalSyncChanges() -> i64;
 }
@@ -487,7 +485,7 @@ fn update_local_items_in_places<'t>(
     // do this before inserting new remote items, since we need Place IDs for
     // both old and new URLs.
     debug!(driver, "Inserting Places for new items");
-    for chunk in ops.apply_remote_items.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in ops.apply_remote_items.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "INSERT OR IGNORE INTO moz_places(url, url_hash, rev_host, hidden,
                                               frecency, guid)
@@ -517,10 +515,7 @@ fn update_local_items_in_places<'t>(

     // Build a table of new and updated items.
     debug!(driver, "Staging apply remote item ops");
-    for chunk in ops
-        .apply_remote_items
-        .chunks(SQLITE_MAX_VARIABLE_NUMBER / 3)
-    {
+    for chunk in ops.apply_remote_items.chunks(db.variable_limit()? / 3) {
         // CTEs in `WITH` clauses aren't indexed, so this query needs a full
         // table scan on `ops`. But that's okay; a separate temp table for ops
         // would also need a full scan. Note that we need both the local _and_
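
Several of the mirror's chunk sizes divide the limit (by 3 here, by 2 in the hunks below) rather than using it directly. The likely reason, hedged because the full statements are not shown in this diff, is that each chunked element contributes more than one bound parameter, so the divisor is the parameter count per element. A generic sketch of that arithmetic, in JavaScript for consistency with the Places examples above; the helper name is illustrative:

// Hypothetical sketch: if every item contributes paramsPerItem bound values,
// chunking by floor(limit / paramsPerItem) keeps each statement's total
// placeholder count at or below the connection's variable limit.
function chunkByParamCount(db, items, paramsPerItem) {
  return PlacesUtils.chunkArray(
    items,
    Math.floor(db.variableLimit / paramsPerItem)
  );
}
// e.g. iterating chunkByParamCount(db, ops, 3) binds at most
// 3 * chunk.length parameters per statement.
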
@@ -594,7 +589,7 @@ fn update_local_items_in_places<'t>(
     }

     debug!(driver, "Staging change GUID ops");
-    for chunk in ops.change_guids.chunks(SQLITE_MAX_VARIABLE_NUMBER / 2) {
+    for chunk in ops.change_guids.chunks(db.variable_limit()? / 2) {
         let mut statement = db.prepare(format!(
             "INSERT INTO changeGuidOps(localGuid, mergedGuid, syncStatus, level,
                                        lastModifiedMicroseconds)
@@ -636,7 +631,7 @@ fn update_local_items_in_places<'t>(
     debug!(driver, "Staging apply new local structure ops");
     for chunk in ops
         .apply_new_local_structure
-        .chunks(SQLITE_MAX_VARIABLE_NUMBER / 2)
+        .chunks(db.variable_limit()? / 2)
     {
         let mut statement = db.prepare(format!(
             "INSERT INTO applyNewLocalStructureOps(mergedGuid, mergedParentGuid,
@@ -663,10 +658,7 @@ fn update_local_items_in_places<'t>(
     }

     debug!(driver, "Removing tombstones for revived items");
-    for chunk in ops
-        .delete_local_tombstones
-        .chunks(SQLITE_MAX_VARIABLE_NUMBER)
-    {
+    for chunk in ops.delete_local_tombstones.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "DELETE FROM moz_bookmarks_deleted
              WHERE guid IN ({})",
@@ -683,10 +675,7 @@ fn update_local_items_in_places<'t>(
         driver,
         "Inserting new tombstones for non-syncable and invalid items"
     );
-    for chunk in ops
-        .insert_local_tombstones
-        .chunks(SQLITE_MAX_VARIABLE_NUMBER)
-    {
+    for chunk in ops.insert_local_tombstones.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "INSERT INTO moz_bookmarks_deleted(guid, dateRemoved)
              VALUES {}",
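
The tombstone insert above interpolates a `VALUES {}` list rather than a `WHERE guid IN ({})` list: each chunked item expands to one parenthesized row of placeholders. A hedged JavaScript sketch of building such a clause; the helper and the two-placeholders-per-row figure are illustrative, not read from the Rust statement:

// Hypothetical helper: expand rowCount rows of paramsPerRow placeholders,
// e.g. valuesClause(3, 2) -> "(?,?),(?,?),(?,?)".
function valuesClause(rowCount, paramsPerRow) {
  let row = `(${new Array(paramsPerRow).fill("?").join(",")})`;
  return new Array(rowCount).fill(row).join(",");
}
// Assumed shape of the generated SQL for a tombstone-style insert:
// `INSERT INTO moz_bookmarks_deleted(guid, dateRemoved)
//  VALUES ${valuesClause(chunk.length, 2)}`
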
@@ -703,7 +692,7 @@ fn update_local_items_in_places<'t>(
     }

     debug!(driver, "Removing local items");
-    for chunk in ops.delete_local_items.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in ops.delete_local_items.chunks(db.variable_limit()?) {
         remove_local_items(&db, driver, controller, chunk)?;
     }

@@ -729,7 +718,7 @@ fn update_local_items_in_places<'t>(
         driver,
         "Resetting change counters for items that shouldn't be uploaded"
     );
-    for chunk in ops.set_local_merged.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in ops.set_local_merged.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "UPDATE moz_bookmarks SET
                syncChangeCounter = 0
@@ -747,7 +736,7 @@ fn update_local_items_in_places<'t>(
         driver,
         "Bumping change counters for items that should be uploaded"
     );
-    for chunk in ops.set_local_unmerged.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in ops.set_local_unmerged.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "UPDATE moz_bookmarks SET
                syncChangeCounter = 1
@@ -762,7 +751,7 @@ fn update_local_items_in_places<'t>(
     }

     debug!(driver, "Flagging applied remote items as merged");
-    for chunk in ops.set_remote_merged.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in ops.set_remote_merged.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "UPDATE items SET
                needsMerge = 0
@@ -1032,7 +1021,7 @@ fn stage_items_to_upload(
     db.exec("DELETE FROM itemsToUpload")?;

     debug!(driver, "Staging weak uploads");
-    for chunk in weak_upload.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in weak_upload.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "INSERT INTO itemsToUpload(id, guid, syncChangeCounter, parentGuid,
                                        parentTitle, dateAdded, type, title,
@@ -1067,7 +1056,7 @@ fn stage_items_to_upload(
     ))?;

     debug!(driver, "Staging remaining locally changed items for upload");
-    for chunk in upload_items.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in upload_items.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "INSERT OR IGNORE INTO itemsToUpload(id, guid, syncChangeCounter,
                                                  parentGuid, parentTitle,
@@ -1112,7 +1101,7 @@ fn stage_items_to_upload(
     // Finally, stage tombstones for deleted items. Ignore conflicts if we have
     // tombstones for undeleted items; Places Maintenance should clean these up.
     debug!(driver, "Staging tombstones to upload");
-    for chunk in upload_tombstones.chunks(SQLITE_MAX_VARIABLE_NUMBER) {
+    for chunk in upload_tombstones.chunks(db.variable_limit()?) {
         let mut statement = db.prepare(format!(
             "INSERT OR IGNORE INTO itemsToUpload(guid, syncChangeCounter, isDeleted)
              VALUES {}",

@@ -49,8 +49,6 @@ int64_t NS_NavBookmarksTotalSyncChanges() {

 PLACES_FACTORY_SINGLETON_IMPLEMENTATION(nsNavBookmarks, gBookmarksService)

-#define SQLITE_MAX_VARIABLE_NUMBER 999
-
 namespace {

 #define SKIP_TAGS(condition) ((condition) ? SkipTags : DontSkip)
@@ -1274,7 +1272,14 @@ nsresult nsNavBookmarks::InsertTombstones(
     return NS_OK;
   }

-  size_t maxRowsPerChunk = SQLITE_MAX_VARIABLE_NUMBER / 2;
+  nsCOMPtr<mozIStorageConnection> conn = mDB->MainConn();
+  NS_ENSURE_STATE(conn);
+
+  int32_t variableLimit = 0;
+  nsresult rv = conn->GetVariableLimit(&variableLimit);
+  NS_ENSURE_SUCCESS(rv, rv);
+
+  size_t maxRowsPerChunk = variableLimit / 2;
   for (uint32_t startIndex = 0; startIndex < aTombstones.Length();
        startIndex += maxRowsPerChunk) {
     size_t rowsPerChunk =
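
On the C++ side the limit now comes from `GetVariableLimit` on the `mozIStorageConnection`, and because `rv` is now declared earlier in the function, the separate `nsresult rv;` declaration becomes redundant and is removed in the next hunk. The JavaScript callers above read the same value as `db.variableLimit`; a hedged usage sketch, assuming the connection wrapper shown elsewhere in this diff and that the getter reflects the connection's actual bound-parameter limit rather than the old hard-coded 999:

// Hypothetical read-only usage: size chunks from the connection's own
// bound-parameter limit instead of assuming SQLite's historical default.
async function forEachBindableChunk(guids, callback) {
  let db = await PlacesUtils.promiseDBConnection();
  for (let chunk of PlacesUtils.chunkArray(guids, db.variableLimit)) {
    // each statement may bind at most db.variableLimit parameters
    await callback(chunk);
  }
}
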
@@ -1300,7 +1305,6 @@ nsresult nsNavBookmarks::InsertTombstones(
     mozStorageStatementScoper scoper(stmt);

     uint32_t paramIndex = 0;
-    nsresult rv;
     for (uint32_t i = 0; i < rowsPerChunk; ++i) {
       const TombstoneData& tombstone = aTombstones[startIndex + i];
       rv = stmt->BindUTF8StringByIndex(paramIndex++, tombstone.guid);