Bug 1631484 - make bulk imports / deletes in remote settings faster, r=asuth,leplatrem

Differential Revision: https://phabricator.services.mozilla.com/D71588
Gijs Kruitbosch 2020-04-22 00:04:19 +00:00
Parent 3a6da68bf2
Commit 9655c6cc65
2 changed files: 76 additions and 39 deletions

View file

@@ -40,6 +40,34 @@ class ShutdownError extends IndexedDBError {
   }
 }
+
+// We batch operations in order to reduce round-trip latency to the IndexedDB
+// database thread. The trade-offs are that the more records in the batch, the
+// more time we spend on this thread in structured serialization, and the
+// greater the chance to jank PBackground and this thread when the responses
+// come back. The initial choice of 250 was made targeting 2-3ms on a fast
+// machine and 10-15ms on a slow machine.
+// Every chunk waits for success before starting the next, and
+// the final chunk's completion will fire transaction.oncomplete.
+function bulkOperationHelper(store, operation, list, listIndex = 0) {
+  const CHUNK_LENGTH = 250;
+  const max = Math.min(listIndex + CHUNK_LENGTH, list.length);
+  let request;
+  for (; listIndex < max; listIndex++) {
+    request = store[operation](list[listIndex]);
+  }
+  if (listIndex < list.length) {
+    // On error, `transaction.onerror` is called.
+    request.onsuccess = bulkOperationHelper.bind(
+      null,
+      store,
+      operation,
+      list,
+      listIndex
+    );
+  }
+  // otherwise, we're done, and the transaction will complete on its own.
+}
 
 /**
  * Database is a tiny wrapper with the objective
  * of providing major kinto-offline-client collection API.
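
To make the chunking concrete, here is an illustrative sketch (not part of the patch; chunkBounds is a hypothetical helper) of the chunk boundaries the helper walks for a 600-item list:

// Illustrative only: compute the [start, end) chunk boundaries that
// bulkOperationHelper visits, using the same CHUNK_LENGTH of 250.
const CHUNK_LENGTH = 250;
function chunkBounds(length) {
  const bounds = [];
  for (let start = 0; start < length; start += CHUNK_LENGTH) {
    bounds.push([start, Math.min(start + CHUNK_LENGTH, length)]);
  }
  return bounds;
}
console.log(chunkBounds(600)); // [[0, 250], [250, 500], [500, 600]]

Only the last request of each chunk gets an onsuccess handler, so the database thread works through a whole chunk before this thread is scheduled again.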
@@ -100,19 +128,13 @@ class Database {
       await executeIDB(
         "records",
         store => {
-          // Chain the put operations together, the last one will be waited by
-          // the `transaction.oncomplete` callback.
-          let i = 0;
-          putNext();
-
-          function putNext() {
-            if (i == toInsert.length) {
-              return;
-            }
-            const entry = { ...toInsert[i], _cid };
-            store.put(entry).onsuccess = putNext; // On error, `transaction.onerror` is called.
-            ++i;
-          }
+          bulkOperationHelper(
+            store,
+            "put",
+            toInsert.map(item => {
+              return Object.assign({ _cid }, item);
+            })
+          );
         },
         { desc: "importBulk() in " + this.identifier }
       );
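
For context, a hedged sketch of how a caller might drive importBulk(); how the Database instance is obtained and the record fields are illustrative assumptions, not taken from this patch:

// Sketch only: records are stored with the collection identifier (_cid)
// merged in, so callers pass plain record objects.
const db = new Database("main/example-collection"); // hypothetical identifier
await db.importBulk([
  { id: "record-1", last_modified: 1587513600000 },
  { id: "record-2", last_modified: 1587513600001 },
]);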
@@ -127,18 +149,13 @@ class Database {
       await executeIDB(
         "records",
         store => {
-          // Chain the delete operations together, the last one will be waited by
-          // the `transaction.oncomplete` callback.
-          let i = 0;
-          deleteNext();
-
-          function deleteNext() {
-            if (i == toDelete.length) {
-              return;
-            }
-            store.delete([_cid, toDelete[i].id]).onsuccess = deleteNext; // On error, `transaction.onerror` is called.
-            ++i;
-          }
+          bulkOperationHelper(
+            store,
+            "delete",
+            toDelete.map(item => {
+              return [_cid, item.id];
+            })
+          );
         },
         { desc: "deleteBulk() in " + this.identifier }
       );
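
A similar sketch for deletions: deleteBulk() only uses each record's id to build the [_cid, id] key, so the entries passed in can be minimal (again illustrative, not from the patch):

// Sketch only: deleteBulk() builds [_cid, id] keys from the passed records.
await db.deleteBulk([{ id: "record-1" }, { id: "record-2" }]);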

View file

@@ -21,6 +21,34 @@ const IDB_VERSION = 2;
 const IDB_RECORDS_STORE = "records";
 const IDB_TIMESTAMPS_STORE = "timestamps";
 
+// We batch operations in order to reduce round-trip latency to the IndexedDB
+// database thread. The trade-offs are that the more records in the batch, the
+// more time we spend on this thread in structured serialization, and the
+// greater the chance to jank PBackground and this thread when the responses
+// come back. The initial choice of 250 was made targeting 2-3ms on a fast
+// machine and 10-15ms on a slow machine.
+// Every chunk waits for success before starting the next, and
+// the final chunk's completion will fire transaction.oncomplete.
+function bulkOperationHelper(store, operation, list, listIndex = 0) {
+  const CHUNK_LENGTH = 250;
+  const max = Math.min(listIndex + CHUNK_LENGTH, list.length);
+  let request;
+  for (; listIndex < max; listIndex++) {
+    request = store[operation](list[listIndex]);
+  }
+  if (listIndex < list.length) {
+    // On error, `transaction.onerror` is called.
+    request.onsuccess = bulkOperationHelper.bind(
+      null,
+      store,
+      operation,
+      list,
+      listIndex
+    );
+  }
+  // otherwise, we're done, and the transaction will complete on its own.
+}
+
 const Agent = {
   /**
    * Return the canonical JSON serialization of the specified records.
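
The helper above is repeated verbatim in this second file. To give a rough, illustrative sense of what the 250-record batch size buys (the dump size here is made up):

// Illustrative arithmetic: a hypothetical 10,000-record dump is written in
// Math.ceil(10000 / 250) === 40 chunks, i.e. 40 onsuccess callbacks on this
// thread instead of 10,000 when chaining one record at a time.
const roundTrips = Math.ceil(10000 / 250); // 40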
@@ -161,22 +189,14 @@ async function importDumpIDB(bucket, collection, records) {
   const db = await openIDB(IDB_NAME, IDB_VERSION);
 
   // Each entry of the dump will be stored in the records store.
-  // They are indexed by `_cid`, and their status is `synced`.
+  // They are indexed by `_cid`.
   const cid = bucket + "/" + collection;
   await executeIDB(db, IDB_RECORDS_STORE, store => {
-    // Chain the put operations together, the last one will be waited by
-    // the `transaction.oncomplete` callback.
-    let i = 0;
-    putNext();
-
-    function putNext() {
-      if (i == records.length) {
-        return;
-      }
-      const entry = { ...records[i], _status: "synced", _cid: cid };
-      store.put(entry).onsuccess = putNext; // On error, `transaction.onerror` is called.
-      ++i;
-    }
+    // We can just modify the items in-place, as we got them from loadJSONDump.
+    records.forEach(item => {
+      item._cid = cid;
+    });
+    bulkOperationHelper(store, "put", records);
   });
 
   // Store the highest timestamp as the collection timestamp (or zero if dump is empty).
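
The code that stores that timestamp is outside this hunk; a hedged sketch of how the highest timestamp could be derived from the dump (the reduce over last_modified is an assumption, not shown in the diff):

// Sketch only: pick the largest last_modified in the dump, or 0 if empty.
const timestamp = records.reduce(
  (max, record) => Math.max(max, record.last_modified || 0),
  0
);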