Bug 1362206 - Have Android abort bookmark syncs if a record is too large to upload to the server r=Grisha

MozReview-Commit-ID: JBggAu9Ajbu

--HG--
extra : rebase_source : b8ec16ac467f33ab81ed914c63af8ab1d7e29eef
This commit is contained in:
Thom Chiovoloni 2017-09-06 17:48:18 -04:00
Parent 0af7d9de10
Commit 5e4fd13a04
3 changed files with 90 additions and 23 deletions
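
In outline, the failure-handling policy this commit introduces looks like the following: a simplified, self-contained sketch whose names mirror the patch below, minus the executor plumbing the real BatchingUploader uses. It is a paraphrase, not the literal patch.

final class FailurePolicySketch {
    // Stand-ins for the real record types.
    static class Record { String guid; }
    static class BookmarkRecord extends Record {}

    // Set once a bookmark record fails; process() then ignores all further records.
    private volatile boolean aborted = false;

    // Mirrors BatchingUploader.shouldFailBatchOnFailure: only bookmark failures
    // abort the batch, since uploading a partial bookmark tree risks corruption.
    boolean shouldFailBatchOnFailure(Record record) {
        return record instanceof BookmarkRecord;
    }

    // Mirrors BatchingUploader.failRecordStore.
    void failRecordStore(Exception e, Record record, boolean sizeOverflow) {
        if (shouldFailBatchOnFailure(record)) {
            aborted = true;   // case 3: fail the whole store and stop uploading
        } else if (!sizeOverflow) {
            // case 1: report failure for this record only; uploading continues
        }
        // case 2: an oversized non-bookmark record is skipped silently
    }
}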

View file

@@ -102,6 +102,9 @@ public class BatchingUploader {
// maintain this limit for a single sanity check.
private final long maxPayloadFieldBytes;
// Set if this channel should ignore further calls to process.
private volatile boolean aborted = false;
public BatchingUploader(
final RepositorySession repositorySession, final ExecutorService workQueue,
final RepositorySessionStoreDelegate sessionStoreDelegate, final Uri baseCollectionUri,
@@ -129,7 +132,7 @@ public class BatchingUploader {
final String guid = record.guid;
// If store failed entirely, just bail out. We've already told our delegate that we failed.
-if (payloadDispatcher.storeFailed.get()) {
+if (payloadDispatcher.storeFailed.get() || aborted) {
return;
}
@@ -137,9 +140,7 @@ public class BatchingUploader {
final String payloadField = (String) recordJSON.get(CryptoRecord.KEY_PAYLOAD);
if (payloadField == null) {
-sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(
-new IllegalRecordException(), guid
-);
+failRecordStore(new IllegalRecordException(), record, false);
return;
}
@@ -147,17 +148,13 @@ public class BatchingUploader {
// UTF-8 uses 1 byte per character for the ASCII range. Contents of the payloadField are
// base64 and hex encoded, so character count is sufficient.
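// For example, a 4096-character payload field occupies exactly 4096 bytes in UTF-8.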
if (payloadField.length() > this.maxPayloadFieldBytes) {
-sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(
-new PayloadTooLargeToUpload(), guid
-);
+failRecordStore(new PayloadTooLargeToUpload(), record, true);
return;
}
final byte[] recordBytes = Record.stringToJSONBytes(recordJSON.toJSONString());
if (recordBytes == null) {
-sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(
-new IllegalRecordException(), guid
-);
+failRecordStore(new IllegalRecordException(), record, false);
return;
}
@@ -166,9 +163,7 @@ public class BatchingUploader {
// We can't upload individual records which exceed our payload total byte limit.
if ((recordDeltaByteCount + PER_PAYLOAD_OVERHEAD_BYTE_COUNT) > payload.maxBytes) {
-sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(
-new RecordTooLargeToUpload(), guid
-);
+failRecordStore(new RecordTooLargeToUpload(), record, true);
return;
}
@@ -181,7 +176,7 @@ public class BatchingUploader {
Logger.debug(LOG_TAG, "Record fits into the current batch and payload");
addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
// Payload won't fit the record.
} else if (canFitRecordIntoBatch) {
Logger.debug(LOG_TAG, "Current payload won't fit incoming record, uploading payload.");
flush(false, false);
@@ -191,7 +186,7 @@ public class BatchingUploader {
// Keep track of the overflow record.
addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
// Batch won't fit the record.
} else {
Logger.debug(LOG_TAG, "Current batch won't fit incoming record, committing batch.");
flush(true, false);
@@ -238,6 +233,13 @@ public class BatchingUploader {
});
}
// We fail the batch for bookmark records because uploading only a subset of bookmark records is
// very likely to cause corruption (e.g. uploading a parent without its children or vice versa).
@VisibleForTesting
/* package-local */ boolean shouldFailBatchOnFailure(Record record) {
return record instanceof BookmarkRecord;
}
/* package-local */ void setLastStoreTimestamp(AtomicLong lastModifiedTimestamp) {
repositorySession.setLastStoreTimestamp(lastModifiedTimestamp.get());
}
@@ -246,6 +248,35 @@ public class BatchingUploader {
sessionStoreDelegate.deferredStoreDelegate(executor).onStoreCompleted();
}
// Common handling for marking a record failure and calling our delegate's onRecordStoreFailed.
private void failRecordStore(final Exception e, final Record record, boolean sizeOverflow) {
// There are three cases handled here; see bug 1362206 for the rationale.
// 1. If `record` is not a bookmark and it failed sanity checks for reasons other than
// "it's too large" (say, `record`'s JSON is 0 bytes):
// - Mark the record's store as failed and continue uploading.
// 2. If `record` is not a bookmark and it failed sanity checks because it's too large:
// - Skip it and continue uploading; don't fail the whole sync over one oversized record.
// 3. If `record` *is* a bookmark and it failed for any reason:
// - Stop uploading.
if (shouldFailBatchOnFailure(record)) {
// case 3
Logger.debug(LOG_TAG, "Batch failed with exception: " + e.toString());
// Start ignoring records, and notify our delegate that we failed.
aborted = true;
executor.execute(new PayloadDispatcher.NonPayloadContextRunnable() {
@Override
public void run() {
sessionStoreDelegate.onRecordStoreFailed(e, record.guid);
payloadDispatcher.doStoreFailed(e);
}
});
} else if (!sizeOverflow) {
// case 1
sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(e, record.guid);
}
// Case 2 is the implicit, empty else branch here.
}
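The asymmetry between cases 2 and 3 is the point of this patch: one oversized non-bookmark record is dropped quietly so it cannot fail an entire sync, while any bookmark failure aborts the store, because uploading only part of a bookmark tree (a parent without its children, or vice versa) is likely to corrupt server-side structure.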
// Will be called from a thread dispatched by PayloadDispatcher.
// NB: Access to `uploaderMeta.isUnlimited` is guarded by the payloadLock.
/* package-local */ void setUnlimitedMode(boolean isUnlimited) {

View file

@@ -140,7 +140,7 @@ class PayloadDispatcher {
batchWhiteboard = batchWhiteboard.nextBatchMeta();
}
-private void doStoreFailed(Exception reason) {
+/* package-local */ void doStoreFailed(Exception reason) {
if (storeFailed.getAndSet(true)) {
return;
}
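doStoreFailed is widened from private to package-local so that BatchingUploader.failRecordStore (above) can fail the whole store when a bookmark record is rejected.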

View file

@@ -25,6 +25,8 @@ import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
import org.mozilla.gecko.sync.repositories.Server15Repository;
import org.mozilla.gecko.sync.repositories.Server15RepositorySession;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
import org.mozilla.gecko.sync.repositories.domain.BookmarkRecord;
import org.mozilla.gecko.sync.repositories.domain.Record;
import java.net.URISyntaxException;
import java.util.ArrayList;
@@ -390,6 +392,7 @@ public class BatchingUploaderTest {
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testRandomPayloadSizesBatching() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
@@ -441,18 +444,39 @@ public class BatchingUploaderTest {
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 10));
assertEquals(0, ((MockStoreDelegate) storeDelegate).storeFailed);
// Check that we don't complain when we're configured not to abort
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 11));
-assertEquals(1, ((MockStoreDelegate) storeDelegate).storeFailed);
-assertTrue(((MockStoreDelegate) storeDelegate).lastRecordStoreFailedException instanceof BatchingUploader.PayloadTooLargeToUpload);
+assertEquals(0, ((MockStoreDelegate) storeDelegate).storeFailed);
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 1000));
-assertEquals(2, ((MockStoreDelegate) storeDelegate).storeFailed);
-assertTrue(((MockStoreDelegate) storeDelegate).lastRecordStoreFailedException instanceof BatchingUploader.PayloadTooLargeToUpload);
+assertEquals(0, ((MockStoreDelegate) storeDelegate).storeFailed);
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 9));
-assertEquals(2, ((MockStoreDelegate) storeDelegate).storeFailed);
+assertEquals(0, ((MockStoreDelegate) storeDelegate).storeFailed);
}
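The adjusted assertions above reflect case 2 of the new policy: oversized non-bookmark records are now skipped silently rather than reported via onRecordStoreFailed, so the failure counter stays at zero.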
@Test
public void testFailAbortsBatch() {
BatchingUploader uploader = makeConstrainedUploader(2, 4, 10, false, true);
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 10));
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 10));
assertEquals(0, ((MockStoreDelegate) storeDelegate).storeFailed);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(null, ((MockStoreDelegate) storeDelegate).lastStoreFailedException);
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 11));
assertTrue(((MockStoreDelegate) storeDelegate).lastStoreFailedException instanceof BatchingUploader.PayloadTooLargeToUpload);
// Ensure that further calls to process do nothing.
for (int i = 0; i < 5; ++i) {
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 1));
}
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
}
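This test drives the abort path with plain MockRecords rather than bookmarks: the uploader is built with abortOnRecordFail set, and MockUploader (below) overrides shouldFailBatchOnFailure to return that flag instead of checking for BookmarkRecord.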
@Test
public void testNoMoreRecordsAfterPayloadPost() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
@@ -549,6 +573,10 @@ public class BatchingUploaderTest {
}
private BatchingUploader makeConstrainedUploader(long maxPostRecords, long maxTotalRecords, long maxPayloadBytes, boolean firstSync) {
return makeConstrainedUploader(maxPostRecords, maxTotalRecords, maxPayloadBytes, firstSync, false);
}
private BatchingUploader makeConstrainedUploader(long maxPostRecords, long maxTotalRecords, long maxPayloadBytes, boolean firstSync, boolean abortOnRecordFail) {
ExtendedJSONObject infoConfigurationJSON = new ExtendedJSONObject();
infoConfigurationJSON.put(InfoConfiguration.MAX_TOTAL_BYTES, 4096L);
infoConfigurationJSON.put(InfoConfiguration.MAX_TOTAL_RECORDS, maxTotalRecords);
@@ -563,9 +591,10 @@ public class BatchingUploaderTest {
server15RepositorySession.setStoreDelegate(storeDelegate);
return new MockUploader(
server15RepositorySession, workQueue, storeDelegate, Uri.EMPTY, 0L,
-new InfoConfiguration(infoConfigurationJSON), null);
+new InfoConfiguration(infoConfigurationJSON), null, abortOnRecordFail);
}
class MockPayloadDispatcher extends PayloadDispatcher {
MockPayloadDispatcher(final Executor workQueue, final BatchingUploader uploader, Long lastModified) {
super(workQueue, uploader, lastModified);
@@ -582,18 +611,25 @@ public class BatchingUploaderTest {
}
class MockUploader extends BatchingUploader {
boolean abortOnRecordFail;
MockUploader(final RepositorySession repositorySession, final ExecutorService workQueue,
final RepositorySessionStoreDelegate sessionStoreDelegate, final Uri baseCollectionUri,
final Long localCollectionLastModified, final InfoConfiguration infoConfiguration,
-final AuthHeaderProvider authHeaderProvider) {
+final AuthHeaderProvider authHeaderProvider, final boolean abortOnRecordFail) {
super(repositorySession, workQueue, sessionStoreDelegate, baseCollectionUri,
localCollectionLastModified, infoConfiguration, authHeaderProvider);
this.abortOnRecordFail = abortOnRecordFail;
}
@Override
PayloadDispatcher createPayloadDispatcher(ExecutorService workQueue, Long localCollectionLastModified) {
return new MockPayloadDispatcher(workQueue, this, localCollectionLastModified);
}
@Override
boolean shouldFailBatchOnFailure(Record r) {
return abortOnRecordFail;
}
}
private Server15Repository makeConstrainedRepository(boolean firstSync) {