Bug 1445462 - Ensure tasks aren't scheduled during upload after flow has been aborted r=rnewman
This should have been a part of
Bug 1408710, but alas, here we are.
Patch changes two things:
- serializes processing of failures in BatchingUploader if a record-to-be-uploaded fails sanity checks or server requirements
-- this helps us short-circuit flow in RecordsChannel
- avoids calling the server session's storeDone from RecordsChannel if the flow has been aborted
MozReview-Commit-ID: 9qevdzRvHEx
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
@@ -216,16 +216,23 @@ public class BatchingUploader {
if (isBatchFull) {
flush(true, false);
} else if (isPayloadFull) {
flush(false, false);
}
}
public void noMoreRecordsToUpload() {
+ // It's possible that we've hit a failure during an upload.
+ // If that's the case, bail out. Our delegate chain has already been notified.
+ if (shouldIgnoreFurtherRecords()) {
+ Logger.debug(LOG_TAG, "Ignoring 'no more records to upload' signal due to previous failure.");
+ return;
+ }
+
Logger.debug(LOG_TAG, "Received 'no more records to upload' signal.");
// If we have any pending records in the Payload, flush them!
if (!payload.isEmpty()) {
flush(true, true);
return;
}
@@ -264,27 +271,22 @@ public class BatchingUploader {
// - Then mark record's store as 'failed' and continue uploading
// 2. shouldFailBatchOnFailure is false, and it failed sanity checks because it's too large,
// - Continue uploading, and don't fail synchronization because of this one.
// 3. shouldFailBatchOnFailure is true, and it failed for any reason
// - Stop uploading.
if (shouldFailBatchOnFailure) {
// case 3
Logger.debug(LOG_TAG, "Batch failed with exception: " + e.toString());
- executor.execute(new PayloadDispatcher.NonPayloadContextRunnable() {
- @Override
- public void run() {
- sessionStoreDelegate.onRecordStoreFailed(e, record.guid);
- payloadDispatcher.doStoreFailed(e);
- }
- });
// Send off to our delegate that we failed.
+ sessionStoreDelegate.onRecordStoreFailed(e, record.guid);
+ payloadDispatcher.doStoreFailed(e);
} else if (!sizeOverflow) {
// case 1
- sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(e, record.guid);
+ sessionStoreDelegate.onRecordStoreFailed(e, record.guid);
}
// case 2 is an implicit empty else {} here.
}
// Will be called from a thread dispatched by PayloadDispatcher.
// NB: Access to `uploaderMeta.isUnlimited` is guarded by the payloadLock.
/* package-local */ void setUnlimitedMode(boolean isUnlimited) {
// If we know for sure that we're not in a batching mode,
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/RecordsChannel.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/synchronizer/RecordsChannel.java
@@ -226,16 +226,23 @@ public class RecordsChannel implements
} catch (NoStoreDelegateException e) {
// Must not happen, bail out.
throw new IllegalStateException(e);
}
// Allow this buffer to be reclaimed.
toProcess = null;
+ // It's possible that one of the delegate-driven `store` calls above failed.
+ // In that case, 'onStoreFailed' would have been already called, and we have nothing left to do.
+ if (storeFailed.get()) {
+ Logger.info(LOG_TAG, "Store failed while processing records via sink.store(...); bailing out.");
+ return;
+ }
+
// Now we wait for onStoreComplete
Logger.trace(LOG_TAG, "onFetchCompleted. Calling storeDone.");
sink.storeDone();
}
// Sent for "store" batches.
@Override
public void onBatchCommitted() {