Bug 1445462 - Pre: Clean up "ignore records on batch failure" code r=rnewman draft
author Grigory Kruglov <gkruglov@mozilla.com>
Wed, 14 Mar 2018 14:12:48 -0400
changeset 767484 4e46be5f67317f6bd5ea0c4701587908a3628634
parent 767005 deb7714a7bcd3448952440e92d0209abec6b886d
child 767485 2e30aa7e222916acb791a9803bab0d4d95e5e491
push id 102617
push user bmo:gkruglov@mozilla.com
push date Wed, 14 Mar 2018 18:17:37 +0000
reviewers rnewman
bugs 1445462
milestone 61.0a1
Bug 1445462 - Pre: Clean up "ignore records on batch failure" code r=rnewman No functional change; added tests to cover the decision tree a bit better, renamed stuff. MozReview-Commit-ID: LwvyBaAg421
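The "decision tree" mentioned above is the failure handling in BatchingUploader: what happens to a record that fails sanity checks, depending on the shouldFailBatchOnFailure flag and the kind of failure. A minimal, self-contained sketch of that logic follows; StoreDelegate, Dispatcher, and handleInvalidRecord are illustrative stand-ins, not the uploader's actual API:

    // Sketch of the three failure cases, condensed from the patch below.
    // StoreDelegate, Dispatcher, and handleInvalidRecord are illustrative names.
    class FailureHandlingSketch {
        interface StoreDelegate { void onRecordStoreFailed(Exception e, String guid); }
        interface Dispatcher { void doStoreFailed(Exception e); }

        private final boolean shouldFailBatchOnFailure;
        private final StoreDelegate delegate;
        private final Dispatcher dispatcher;
        private volatile boolean encounteredInvalidRecord = false;

        FailureHandlingSketch(boolean shouldFailBatchOnFailure, StoreDelegate delegate, Dispatcher dispatcher) {
            this.shouldFailBatchOnFailure = shouldFailBatchOnFailure;
            this.delegate = delegate;
            this.dispatcher = dispatcher;
        }

        void handleInvalidRecord(Exception e, String guid, boolean sizeOverflow) {
            encounteredInvalidRecord = true;
            if (shouldFailBatchOnFailure) {
                // Case 3: any invalid record fails the whole flow (e.g. bookmarks,
                // where uploading a partial tree risks corruption).
                delegate.onRecordStoreFailed(e, guid);
                dispatcher.doStoreFailed(e);
            } else if (!sizeOverflow) {
                // Case 1: malformed record; fail just this record, keep uploading.
                delegate.onRecordStoreFailed(e, guid);
            }
            // Case 2: record is merely too large; skip it and carry on.
        }
    }

The real implementation below additionally defers the delegate calls onto an executor; the sketch keeps them synchronous for brevity.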
mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
mobile/android/services/src/test/java/org/mozilla/gecko/background/testhelpers/NullPayloadRecord.java
mobile/android/services/src/test/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploaderTest.java
--- a/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
+++ b/mobile/android/services/src/main/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploader.java
@@ -6,21 +6,19 @@ package org.mozilla.gecko.sync.repositor
 
 import android.net.Uri;
 import android.support.annotation.VisibleForTesting;
 
 import org.json.simple.JSONObject;
 import org.mozilla.gecko.background.common.log.Logger;
 import org.mozilla.gecko.sync.CryptoRecord;
 import org.mozilla.gecko.sync.InfoConfiguration;
-import org.mozilla.gecko.sync.Server15PreviousPostFailedException;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
-import org.mozilla.gecko.sync.repositories.domain.BookmarkRecord;
 import org.mozilla.gecko.sync.repositories.domain.Record;
 
 import java.util.ArrayList;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * Uploader which implements batching introduced in Sync 1.5.
@@ -97,18 +95,20 @@ public class BatchingUploader {
     // In context of the uploader, a "payload" is an entirety of what gets POST-ed to the server in
     // an individual request.
     // In context of Sync Storage, "payload" is a BSO (basic storage object) field defined as: "a
     // string containing the data of the record."
     // Sync Storage servers place a hard limit on how large a payload _field_ might be, and so we
     // maintain this limit for a single sanity check.
     private final long maxPayloadFieldBytes;
 
-    // Set if this channel should ignore further calls to process.
-    private volatile boolean aborted = false;
+    // Depending on the 'shouldFailBatchOnFailure' flag below, an "invalid" record (one that is
+    // too large or fails to serialize correctly) will cause the uploader to ignore any further
+    // records in the current flow.
+    private volatile boolean encounteredInvalidRecord = false;
 
     // Whether or not we should ignore all further records if any record fails.
     // This is used to prevent corruption with bookmark records, as uploading
     // only a subset of the bookmarks is very likely to cause corruption (e.g.
     // uploading a parent without its children, or vice versa).
     @VisibleForTesting
     protected final boolean shouldFailBatchOnFailure;
 
@@ -135,48 +135,48 @@ public class BatchingUploader {
         this.executor = workQueue;
     }
 
     // Called concurrently from the threads running off of a record consumer thread pool.
     public void process(final Record record) {
         final String guid = record.guid;
 
         // If the store failed entirely, or we're now ignoring records, just bail out. We've already told our delegate that we failed.
-        if (payloadDispatcher.storeFailed.get() || aborted) {
+        if (shouldIgnoreFurtherRecords()) {
             return;
         }
 
         final JSONObject recordJSON = record.toJSONObject();
 
         final String payloadField = (String) recordJSON.get(CryptoRecord.KEY_PAYLOAD);
         if (payloadField == null) {
-            failRecordStore(new IllegalRecordException(), record, false);
+            processInvalidRecord(new IllegalRecordException(), record, false);
             return;
         }
 
         // We can't upload individual records whose payload fields exceed our payload field byte limit.
         // UTF-8 uses 1 byte per character for the ASCII range. Contents of the payloadField are
         // base64 and hex encoded, so character count is sufficient.
         if (payloadField.length() > this.maxPayloadFieldBytes) {
-            failRecordStore(new PayloadTooLargeToUpload(), record, true);
+            processInvalidRecord(new PayloadTooLargeToUpload(), record, true);
             return;
         }
 
         final byte[] recordBytes = Record.stringToJSONBytes(recordJSON.toJSONString());
         if (recordBytes == null) {
-            failRecordStore(new IllegalRecordException(), record, false);
+            processInvalidRecord(new IllegalRecordException(), record, false);
             return;
         }
 
         final long recordDeltaByteCount = recordBytes.length + PER_RECORD_OVERHEAD_BYTE_COUNT;
         Logger.debug(LOG_TAG, "Processing a record with guid: " + guid);
 
         // We can't upload individual records which exceed our payload total byte limit.
         if ((recordDeltaByteCount + PER_PAYLOAD_OVERHEAD_BYTE_COUNT) > payload.maxBytes) {
-            failRecordStore(new RecordTooLargeToUpload(), record, true);
+            processInvalidRecord(new RecordTooLargeToUpload(), record, true);
             return;
         }
 
         synchronized (payloadLock) {
             final boolean canFitRecordIntoBatch = uploaderMeta.canFit(recordDeltaByteCount);
             final boolean canFitRecordIntoPayload = payload.canFit(recordDeltaByteCount);
 
             // Record fits!
@@ -245,38 +245,43 @@ public class BatchingUploader {
     /* package-local */ void setLastStoreTimestamp(AtomicLong lastModifiedTimestamp) {
         repositorySession.setLastStoreTimestamp(lastModifiedTimestamp.get());
     }
 
     /* package-local */ void finished() {
         sessionStoreDelegate.deferredStoreDelegate(executor).onStoreCompleted();
     }
 
+    private boolean shouldIgnoreFurtherRecords() {
+        return (shouldFailBatchOnFailure && encounteredInvalidRecord) || payloadDispatcher.storeFailed.get();
+    }
+
     // Common handling for marking a record failure and calling our delegate's onRecordStoreFailed.
-    private void failRecordStore(final Exception e, final Record record, boolean sizeOverflow) {
+    private void processInvalidRecord(final Exception e, final Record record, boolean sizeOverflow) {
+        encounteredInvalidRecord = true;
+
         // There are three cases we're handling here. See bug 1362206 for the rationale.
         // 1. shouldFailBatchOnFailure is false, and it failed sanity checks for reasons other than
         //    "it's too large" (say, `record`'s json is 0 bytes),
         //     - Then mark record's store as 'failed' and continue uploading
         // 2. shouldFailBatchOnFailure is false, and it failed sanity checks because it's too large,
         //     - Continue uploading, and don't fail synchronization because of this one.
         // 3. shouldFailBatchOnFailure is true, and it failed for any reason
         //     - Stop uploading.
         if (shouldFailBatchOnFailure) {
             // case 3
             Logger.debug(LOG_TAG, "Batch failed with exception: " + e.toString());
-            // Start ignoring records, and send off to our delegate that we failed.
-            aborted = true;
+            // Send off to our delegate that we failed.
             executor.execute(new PayloadDispatcher.NonPayloadContextRunnable() {
                 @Override
                 public void run() {
                     sessionStoreDelegate.onRecordStoreFailed(e, record.guid);
                     payloadDispatcher.doStoreFailed(e);
                 }
             });
         } else if (!sizeOverflow) {
             // case 1
             sessionStoreDelegate.deferredStoreDelegate(executor).onRecordStoreFailed(e, record.guid);
         }
         // case 2 is an implicit empty else {} here.
     }
 
     // Will be called from a thread dispatched by PayloadDispatcher.
@@ -337,12 +342,13 @@ public class BatchingUploader {
     // doesn't depend on this behaviour.
     private static class RecordTooLargeToUpload extends BatchingUploaderException {
         private static final long serialVersionUID = 1L;
     }
     @VisibleForTesting
     /* package-local */ static class PayloadTooLargeToUpload extends BatchingUploaderException {
         private static final long serialVersionUID = 1L;
     }
-    private static class IllegalRecordException extends BatchingUploaderException {
+    @VisibleForTesting
+    /* package-local */ static class IllegalRecordException extends BatchingUploaderException {
         private static final long serialVersionUID = 1L;
     }
 }
\ No newline at end of file
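A note on the payload-field size check retained above: because the payload field's contents are base64- and hex-encoded (pure ASCII), String.length() matches the UTF-8 byte count, so the uploader can compare character count directly against maxPayloadFieldBytes. A standalone illustration (not uploader code; the sample string is hypothetical):

    import java.nio.charset.StandardCharsets;

    public class PayloadLengthExample {
        public static void main(String[] args) {
            // Base64/hex payloads are pure ASCII: one UTF-8 byte per character.
            String payloadField = "aGVsbG8gd29ybGQ=";
            int chars = payloadField.length();
            int bytes = payloadField.getBytes(StandardCharsets.UTF_8).length;
            System.out.println(chars == bytes);  // true for ASCII-only strings
        }
    }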
new file mode 100644
--- /dev/null
+++ b/mobile/android/services/src/test/java/org/mozilla/gecko/background/testhelpers/NullPayloadRecord.java
@@ -0,0 +1,18 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+package org.mozilla.gecko.background.testhelpers;
+
+import org.json.simple.JSONObject;
+import org.mozilla.gecko.sync.Utils;
+
+public class NullPayloadRecord extends MockRecord {
+    public NullPayloadRecord() {
+        super(Utils.generateGuid(), null, 0, false, 0);
+    }
+
+    @Override
+    public JSONObject toJSONObject() {
+        return new JSONObject();
+    }
+}
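The helper above returns an empty JSON object from toJSONObject(), so the uploader's lookup of the payload field yields null and the record takes the IllegalRecordException path in process(). A minimal sketch of that mechanism, using the same json-simple API (the literal "payload" key mirrors CryptoRecord.KEY_PAYLOAD, stated here as an assumption):

    import org.json.simple.JSONObject;

    public class NullPayloadExample {
        public static void main(String[] args) {
            // An empty BSO has no "payload" field, so get() returns null;
            // BatchingUploader.process() treats that as an illegal record.
            JSONObject recordJSON = new JSONObject();
            Object payloadField = recordJSON.get("payload");
            System.out.println(payloadField == null);  // true
        }
    }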
--- a/mobile/android/services/src/test/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploaderTest.java
+++ b/mobile/android/services/src/test/java/org/mozilla/gecko/sync/repositories/uploaders/BatchingUploaderTest.java
@@ -9,29 +9,28 @@ import android.support.annotation.NonNul
 
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
 
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mozilla.gecko.background.testhelpers.MockRecord;
+import org.mozilla.gecko.background.testhelpers.NullPayloadRecord;
 import org.mozilla.gecko.background.testhelpers.TestRunner;
 import org.mozilla.gecko.sync.ExtendedJSONObject;
 import org.mozilla.gecko.sync.InfoCollections;
 import org.mozilla.gecko.sync.InfoConfiguration;
 import org.mozilla.gecko.sync.Utils;
 import org.mozilla.gecko.sync.net.AuthHeaderProvider;
 import org.mozilla.gecko.sync.repositories.RepositorySession;
 import org.mozilla.gecko.sync.repositories.NonPersistentRepositoryStateProvider;
 import org.mozilla.gecko.sync.repositories.Server15Repository;
 import org.mozilla.gecko.sync.repositories.Server15RepositorySession;
 import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
-import org.mozilla.gecko.sync.repositories.domain.BookmarkRecord;
-import org.mozilla.gecko.sync.repositories.domain.Record;
 
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -475,16 +474,44 @@ public class BatchingUploaderTest {
         // Ensure that further calls to process do nothing.
         for (int i = 0; i < 5; ++i) {
             uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 1));
         }
 
         assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
     }
 
+    // Same as above, except that the uploader is configured to tolerate invalid records instead of failing the batch.
+    @Test
+    public void testFailDoesNotAbortBatch() {
+        BatchingUploader uploader = makeConstrainedUploader(2, 10, 10, false, false);
+        uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 10));
+        uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 10));
+        assertEquals(0, ((MockStoreDelegate) storeDelegate).storeFailed);
+        assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
+        assertNull(((MockStoreDelegate) storeDelegate).lastStoreFailedException);
+
+        // "Record too large" failures are not recorded.
+        uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 11));
+        assertEquals(((MockStoreDelegate) storeDelegate).lastRecordStoreFailedException, null);
+        assertEquals(((MockStoreDelegate) storeDelegate).lastStoreFailedException, null);
+
+        // Null payload failures are reported to the delegate.
+        uploader.process(new NullPayloadRecord());
+        assertTrue(((MockStoreDelegate) storeDelegate).lastRecordStoreFailedException instanceof BatchingUploader.IllegalRecordException);
+        assertNull(((MockStoreDelegate) storeDelegate).lastStoreFailedException);
+
+        // Ensure that further calls to process still work.
+        for (int i = 0; i < 6; ++i) {
+            uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, 1));
+        }
+
+        assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
+    }
 
     @Test
     public void testNoMoreRecordsAfterPayloadPost() {
         BatchingUploader uploader = makeConstrainedUploader(2, 4);
 
         // Process two records (payload limit is also two, batch is four),
         // and ensure that 'no more records' commits.
         MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);