Bug 1429250 - Avoid repeated traversals of toFetch in _processIncoming. r?kitcambridge,eoger draft
authorThom Chiovoloni <tchiovoloni@mozilla.com>
Tue, 09 Jan 2018 19:22:33 -0500
changeset 724475 2f214c3530d9abda53bd3c5cd1548064798c9fe4
parent 723788 c5461973d6ee7845b3f560c05e1502429fd63184
child 747161 af7e2afc1c25994ffa907d7f9fcfc69fcdf0d481
push id96757
push userbmo:tchiovoloni@mozilla.com
push dateThu, 25 Jan 2018 01:18:42 +0000
reviewerskitcambridge, eoger
bugs1429250
milestone60.0a1
Bug 1429250 - Avoid repeated traversals of toFetch in _processIncoming. r?kitcambridge,eoger MozReview-Commit-ID: N1Zr8iT7da
services/sync/modules-testing/rotaryengine.js
services/sync/modules/bookmark_repair.js
services/sync/modules/engines.js
services/sync/modules/util.js
services/sync/tests/unit/test_bookmark_engine.js
services/sync/tests/unit/test_history_engine.js
services/sync/tests/unit/test_syncengine.js
services/sync/tests/unit/test_syncengine_sync.js
services/sync/tests/unit/test_telemetry.js
tools/lint/eslint/modules.json
--- a/services/sync/modules-testing/rotaryengine.js
+++ b/services/sync/modules-testing/rotaryengine.js
@@ -95,18 +95,18 @@ RotaryTracker.prototype = {
   __proto__: Tracker.prototype,
   persistChangedIDs: false,
 };
 
 
 this.RotaryEngine = function RotaryEngine(service) {
   SyncEngine.call(this, "Rotary", service);
   // Ensure that the engine starts with a clean slate.
-  this.toFetch        = [];
-  this.previousFailed = [];
+  this.toFetch        = new SerializableSet();
+  this.previousFailed = new SerializableSet();
 };
 RotaryEngine.prototype = {
   __proto__: SyncEngine.prototype,
   _storeObj: RotaryStore,
   _trackerObj: RotaryTracker,
   _recordObj: RotaryRecord,
 
   async _findDupe(item) {
--- a/services/sync/modules/bookmark_repair.js
+++ b/services/sync/modules/bookmark_repair.js
@@ -189,19 +189,21 @@ class BookmarkRepairRequestor extends Co
   tryServerOnlyRepairs(validationInfo) {
     if (this._countServerOnlyFixableProblems(validationInfo) == 0) {
       return false;
     }
     let engine = this.service.engineManager.get("bookmarks");
     for (let id of validationInfo.problems.serverMissing) {
       engine.addForWeakUpload(id);
     }
-    let toFetch = engine.toFetch.concat(validationInfo.problems.clientMissing,
-                                        validationInfo.problems.serverDeleted);
-    engine.toFetch = Array.from(new Set(toFetch));
+    engine.toFetch = Utils.setAddAll(
+      Utils.setAddAll(engine.toFetch,
+                      validationInfo.problems.clientMissing),
+      validationInfo.problems.serverDeleted
+    );
     return true;
   }
 
   /* See if the repairer is willing and able to begin a repair process given
      the specified validation information.
      Returns true if a repair was started and false otherwise.
   */
   async startRepairs(validationInfo, flowID) {
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -837,16 +837,19 @@ SyncEngine.prototype = {
     if (Array.isArray(json)) {
       // Pre-`JSONFile` storage stored an array, but `JSONFile` defaults to
       // an object, so we wrap the array for consistency.
       return { ids: json };
     }
     if (!json.ids) {
       json.ids = [];
     }
+    // The set serializes the same way as an array, but offers more efficient
+    // methods of manipulation.
+    json.ids = new SerializableSet(json.ids);
     return json;
   },
 
   async _beforeSaveMetadata() {
     await ensureDirectory(this._toFetchStorage.path);
     await ensureDirectory(this._previousFailedStorage.path);
   },
 
@@ -906,25 +909,33 @@ SyncEngine.prototype = {
   },
 
   get toFetch() {
     this._toFetchStorage.ensureDataReady();
     return this._toFetchStorage.data.ids;
   },
 
   set toFetch(ids) {
+    if (ids.constructor.name != "SerializableSet") {
+      throw new Error("Bug: Attempted to set toFetch to something that isn't a SerializableSet");
+    }
     this._toFetchStorage.data = { ids };
     this._toFetchStorage.saveSoon();
   },
 
   get previousFailed() {
     this._previousFailedStorage.ensureDataReady();
     return this._previousFailedStorage.data.ids;
   },
+
   set previousFailed(ids) {
+    if (ids.constructor.name != "SerializableSet") {
+      throw new Error(
+        "Bug: Attempted to set previousFailed to something that isn't a SerializableSet");
+    }
     this._previousFailedStorage.data = { ids };
     this._previousFailedStorage.saveSoon();
   },
 
   /*
    * lastSyncLocal is a timestamp in local time.
    */
   get lastSyncLocal() {
@@ -1095,17 +1106,17 @@ SyncEngine.prototype = {
     }
 
     // applied    => number of items that should be applied.
     // failed     => number of items that failed in this sync.
     // newFailed  => number of items that failed for the first time in this sync.
     // reconciled => number of items that were reconciled.
     let count = {applied: 0, failed: 0, newFailed: 0, reconciled: 0};
     let recordsToApply = [];
-    let failedInCurrentSync = [];
+    let failedInCurrentSync = new SerializableSet();
 
     let oldestModified = this.lastModified;
     let downloadedIDs = new Set();
 
     // Stage 1: Fetch new records from the server, up to the download limit.
     if (this.lastModified == null || this.lastModified > this.lastSync) {
       let { response, records } = await newitems.getBatched(this.downloadBatchSize);
       if (!response.success) {
@@ -1119,29 +1130,29 @@ SyncEngine.prototype = {
         downloadedIDs.add(record.id);
 
         if (record.modified < oldestModified) {
           oldestModified = record.modified;
         }
 
         let { shouldApply, error } = await this._maybeReconcile(record);
         if (error) {
-          failedInCurrentSync.push(record.id);
+          failedInCurrentSync.add(record.id);
           count.failed++;
           continue;
         }
         if (!shouldApply) {
           count.reconciled++;
           continue;
         }
         recordsToApply.push(record);
       }
 
       let failedToApply = await this._applyRecords(recordsToApply);
-      failedInCurrentSync.push(...failedToApply);
+      Utils.setAddAll(failedInCurrentSync, failedToApply);
 
       // `applied` is a bit of a misnomer: it counts records that *should* be
       // applied, so it also includes records that we tried to apply and failed.
       // `recordsToApply.length - failedToApply.length` is the number of records
       // that we *successfully* applied.
       count.failed += failedToApply.length;
       count.applied += recordsToApply.length;
     }
@@ -1160,33 +1171,34 @@ SyncEngine.prototype = {
       let guids = await guidColl.get();
       if (!guids.success)
         throw guids;
 
       // Filtering out already downloaded IDs here isn't necessary. We only do
       // that in case the Sync server doesn't support `older` (bug 1316110).
       let remainingIDs = guids.obj.filter(id => !downloadedIDs.has(id));
       if (remainingIDs.length > 0) {
-        this.toFetch = Utils.arrayUnion(this.toFetch, remainingIDs);
+        this.toFetch = Utils.setAddAll(this.toFetch, remainingIDs);
       }
     }
 
     // Fast-foward the lastSync timestamp since we have backlogged the
     // remaining items.
     if (this.lastSync < this.lastModified) {
       this.lastSync = this.lastModified;
     }
 
     // Stage 3: Backfill records from the backlog, and those that failed to
     // decrypt or apply during the last sync. We only backfill up to the
     // download limit, to prevent a large backlog for one engine from blocking
     // the others. We'll keep processing the backlog on subsequent engine syncs.
     let failedInPreviousSync = this.previousFailed;
-    let idsToBackfill = Utils.arrayUnion(this.toFetch.slice(0, downloadLimit),
-      failedInPreviousSync);
+    let idsToBackfill = Array.from(
+      Utils.setAddAll(Utils.subsetOfSize(this.toFetch, downloadLimit),
+                      failedInPreviousSync));
 
     // Note that we intentionally overwrite the previously failed list here.
     // Records that fail to decrypt or apply in two consecutive syncs are likely
     // corrupt; we remove them from the list because retrying and failing on
     // every subsequent sync just adds noise.
     this.previousFailed = failedInCurrentSync;
 
     let backfilledItems = this.itemSource();
@@ -1225,30 +1237,31 @@ SyncEngine.prototype = {
         backfilledRecordsToApply.push(record);
       }
       let failedToApply = await this._applyRecords(backfilledRecordsToApply);
       failedInBackfill.push(...failedToApply);
 
       count.failed += failedToApply.length;
       count.applied += backfilledRecordsToApply.length;
 
-      this.toFetch = Utils.arraySub(this.toFetch, ids);
-      this.previousFailed = Utils.arrayUnion(this.previousFailed, failedInBackfill);
+      this.toFetch = Utils.setDeleteAll(this.toFetch, ids);
+      this.previousFailed = Utils.setAddAll(this.previousFailed, failedInBackfill);
 
       if (this.lastSync < this.lastModified) {
         this.lastSync = this.lastModified;
       }
     }
 
-    count.newFailed = this.previousFailed.reduce((count, engine) => {
-      if (failedInPreviousSync.indexOf(engine) == -1) {
-        count++;
+    count.newFailed = 0;
+    for (let item of this.previousFailed) {
+      if (!failedInPreviousSync.has(item)) {
+        ++count.newFailed;
       }
-      return count;
-    }, 0);
+    }
+
     count.succeeded = Math.max(0, count.applied - count.failed);
     this._log.info(["Records:",
                     count.applied, "applied,",
                     count.succeeded, "successfully,",
                     count.failed, "failed to apply,",
                     count.newFailed, "newly failed to apply,",
                     count.reconciled, "reconciled."].join(" "));
     Observers.notify("weave:engine:sync:applied", count, this.name);
@@ -1806,18 +1819,18 @@ SyncEngine.prototype = {
       this._log.debug("Failed test decrypt", ex);
     }
 
     return canDecrypt;
   },
 
   async _resetClient() {
     this.resetLastSync();
-    this.previousFailed = [];
-    this.toFetch = [];
+    this.previousFailed = new SerializableSet();
+    this.toFetch = new SerializableSet();
     this._needWeakUpload.clear();
   },
 
   async wipeServer() {
     let response = await this.service.resource(this.engineURL).delete();
     if (response.status != 200 && response.status != 404) {
       throw response;
     }
--- a/services/sync/modules/util.js
+++ b/services/sync/modules/util.js
@@ -1,13 +1,13 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-this.EXPORTED_SYMBOLS = ["Utils", "Svc"];
+this.EXPORTED_SYMBOLS = ["Utils", "Svc", "SerializableSet"];
 
 var {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
 
 Cu.import("resource://services-common/observers.js");
 Cu.import("resource://services-common/utils.js");
 Cu.import("resource://services-crypto/utils.js");
 Cu.import("resource://services-sync/constants.js");
 Cu.import("resource://gre/modules/Preferences.jsm");
@@ -519,16 +519,57 @@ this.Utils = {
   arrayUnion: function arrayUnion(foo, bar) {
     if (!foo.length)
       return bar;
     if (!bar.length)
       return foo;
     return foo.concat(Utils.arraySub(bar, foo));
   },
 
+  /**
+   * Add all the items in `items` to the provided Set in-place.
+   *
+   * @return The provided set.
+   */
+  setAddAll(set, items) {
+    for (let item of items) {
+      set.add(item);
+    }
+    return set;
+  },
+
+  /**
+   * Delete each item in `items` from the provided Set in-place.
+   *
+   * @return The provided set.
+   */
+  setDeleteAll(set, items) {
+    for (let item of items) {
+      set.delete(item);
+    }
+    return set;
+  },
+
+  /**
+   * Take the first `size` items from the Set `items`.
+   *
+   * @return A Set of size at most `size`
+   */
+  subsetOfSize(items, size) {
+    let result = new Set();
+    let count = 0;
+    for (let item of items) {
+      if (count++ == size) {
+        return result;
+      }
+      result.add(item);
+    }
+    return result;
+  },
+
   bind2: function Async_bind2(object, method) {
     return function innerBind() { return method.apply(object, arguments); };
   },
 
   /**
    * Is there a master password configured and currently locked?
    */
   mpLocked() {
@@ -698,16 +739,25 @@ this.Utils = {
     let hours = String(date.getHours()).padStart(2, "0");
     let minutes = String(date.getMinutes()).padStart(2, "0");
     let seconds = String(date.getSeconds()).padStart(2, "0");
 
     return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`;
   }
 };
 
+/**
+ * A subclass of Set that serializes as an Array when passed to JSON.stringify.
+ */
+class SerializableSet extends Set {
+  toJSON() {
+    return Array.from(this);
+  }
+}
+
 XPCOMUtils.defineLazyGetter(Utils, "_utf8Converter", function() {
   let converter = Cc["@mozilla.org/intl/scriptableunicodeconverter"]
                     .createInstance(Ci.nsIScriptableUnicodeConverter);
   converter.charset = "UTF-8";
   return converter;
 });
 
 XPCOMUtils.defineLazyGetter(Utils, "utf8Encoder", () =>
--- a/services/sync/tests/unit/test_bookmark_engine.js
+++ b/services/sync/tests/unit/test_bookmark_engine.js
@@ -165,17 +165,17 @@ add_task(async function test_processInco
     let bogus_record = collection.insert(BOGUS_GUID, "I'm a bogus record!");
     bogus_record.get = function get() {
       throw new Error("Sync this!");
     };
 
     // Make the 10 minutes old so it will only be synced in the toFetch phase.
     bogus_record.modified = Date.now() / 1000 - 60 * 10;
     engine.lastSync = Date.now() / 1000 - 60;
-    engine.toFetch = [BOGUS_GUID];
+    engine.toFetch = new SerializableSet([BOGUS_GUID]);
 
     let error;
     try {
       await sync_engine_and_validate_telem(engine, true);
     } catch (ex) {
       error = ex;
     }
     ok(!!error);
--- a/services/sync/tests/unit/test_history_engine.js
+++ b/services/sync/tests/unit/test_history_engine.js
@@ -64,32 +64,32 @@ add_task(async function test_history_dow
 
   // Don't actually fetch any backlogged records, so that we can inspect
   // the backlog between syncs.
   engine.guidFetchBatchSize = 0;
 
   let ping = await sync_engine_and_validate_telem(engine, false);
   deepEqual(ping.engines[0].incoming, { applied: 5 });
 
-  let backlogAfterFirstSync = engine.toFetch.slice(0);
+  let backlogAfterFirstSync = Array.from(engine.toFetch).sort();
   deepEqual(backlogAfterFirstSync, ["place0000000", "place0000001",
     "place0000002", "place0000003", "place0000004", "place0000005",
     "place0000006", "place0000007", "place0000008", "place0000009"]);
 
   // We should have fast-forwarded the last sync time.
   equal(engine.lastSync, lastSync + 15);
 
   engine.lastModified = collection.modified;
   ping = await sync_engine_and_validate_telem(engine, false);
   ok(!ping.engines[0].incoming);
 
   // After the second sync, our backlog still contains the same GUIDs: we
   // weren't able to make progress on fetching them, since our
   // `guidFetchBatchSize` is 0.
-  let backlogAfterSecondSync = engine.toFetch.slice(0);
+  let backlogAfterSecondSync = Array.from(engine.toFetch).sort();
   deepEqual(backlogAfterFirstSync, backlogAfterSecondSync);
 
   // Now add a newer record to the server.
   let newWBO = new ServerWBO("placeAAAAAAA", encryptPayload({
     id: "placeAAAAAAA",
     histUri: "http://example.com/a",
     title: "New Page A",
     visits: [{
@@ -100,38 +100,39 @@ add_task(async function test_history_dow
   newWBO.sortindex = -1;
   collection.insertWBO(newWBO);
 
   engine.lastModified = collection.modified;
   ping = await sync_engine_and_validate_telem(engine, false);
   deepEqual(ping.engines[0].incoming, { applied: 1 });
 
   // Our backlog should remain the same.
-  let backlogAfterThirdSync = engine.toFetch.slice(0);
+  let backlogAfterThirdSync = Array.from(engine.toFetch).sort();
   deepEqual(backlogAfterSecondSync, backlogAfterThirdSync);
 
   equal(engine.lastSync, lastSync + 20);
 
   // Bump the fetch batch size to let the backlog make progress. We should
   // make 3 requests to fetch 5 backlogged GUIDs.
   engine.guidFetchBatchSize = 2;
 
   engine.lastModified = collection.modified;
   ping = await sync_engine_and_validate_telem(engine, false);
   deepEqual(ping.engines[0].incoming, { applied: 5 });
 
-  deepEqual(engine.toFetch, ["place0000005", "place0000006", "place0000007",
-    "place0000008", "place0000009"]);
+  deepEqual(
+    Array.from(engine.toFetch).sort(),
+    ["place0000005", "place0000006", "place0000007", "place0000008", "place0000009"]);
 
   // Sync again to clear out the backlog.
   engine.lastModified = collection.modified;
   ping = await sync_engine_and_validate_telem(engine, false);
   deepEqual(ping.engines[0].incoming, { applied: 5 });
 
-  deepEqual(engine.toFetch, []);
+  deepEqual(Array.from(engine.toFetch), []);
   await PlacesTestUtils.clearHistory();
 });
 
 add_task(async function test_history_visit_roundtrip() {
   let engine = new HistoryEngine(Service);
   await engine.initialize();
   let server = await serverForFoo(engine);
   await SyncTestingInfrastructure(server);
--- a/services/sync/tests/unit/test_syncengine.js
+++ b/services/sync/tests/unit/test_syncengine.js
@@ -7,16 +7,26 @@ Cu.import("resource://services-sync/serv
 Cu.import("resource://services-sync/util.js");
 
 async function makeSteamEngine() {
   let engine = new SyncEngine("Steam", Service);
   await engine.initialize();
   return engine;
 }
 
+function guidSetOfSize(length) {
+  return new SerializableSet(
+    Array.from({ length }, () => Utils.makeGUID()));
+}
+
+function assertSetsEqual(a, b) {
+  // Assert.deepEqual doesn't understand Set.
+  Assert.deepEqual(Array.from(a).sort(), Array.from(b).sort());
+}
+
 async function testSteamEngineStorage(test) {
   try {
     let setupEngine = await makeSteamEngine();
 
     if (test.setup) {
       await test.setup(setupEngine);
     }
 
@@ -108,135 +118,135 @@ add_task(async function test_lastSync() 
 });
 
 add_task(async function test_toFetch() {
   _("SyncEngine.toFetch corresponds to file on disk");
   await SyncTestingInfrastructure(server);
   const filename = "weave/toFetch/steam.json";
 
   await testSteamEngineStorage({
-    toFetch: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    toFetch: guidSetOfSize(3),
     setup(engine) {
       // Ensure pristine environment
-      Assert.equal(engine.toFetch.length, 0);
+      Assert.equal(engine.toFetch.size, 0);
 
       // Write file to disk
       engine.toFetch = this.toFetch;
       Assert.equal(engine.toFetch, this.toFetch);
     },
     check(engine) {
       // toFetch is written asynchronously
-      Assert.deepEqual(engine.toFetch, this.toFetch);
+      assertSetsEqual(engine.toFetch, this.toFetch);
     },
   });
 
   await testSteamEngineStorage({
-    toFetch: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
-    toFetch2: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    toFetch: guidSetOfSize(4),
+    toFetch2: guidSetOfSize(5),
     setup(engine) {
       // Make sure it work for consecutive writes before the callback is executed.
       engine.toFetch = this.toFetch;
       Assert.equal(engine.toFetch, this.toFetch);
 
       engine.toFetch = this.toFetch2;
       Assert.equal(engine.toFetch, this.toFetch2);
     },
     check(engine) {
-      Assert.deepEqual(engine.toFetch, this.toFetch2);
+      assertSetsEqual(engine.toFetch, this.toFetch2);
     },
   });
 
   await testSteamEngineStorage({
-    toFetch: [Utils.makeGUID(), Utils.makeGUID()],
+    toFetch: guidSetOfSize(2),
     async beforeCheck() {
       let toFetchPath = OS.Path.join(OS.Constants.Path.profileDir, filename);
       let bytes = new TextEncoder().encode(JSON.stringify(this.toFetch));
       await OS.File.writeAtomic(toFetchPath, bytes,
                                 { tmpPath: toFetchPath + ".tmp" });
     },
     check(engine) {
       // Read file from disk
-      Assert.deepEqual(engine.toFetch, this.toFetch);
+      assertSetsEqual(engine.toFetch, this.toFetch);
     },
   });
 });
 
 add_task(async function test_previousFailed() {
   _("SyncEngine.previousFailed corresponds to file on disk");
   await SyncTestingInfrastructure(server);
   const filename = "weave/failed/steam.json";
 
   await testSteamEngineStorage({
-    previousFailed: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    previousFailed: guidSetOfSize(3),
     setup(engine) {
       // Ensure pristine environment
-      Assert.equal(engine.previousFailed.length, 0);
+      Assert.equal(engine.previousFailed.size, 0);
 
       // Write file to disk
       engine.previousFailed = this.previousFailed;
       Assert.equal(engine.previousFailed, this.previousFailed);
     },
     check(engine) {
       // previousFailed is written asynchronously
-      Assert.deepEqual(engine.previousFailed, this.previousFailed);
+      assertSetsEqual(engine.previousFailed, this.previousFailed);
     },
   });
 
   await testSteamEngineStorage({
-    previousFailed: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
-    previousFailed2: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    previousFailed: guidSetOfSize(4),
+    previousFailed2: guidSetOfSize(5),
     setup(engine) {
       // Make sure it work for consecutive writes before the callback is executed.
       engine.previousFailed = this.previousFailed;
       Assert.equal(engine.previousFailed, this.previousFailed);
 
       engine.previousFailed = this.previousFailed2;
       Assert.equal(engine.previousFailed, this.previousFailed2);
     },
     check(engine) {
-      Assert.deepEqual(engine.previousFailed, this.previousFailed2);
+      assertSetsEqual(engine.previousFailed, this.previousFailed2);
     },
   });
 
   await testSteamEngineStorage({
-    previousFailed: [Utils.makeGUID(), Utils.makeGUID()],
+    previousFailed: guidSetOfSize(2),
     async beforeCheck() {
       let previousFailedPath = OS.Path.join(OS.Constants.Path.profileDir,
                                             filename);
       let bytes = new TextEncoder().encode(JSON.stringify(this.previousFailed));
       await OS.File.writeAtomic(previousFailedPath, bytes,
                                 { tmpPath: previousFailedPath + ".tmp" });
     },
     check(engine) {
       // Read file from disk
-      Assert.deepEqual(engine.previousFailed, this.previousFailed);
+      assertSetsEqual(engine.previousFailed, this.previousFailed);
     },
   });
 });
 
 add_task(async function test_resetClient() {
   _("SyncEngine.resetClient resets lastSync and toFetch");
   await SyncTestingInfrastructure(server);
   let engine = await makeSteamEngine();
   try {
     // Ensure pristine environment
     Assert.equal(Svc.Prefs.get("steam.lastSync"), undefined);
     Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
-    Assert.equal(engine.toFetch.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
 
     engine.lastSync = 123.45;
     engine.lastSyncLocal = 67890;
-    engine.toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
-    engine.previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
+    engine.toFetch = guidSetOfSize(4);
+    engine.previousFailed = guidSetOfSize(3);
 
     await engine.resetClient();
     Assert.equal(engine.lastSync, 0);
     Assert.equal(engine.lastSyncLocal, 0);
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
   } finally {
     Svc.Prefs.resetBranch("");
   }
 });
 
 add_task(async function test_wipeServer() {
   _("SyncEngine.wipeServer deletes server data and resets the client.");
   let engine = await makeSteamEngine();
@@ -247,23 +257,23 @@ add_task(async function test_wipeServer(
     "/1.1/foo/storage/steam": steamCollection.handler()
   });
   await SyncTestingInfrastructure(steamServer);
   do_test_pending();
 
   try {
     // Some data to reset.
     engine.lastSync = 123.45;
-    engine.toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
+    engine.toFetch = guidSetOfSize(3);
 
     _("Wipe server data and reset client.");
     await engine.wipeServer();
     Assert.equal(steamCollection.payload, undefined);
     Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.toFetch.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
 
   } finally {
     steamServer.stop(do_test_finished);
     Svc.Prefs.resetBranch("");
   }
 });
 
 add_task(async function finish() {
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -622,18 +622,18 @@ add_task(async function test_processInco
 
   collection.wbo("flying").modified =
     collection.wbo("scotsman").modified = LASTSYNC - 10;
   collection._wbos.rekolok.modified = LASTSYNC + 10;
 
   // Time travel 10 seconds into the future but still download the above WBOs.
   let engine = makeRotaryEngine();
   engine.lastSync = LASTSYNC;
-  engine.toFetch = ["flying", "scotsman"];
-  engine.previousFailed = ["failed0", "failed1", "failed2"];
+  engine.toFetch = new SerializableSet(["flying", "scotsman"]);
+  engine.previousFailed = new SerializableSet(["failed0", "failed1", "failed2"]);
 
   let server = sync_httpd_setup({
       "/1.1/foo/storage/rotary": collection.handler()
   });
 
   await SyncTestingInfrastructure(server);
 
   let meta_global = Service.recordManager.set(engine.metaURL,
@@ -652,17 +652,17 @@ add_task(async function test_processInco
 
     // Local records have been created from the server data.
     Assert.equal(engine._store.items.flying, "LNER Class A3 4472");
     Assert.equal(engine._store.items.scotsman, "Flying Scotsman");
     Assert.equal(engine._store.items.rekolok, "Rekonstruktionslokomotive");
     Assert.equal(engine._store.items.failed0, "Record No. 0");
     Assert.equal(engine._store.items.failed1, "Record No. 1");
     Assert.equal(engine._store.items.failed2, "Record No. 2");
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.previousFailed.size, 0);
   } finally {
     await cleanAndGo(engine, server);
   }
 });
 
 
 add_task(async function test_processIncoming_notify_count() {
   _("Ensure that failed records are reported only once.");
@@ -698,18 +698,18 @@ add_task(async function test_processInco
 
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
   meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
   try {
     // Confirm initial environment.
     Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);
 
     let called = 0;
     let counts;
     function onApplied(count) {
       _("Called with " + JSON.stringify(counts));
       counts = count;
       called++;
@@ -717,32 +717,32 @@ add_task(async function test_processInco
     Svc.Obs.add("weave:engine:sync:applied", onApplied);
 
     // Do sync.
     await engine._syncStartup();
     await engine._processIncoming();
 
     // Confirm failures.
     do_check_attribute_count(engine._store.items, 12);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00", "record-no-05",
-      "record-no-10"]);
+    Assert.deepEqual(Array.from(engine.previousFailed).sort(),
+                     ["record-no-00", "record-no-05", "record-no-10"].sort());
 
     // There are newly failed records and they are reported.
     Assert.equal(called, 1);
     Assert.equal(counts.failed, 3);
     Assert.equal(counts.applied, 15);
     Assert.equal(counts.newFailed, 3);
     Assert.equal(counts.succeeded, 12);
 
     // Sync again, 1 of the failed items are the same, the rest didn't fail.
     await engine._processIncoming();
 
     // Confirming removed failures.
     do_check_attribute_count(engine._store.items, 14);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00"]);
+    Assert.deepEqual(Array.from(engine.previousFailed), ["record-no-00"]);
 
     Assert.equal(called, 2);
     Assert.equal(counts.failed, 1);
     Assert.equal(counts.applied, 3);
     Assert.equal(counts.newFailed, 0);
     Assert.equal(counts.succeeded, 2);
 
     Svc.Obs.remove("weave:engine:sync:applied", onApplied);
@@ -787,43 +787,45 @@ add_task(async function test_processInco
 
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
   meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
   try {
     // Confirm initial environment.
     Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);
 
     // Initial failed items in previousFailed to be reset.
-    let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
+    let previousFailed = new SerializableSet([Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()]);
     engine.previousFailed = previousFailed;
     Assert.equal(engine.previousFailed, previousFailed);
 
     // Do sync.
     await engine._syncStartup();
     await engine._processIncoming();
 
     // Expected result: 4 sync batches with 2 failures each => 8 failures
     do_check_attribute_count(engine._store.items, 6);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00", "record-no-01",
-      "record-no-04", "record-no-05", "record-no-08", "record-no-09",
-      "record-no-12", "record-no-13"]);
+    Assert.deepEqual(
+      Array.from(engine.previousFailed).sort(),
+      ["record-no-00", "record-no-01", "record-no-04", "record-no-05",
+       "record-no-08", "record-no-09", "record-no-12", "record-no-13"].sort());
 
     // Sync again with the same failed items (records 0, 1, 8, 9).
     await engine._processIncoming();
 
     // A second sync with the same failed items should not add the same items again.
     // Items that did not fail a second time should no longer be in previousFailed.
     do_check_attribute_count(engine._store.items, 10);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00", "record-no-01",
-      "record-no-08", "record-no-09"]);
+    Assert.deepEqual(
+      Array.from(engine.previousFailed).sort(),
+      ["record-no-00", "record-no-01", "record-no-08", "record-no-09"].sort());
 
     // Refetched items that didn't fail the second time are in engine._store.items.
     Assert.equal(engine._store.items["record-no-04"], "Record No. 4");
     Assert.equal(engine._store.items["record-no-05"], "Record No. 5");
     Assert.equal(engine._store.items["record-no-12"], "Record No. 12");
     Assert.equal(engine._store.items["record-no-13"], "Record No. 13");
   } finally {
     await cleanAndGo(engine, server);
@@ -895,18 +897,18 @@ add_task(async function test_processInco
                                               new WBORecord(engine.metaURL));
   meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
 
   try {
 
     // Confirm initial environment
     Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);
 
     let observerSubject;
     let observerData;
     Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
       Svc.Obs.remove("weave:engine:sync:applied", onApplied);
       observerSubject = subject;
       observerData = data;
@@ -915,18 +917,18 @@ add_task(async function test_processInco
     await engine._syncStartup();
     await engine._processIncoming();
 
     // Ensure that all records but the bogus 4 have been applied.
     do_check_attribute_count(engine._store.items,
                              NUMBER_OF_RECORDS - BOGUS_RECORDS.length);
 
     // Ensure that the bogus records will be fetched again on the next sync.
-    Assert.equal(engine.previousFailed.length, BOGUS_RECORDS.length);
-    Assert.deepEqual(engine.previousFailed.sort(), BOGUS_RECORDS.sort());
+    Assert.equal(engine.previousFailed.size, BOGUS_RECORDS.length);
+    Assert.deepEqual(Array.from(engine.previousFailed).sort(), BOGUS_RECORDS.sort());
 
     // Ensure the observer was notified
     Assert.equal(observerData, engine.name);
     Assert.equal(observerSubject.failed, BOGUS_RECORDS.length);
     Assert.equal(observerSubject.newFailed, BOGUS_RECORDS.length);
 
     // Testing batching of failed item fetches.
     // Try to sync again. Ensure that we split the request into chunks to avoid
@@ -994,38 +996,38 @@ add_task(async function test_processInco
 
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
   meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
   try {
 
     // Confirm initial state
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
 
     let observerSubject;
     let observerData;
     Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
       Svc.Obs.remove("weave:engine:sync:applied", onApplied);
       observerSubject = subject;
       observerData = data;
     });
 
     engine.lastSync = collection.wbo("nojson").modified - 1;
     let ping = await sync_engine_and_validate_telem(engine, true);
     Assert.equal(ping.engines[0].incoming.applied, 2);
     Assert.equal(ping.engines[0].incoming.failed, 4);
     Assert.equal(ping.engines[0].incoming.newFailed, 4);
 
-    Assert.equal(engine.previousFailed.length, 4);
-    Assert.equal(engine.previousFailed[0], "nojson");
-    Assert.equal(engine.previousFailed[1], "nojson2");
-    Assert.equal(engine.previousFailed[2], "nodecrypt");
-    Assert.equal(engine.previousFailed[3], "nodecrypt2");
+    Assert.equal(engine.previousFailed.size, 4);
+    Assert.ok(engine.previousFailed.has("nojson"));
+    Assert.ok(engine.previousFailed.has("nojson2"));
+    Assert.ok(engine.previousFailed.has("nodecrypt"));
+    Assert.ok(engine.previousFailed.has("nodecrypt2"));
 
     // Ensure the observer was notified
     Assert.equal(observerData, engine.name);
     Assert.equal(observerSubject.applied, 2);
     Assert.equal(observerSubject.failed, 4);
 
   } finally {
     await promiseClean(engine, server);
--- a/services/sync/tests/unit/test_telemetry.js
+++ b/services/sync/tests/unit/test_telemetry.js
@@ -117,17 +117,17 @@ add_task(async function test_processInco
     const BOGUS_GUID = "zzzzzzzzzzzz";
     let bogus_record = collection.insert(BOGUS_GUID, "I'm a bogus record!");
     bogus_record.get = function get() {
       throw new Error("Sync this!");
     };
    // Make the record 10 minutes old so it will only be synced in the toFetch phase.
     bogus_record.modified = Date.now() / 1000 - 60 * 10;
     engine.lastSync = Date.now() / 1000 - 60;
-    engine.toFetch = [BOGUS_GUID];
+    engine.toFetch = new SerializableSet([BOGUS_GUID]);
 
     let error, pingPayload, fullPing;
     try {
       await sync_engine_and_validate_telem(engine, true, (errPing, fullErrPing) => {
         pingPayload = errPing;
         fullPing = fullErrPing;
       });
     } catch (ex) {
--- a/tools/lint/eslint/modules.json
+++ b/tools/lint/eslint/modules.json
@@ -203,17 +203,17 @@
   "Timer.jsm": ["setTimeout", "setTimeoutWithTarget", "clearTimeout", "setInterval", "setIntervalWithTarget", "clearInterval"],
   "tokenserverclient.js": ["TokenServerClient", "TokenServerClientError", "TokenServerClientNetworkError", "TokenServerClientServerError"],
   "ToolboxProcess.jsm": ["BrowserToolboxProcess"],
   "tps.jsm": ["ACTIONS", "TPS"],
   "Translation.jsm": ["Translation", "TranslationTelemetry"],
   "Traversal.jsm": ["TraversalRules", "TraversalHelper"],
   "UpdateTelemetry.jsm": ["AUSTLMY"],
   "UpdateTopLevelContentWindowIDHelper.jsm": ["trackBrowserWindow"],
-  "util.js": ["getChromeWindow", "Utils", "Svc"],
+  "util.js": ["getChromeWindow", "Utils", "Svc", "SerializableSet"],
   "utils.js": ["btoa", "encryptPayload", "makeIdentityConfig", "makeFxAccountsInternalMock", "configureFxAccountIdentity", "configureIdentity", "SyncTestingInfrastructure", "waitForZeroTimer", "Promise", "MockFxaStorageManager", "AccountState", "sumHistogram", "CommonUtils", "CryptoUtils", "TestingUtils", "promiseZeroTimer", "promiseNamedTimer", "getLoginTelemetryScalar", "syncTestLogging"],
   "Utils.jsm": ["Utils", "Logger", "PivotContext", "PrefCache"],
   "VariablesView.jsm": ["VariablesView", "escapeHTML"],
   "VariablesViewController.jsm": ["VariablesViewController", "StackFrameUtils"],
   "version.jsm": ["VERSION"],
   "vtt.jsm": ["WebVTT"],
   "WebChannel.jsm": ["WebChannel", "WebChannelBroker"],
   "WindowDraggingUtils.jsm": ["WindowDraggingElement"],