Bug 1368209 - Add a test for fetching backlogged history records. r?tcsc draft
author Kit Cambridge <kit@yakshaving.ninja>
Fri, 27 Oct 2017 17:54:48 -0700
changeset 692825 91a81dccbd167dc1974476ad44318146132166d3
parent 692425 5a72d3b88da7073d36218dc07279f076873feaaf
child 692826 de41ca3a55a169bfe89504ea4ea4f427e966998e
child 692869 b235bfc81d73b50e54686f5b34816d36d6012e51
push id 87605
push user bmo:kit@mozilla.com
push date Fri, 03 Nov 2017 16:08:56 +0000
reviewers tcsc
bugs 1368209
milestone 58.0a1
Bug 1368209 - Add a test for fetching backlogged history records. r?tcsc

The test captures the existing logic in `_processIncoming`, even though
it's not quite correct:

* First, we fetch all records changed since the last sync, up to the
  download limit, and without an explicit sort order. This happens to
  work correctly now because the Python server uses "newest" by
  default, but that may change in the future.

* If we reached the download limit fetching records, we request IDs for
  all records changed since the last sync, also up to the download
  limit, and sorted by index, as sketched below. This is likely to
  return IDs for records we've already seen, since the index is based
  on frecency. It's also likely to miss IDs for other changed records,
  because the number of changed records might exceed the download
  limit.

* Since we then fast-forward the last sync time, we'll never download
  any remaining changed records that we didn't add to our backlog.

* Finally, we backfill previously failed and backlogged records.

MozReview-Commit-ID: 7uQLXMseMIU
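For reference, here's a minimal sketch of the flow those bullets
describe. The `fetchRecords` and `fetchChangedIDs` helpers and the
`_applyIncoming` method are hypothetical stand-ins for the collection
requests and record application in `SyncEngine._processIncoming`; only
the control flow mirrors the real code:

  async function processIncomingSketch(engine, server) {
    // 1. Fetch records changed since the last sync, up to the download
    //    limit. No explicit sort order: we currently rely on the
    //    server defaulting to "newest".
    let records = await server.fetchRecords({
      newer: engine.lastSync,
      limit: engine.downloadLimit,
    });
    for (let record of records) {
      await engine._applyIncoming(record);
    }

    // 2. If we hit the download limit, request IDs of changed records,
    //    sorted by index (frecency). This can repeat IDs we just
    //    applied, and misses changed records beyond the limit.
    if (records.length == engine.downloadLimit) {
      let seen = new Set(records.map(r => r.id));
      let ids = await server.fetchChangedIDs({
        newer: engine.lastSync,
        limit: engine.downloadLimit,
        sort: "index",
      });
      engine.toFetch = [...new Set([
        ...engine.toFetch,
        ...ids.filter(id => !seen.has(id)),
      ])];
    }

    // 3. Fast-forward the last sync time. Changed records that made it
    //    into neither the download nor the backlog are never fetched.
    engine.lastSync = engine.lastModified;

    // 4. Backfill failed and backlogged records, `guidFetchBatchSize`
    //    IDs per request, to stay under URI length limits.
    while (engine.guidFetchBatchSize && engine.toFetch.length) {
      let batch = engine.toFetch.slice(0, engine.guidFetchBatchSize);
      for (let record of await server.fetchRecords({ ids: batch })) {
        await engine._applyIncoming(record);
      }
      engine.toFetch = engine.toFetch.slice(batch.length);
    }
  }

The new test below exercises exactly these steps, including the
fast-forward in step 3.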
services/sync/modules/engines.js
services/sync/tests/unit/test_history_engine.js
services/sync/tests/unit/xpcshell.ini
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -824,16 +824,19 @@ SyncEngine.prototype = {
 
   // How many records to pull at one time when specifying IDs. This is to avoid
   // URI length limitations.
   guidFetchBatchSize: DEFAULT_GUID_FETCH_BATCH_SIZE,
 
   // How many records to process in a single batch.
   applyIncomingBatchSize: DEFAULT_STORE_BATCH_SIZE,
 
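+  // How many records to fetch from the server in a single batched request.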
+  downloadBatchSize: DEFAULT_DOWNLOAD_BATCH_SIZE,
+
   async initialize() {
     await this.loadToFetch();
     await this.loadPreviousFailed();
     this._log.debug("SyncEngine initialized", this.name);
   },
 
   get storageURL() {
     return this.service.storageURL;
@@ -1238,17 +1240,17 @@ SyncEngine.prototype = {
 
       if (applyBatch.length == self.applyIncomingBatchSize) {
         await doApplyBatch.call(self);
       }
     };
 
     // Only bother getting data from the server if there's new things
     if (this.lastModified == null || this.lastModified > this.lastSync) {
-      let { response, records } = await newitems.getBatched();
+      let { response, records } = await newitems.getBatched(this.downloadBatchSize);
       if (!response.success) {
         response.failureCode = ENGINE_DOWNLOAD_FAIL;
         throw response;
       }
 
       let maybeYield = Async.jankYielder();
       for (let record of records) {
         await maybeYield();
@@ -1296,17 +1298,17 @@ SyncEngine.prototype = {
       this.lastSync = this.lastModified;
     }
 
     // Process any backlog of GUIDs.
     // At this point we impose an upper limit on the number of items to fetch
     // in a single request, even for desktop, to avoid hitting URI limits.
     batchSize = this.guidFetchBatchSize;
 
-    while (fetchBatch.length && !aborting) {
+    while (batchSize && fetchBatch.length && !aborting) {
       // Reuse the original query, but get rid of the restricting params
       // and batch remaining records.
       newitems.limit = 0;
       newitems.newer = 0;
       newitems.ids = fetchBatch.slice(0, batchSize);
 
       let resp = await newitems.get();
       if (!resp.success) {
new file mode 100644
--- /dev/null
+++ b/services/sync/tests/unit/test_history_engine.js
@@ -0,0 +1,106 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+Cu.import("resource://services-sync/service.js");
+Cu.import("resource://services-sync/engines/history.js");
+Cu.import("resource://testing-common/services/sync/utils.js");
+
+add_task(async function setup() {
+  initTestLogging("Trace");
+});
+
+add_task(async function test_history_download_limit() {
+  let engine = new HistoryEngine(Service);
+  await engine.initialize();
+
+  let server = await serverForFoo(engine);
+  await SyncTestingInfrastructure(server);
+
+  let lastSync = Date.now() / 1000;
+
+  let collection = server.user("foo").collection("history");
+  for (let i = 0; i < 15; i++) {
+    let id = "place" + i.toString(10).padStart(7, "0");
+    let wbo = new ServerWBO(id, encryptPayload({
+      id,
+      histUri: "http://example.com/" + i,
+      title: "Page " + i,
+      visits: [{
+        date: Date.now() * 1000,
+        type: PlacesUtils.history.TRANSITIONS.TYPED,
+      }, {
+        date: Date.now() * 1000,
+        type: PlacesUtils.history.TRANSITIONS.LINK,
+      }],
+    }), lastSync + 1 + i);
+    wbo.sortindex = 15 - i;
+    collection.insertWBO(wbo);
+  }
+
+  // We have 15 records on the server since the last sync, but our download
+  // limit is 5 records per sync, fetched from the server 4 at a time.
+  engine.lastSync = lastSync;
+  engine.downloadBatchSize = 4;
+  engine.downloadLimit = 5;
+
+  // Don't actually fetch any backlogged records, so that we can inspect
+  // the backlog between syncs.
+  engine.guidFetchBatchSize = 0;
+
+  let ping = await sync_engine_and_validate_telem(engine, false);
+  deepEqual(ping.engines[0].incoming, { applied: 5 });
+
+  let backlogAfterFirstSync = engine.toFetch.slice(0);
+  deepEqual(backlogAfterFirstSync, ["place0000000", "place0000001",
+    "place0000002", "place0000003", "place0000004"]);
+
+  // We should have fast-forwarded the last sync time.
+  equal(engine.lastSync, lastSync + 15);
+
+  engine.lastModified = collection.modified;
+  ping = await sync_engine_and_validate_telem(engine, false);
+  ok(!ping.engines[0].incoming);
+
+  // After the second sync, our backlog still contains the same GUIDs: we
+  // weren't able to make progress on fetching them, since our
+  // `guidFetchBatchSize` is 0.
+  let backlogAfterSecondSync = engine.toFetch.slice(0);
+  deepEqual(backlogAfterFirstSync, backlogAfterSecondSync);
+
+  // Now add a newer record to the server. We should download and apply it, even
+  // though we've backlogged records with higher sort indices.
+  let newWBO = new ServerWBO("placeAAAAAAA", encryptPayload({
+    id: "placeAAAAAAA",
+    histUri: "http://example.com/a",
+    title: "New Page A",
+    visits: [{
+      date: Date.now() * 1000,
+      type: PlacesUtils.history.TRANSITIONS.TYPED,
+    }],
+  }), lastSync + 20);
+  newWBO.sortindex = -1;
+  collection.insertWBO(newWBO);
+
+  engine.lastModified = collection.modified;
+  ping = await sync_engine_and_validate_telem(engine, false);
+  deepEqual(ping.engines[0].incoming, { applied: 1 });
+
+  // Our backlog should remain the same.
+  let backlogAfterThirdSync = engine.toFetch.slice(0);
+  deepEqual(backlogAfterSecondSync, backlogAfterThirdSync);
+
+  equal(engine.lastSync, lastSync + 20);
+
+  // Bump the fetch batch size to let the backlog make progress. We should
+  // make 3 requests to fetch 5 backlogged GUIDs.
+  engine.guidFetchBatchSize = 2;
+
+  engine.lastModified = collection.modified;
+  ping = await sync_engine_and_validate_telem(engine, false);
+  deepEqual(ping.engines[0].incoming, { applied: 5 });
+
+  deepEqual(engine.toFetch, []);
+
+  // Note that we'll only have fetched 10 of the 15 original records: we'll
+  // never fetch the remaining 5, because they're not in our backlog.
+});
--- a/services/sync/tests/unit/xpcshell.ini
+++ b/services/sync/tests/unit/xpcshell.ini
@@ -166,16 +166,17 @@ run-sequentially = Frequent timeouts, bu
 [test_doctor.js]
 [test_extension_storage_engine.js]
 [test_extension_storage_tracker.js]
 [test_forms_store.js]
 [test_forms_tracker.js]
 # Too many intermittent "ASSERTION: thread pool wasn't shutdown: '!mPool'" (bug 804479)
 skip-if = debug
 [test_form_validator.js]
+[test_history_engine.js]
 [test_history_store.js]
 [test_history_tracker.js]
 # Too many intermittent "ASSERTION: thread pool wasn't shutdown: '!mPool'" (bug 804479)
 skip-if = debug
 [test_places_guid_downgrade.js]
 [test_password_engine.js]
 [test_password_store.js]
 [test_password_validator.js]