--- a/services/fxaccounts/tests/xpcshell/test_loginmgr_storage.js
+++ b/services/fxaccounts/tests/xpcshell/test_loginmgr_storage.js
@@ -43,17 +43,20 @@ function createFxAccounts() {
return new FxAccounts({
_getDeviceName() {
return "mock device name";
},
fxaPushService: {
registerPushEndpoint() {
return new Promise((resolve) => {
resolve({
- endpoint: "http://mochi.test:8888"
+ endpoint: "http://mochi.test:8888",
+ getKey() {
+ return null;
+ },
});
});
},
}
});
}
add_task(async function test_simple() {
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -652,31 +652,29 @@ function SyncEngine(name, service) {
beforeSave: () => this._beforeSaveMetadata(),
});
this._previousFailedStorage = new JSONFile({
path: Utils.jsonFilePath("failed/" + this.name),
dataPostProcessor: json => this._metadataPostProcessor(json),
beforeSave: () => this._beforeSaveMetadata(),
});
- Utils.defineLazyIDProperty(this, "syncID", `services.sync.${this.name}.syncID`);
XPCOMUtils.defineLazyPreferenceGetter(this, "_enabled",
`services.sync.engine.${this.prefName}`, false,
(data, previous, latest) =>
// We do not await on the promise onEngineEnabledChanged returns.
this._tracker.onEngineEnabledChanged(latest));
+ XPCOMUtils.defineLazyPreferenceGetter(this, "_syncID",
+ `services.sync.${this.name}.syncID`,
+ "");
XPCOMUtils.defineLazyPreferenceGetter(this, "_lastSync",
`services.sync.${this.name}.lastSync`,
"0", null,
v => parseFloat(v));
- XPCOMUtils.defineLazyPreferenceGetter(this, "_lastSyncLocal",
- `services.sync.${this.name}.lastSyncLocal`,
- "0", null,
- v => parseInt(v, 10));
// Async initializations can be made in the initialize() method.
// The map of ids => metadata for records needing a weak upload.
//
// Currently the "metadata" fields are:
//
// - forceTombstone: whether or not we should ignore the local information
// about the record, and write a tombstone for it anyway -- e.g. in the case
@@ -841,36 +839,96 @@ SyncEngine.prototype = {
return this._notify("sync", this.name, this._sync)();
},
// Override this method to return a new changeset type.
emptyChangeset() {
return new Changeset();
},
+ /**
+ * Returns the local sync ID for this engine, or `""` if the engine hasn't
+ * completed its first sync. This is exposed for tests.
+ *
+ * @return the current sync ID.
+ */
+ async getSyncID() {
+ return this._syncID;
+ },
+
+ /**
+ * Ensures that the local sync ID for the engine matches the sync ID for the
+ * collection on the server. A mismatch indicates that another client wiped
+ * the collection; we're syncing after a node reassignment, and another
+ * client synced before us; or the store was replaced since the last sync.
+ * In case of a mismatch, we need to reset all local Sync state and start
+ * over as a first sync.
+ *
+ * In most cases, this method should return the new sync ID as-is. However, an
+ * engine may ignore the given ID and assign a different one if it determines
+ * that the sync ID on the server is out of date. The bookmarks engine uses
+ * this to wipe the server and other clients on the first sync after the user
+ * restores from a backup.
+ *
+ * @param newSyncID
+ * The new sync ID for the collection from `meta/global`.
+ * @return The assigned sync ID. If this doesn't match `newSyncID`, we'll
+ * replace the sync ID in `meta/global` with the assigned ID.
+ */
+ async ensureCurrentSyncID(newSyncID) {
+ let existingSyncID = this._syncID;
+ if (existingSyncID == newSyncID) {
+ return existingSyncID;
+ }
+ this._log.debug("Engine syncIDs: " + [newSyncID, existingSyncID]);
+ this.setSyncIDPref(newSyncID);
+ return newSyncID;
+ },
+
+ /**
+ * Resets the local sync ID for the engine, wipes the server, and resets all
+ * local Sync state to start over as a first sync.
+ *
+ * @return the new sync ID.
+ */
+ async resetSyncID() {
+ let newSyncID = await this.resetLocalSyncID();
+ await this.wipeServer();
+ return newSyncID;
+ },
+
+ /**
+ * Resets the local sync ID for the engine, signaling that we're starting over
+ * as a first sync.
+ *
+ * @return the new sync ID.
+ */
+ async resetLocalSyncID() {
+ return this.setSyncIDPref(Utils.makeGUID());
+ },
+
+ setSyncIDPref(syncID) {
+ Svc.Prefs.set(this.name + ".syncID", syncID);
+ Svc.Prefs.set(this.name + ".lastSync", "0");
+ return syncID;
+ },
+
/*
* lastSync is a timestamp in server time.
*/
async getLastSync() {
- return this.lastSync;
- },
- async setLastSync(lastSync) {
- this.lastSync = lastSync;
- },
- get lastSync() {
return this._lastSync;
},
- set lastSync(value) {
+ async setLastSync(lastSync) {
// Store the value as a string to keep floating point precision
- Svc.Prefs.set(this.name + ".lastSync", value.toString());
+ Svc.Prefs.set(this.name + ".lastSync", lastSync.toString());
},
- resetLastSync() {
+ async resetLastSync() {
this._log.debug("Resetting " + this.name + " last sync time");
- Svc.Prefs.set(this.name + ".lastSync", "0");
- this.lastSyncLocal = 0;
+ await this.setLastSync(0);
},
get toFetch() {
this._toFetchStorage.ensureDataReady();
return this._toFetchStorage.data.ids;
},
set toFetch(ids) {
@@ -891,27 +949,16 @@ SyncEngine.prototype = {
throw new Error(
"Bug: Attempted to set previousFailed to something that isn't a SerializableSet");
}
this._previousFailedStorage.data = { ids };
this._previousFailedStorage.saveSoon();
},
/*
- * lastSyncLocal is a timestamp in local time.
- */
- get lastSyncLocal() {
- return this._lastSyncLocal;
- },
- set lastSyncLocal(value) {
- // Store as a string because pref can only store C longs as numbers.
- Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
- },
-
- /*
* Returns a changeset for this sync. Engine implementations can override this
* method to bypass the tracker for certain or all changed items.
*/
async getChangedIDs() {
return this._tracker.getChangedIDs();
},
// Create a new record using the store and add in metadata.
@@ -937,62 +984,54 @@ SyncEngine.prototype = {
// Any setup that needs to happen at the beginning of each sync.
async _syncStartup() {
// Determine if we need to wipe on outdated versions
let metaGlobal = await this.service.recordManager.get(this.metaURL);
let engines = metaGlobal.payload.engines || {};
let engineData = engines[this.name] || {};
- let needsWipe = false;
-
// Assume missing versions are 0 and wipe the server
if ((engineData.version || 0) < this.version) {
this._log.debug("Old engine data: " + [engineData.version, this.version]);
- // Prepare to clear the server and upload everything
- needsWipe = true;
- this.syncID = "";
+ // Clear the server and reupload everything on bad version or missing
+ // meta. Note that we don't regenerate per-collection keys here.
+ let newSyncID = await this.resetSyncID();
// Set the newer version and newly generated syncID
engineData.version = this.version;
- engineData.syncID = this.syncID;
+ engineData.syncID = newSyncID;
// Put the new data back into meta/global and mark for upload
engines[this.name] = engineData;
metaGlobal.payload.engines = engines;
metaGlobal.changed = true;
} else if (engineData.version > this.version) {
// Don't sync this engine if the server has newer data
// Changes below need to be processed in bug 1295510 that's why eslint is ignored
// eslint-disable-next-line no-new-wrappers
let error = new String("New data: " + [engineData.version, this.version]);
error.failureCode = VERSION_OUT_OF_DATE;
throw error;
- } else if (engineData.syncID != this.syncID) {
+ } else {
// Changes to syncID mean we'll need to upload everything
- this._log.debug("Engine syncIDs: " + [engineData.syncID, this.syncID]);
- this.syncID = engineData.syncID;
- await this._resetClient();
+ let assignedSyncID = await this.ensureCurrentSyncID(engineData.syncID);
+ if (assignedSyncID != engineData.syncID) {
+ engineData.syncID = assignedSyncID;
+ metaGlobal.changed = true;
+ }
}
- // Delete any existing data and reupload on bad version or missing meta.
- // No crypto component here...? We could regenerate per-collection keys...
- if (needsWipe) {
- await this.wipeServer();
- }
-
- // Save objects that need to be uploaded in this._modified. We also save
- // the timestamp of this fetch in this.lastSyncLocal. As we successfully
- // upload objects we remove them from this._modified. If an error occurs
- // or any objects fail to upload, they will remain in this._modified. At
- // the end of a sync, or after an error, we add all objects remaining in
- // this._modified to the tracker.
- this.lastSyncLocal = Date.now();
+ // Save objects that need to be uploaded in this._modified. As we
+ // successfully upload objects we remove them from this._modified. If an
+ // error occurs or any objects fail to upload, they will remain in
+ // this._modified. At the end of a sync, or after an error, we add all
+ // objects remaining in this._modified to the tracker.
let initialChanges = await this.pullChanges();
this._modified.replace(initialChanges);
// Clear the tracker now. If the sync fails we'll add the ones we failed
// to upload back.
await this._tracker.clearChangedIDs();
this._tracker.resetScore();
this._log.info(this._modified.count() +
@@ -1787,29 +1826,37 @@ SyncEngine.prototype = {
throw ex;
}
this._log.debug("Failed test decrypt", ex);
}
return canDecrypt;
},
- async _resetClient() {
- this.resetLastSync();
- this.previousFailed = new SerializableSet();
- this.toFetch = new SerializableSet();
- this._needWeakUpload.clear();
+ /**
+ * Deletes the collection for this engine on the server, and removes all local
+ * Sync metadata for this engine. This does *not* remove any existing data on
+ * other clients. This is called when we reset the sync ID.
+ */
+ async wipeServer() {
+ await this._deleteServerCollection();
+ await this._resetClient();
},
- async wipeServer() {
+ /**
+ * Deletes the collection for this engine on the server, without removing
+ * any local Sync metadata or user data. Deleting the collection will not
+ * remove any user data on other clients, but will force other clients to
+ * start over as a first sync.
+ */
+ async _deleteServerCollection() {
let response = await this.service.resource(this.engineURL).delete();
if (response.status != 200 && response.status != 404) {
throw response;
}
- await this._resetClient();
},
async removeClientData() {
// Implement this method in engines that store client specific data
// on the server.
},
/*
@@ -1873,26 +1920,33 @@ SyncEngine.prototype = {
*/
async trackRemainingChanges() {
for (let [id, change] of this._modified.entries()) {
await this._tracker.addChangedID(id, change);
}
},
/**
- * Get rid of any local meta-data.
+ * Removes all local Sync metadata for this engine, but keeps all existing
+ * local user data.
*/
async resetClient() {
- if (!this._resetClient) {
- throw new Error("engine does not implement _resetClient method");
- }
-
return this._notify("reset-client", this.name, this._resetClient)();
},
+ async _resetClient() {
+ await this.resetLastSync();
+ this.previousFailed = new SerializableSet();
+ this.toFetch = new SerializableSet();
+ this._needWeakUpload.clear();
+ },
+
+ /**
+ * Removes all local Sync metadata and user data for this engine.
+ */
async wipeClient() {
return this._notify("wipe-client", this.name, this._wipeClient)();
},
async _wipeClient() {
await this.resetClient();
this._log.debug("Deleting all local data");
this._tracker.ignoreAll = true;
--- a/services/sync/modules/engines/addons.js
+++ b/services/sync/modules/engines/addons.js
@@ -157,17 +157,18 @@ AddonsEngine.prototype = {
*/
async getChangedIDs() {
let changes = {};
const changedIDs = await this._tracker.getChangedIDs();
for (let [id, modified] of Object.entries(changedIDs)) {
changes[id] = modified;
}
- let lastSyncDate = new Date(this.lastSync * 1000);
+ let lastSync = await this.getLastSync();
+ let lastSyncDate = new Date(lastSync * 1000);
// The reconciler should have been refreshed at the beginning of a sync and
// we assume this function is only called from within a sync.
let reconcilerChanges = this._reconciler.getChangesSinceDate(lastSyncDate);
let addons = this._reconciler.addons;
for (let change of reconcilerChanges) {
let changeTime = change[0];
let id = change[2];
@@ -216,17 +217,18 @@ AddonsEngine.prototype = {
* Override end of sync to perform a little housekeeping on the reconciler.
*
* We prune changes to prevent the reconciler state from growing without
* bound. Even if it grows unbounded, there would have to be many add-on
* changes (thousands) for it to slow things down significantly. This is
* highly unlikely to occur. Still, we exercise defense just in case.
*/
async _syncCleanup() {
- let ms = 1000 * this.lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
+ let lastSync = await this.getLastSync();
+ let ms = 1000 * lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
this._reconciler.pruneChangesBeforeDate(new Date(ms));
return SyncEngine.prototype._syncCleanup.call(this);
},
/**
* Helper function to ensure reconciler is up to date.
*
* This will load the reconciler's state from the file
--- a/services/sync/modules/engines/bookmarks.js
+++ b/services/sync/modules/engines/bookmarks.js
@@ -518,17 +518,18 @@ BookmarksEngine.prototype = {
return undefined;
},
async _syncStartup() {
await SyncEngine.prototype._syncStartup.call(this);
try {
// For first-syncs, make a backup for the user to restore
- if (this.lastSync == 0) {
+ let lastSync = await this.getLastSync();
+ if (!lastSync) {
this._log.debug("Bookmarks backup starting.");
await PlacesBackups.create(null, true);
this._log.debug("Bookmarks backup done.");
}
} catch (ex) {
// Failure to create a backup is somewhat bad, but probably not bad
// enough to prevent syncing of bookmarks - so just log the error and
// continue.
@@ -702,17 +703,17 @@ BufferedBookmarksEngine.prototype = {
return mirror.getCollectionHighWaterMark();
},
async setLastSync(lastSync) {
let mirror = await this._store.ensureOpenMirror();
await mirror.setCollectionLastModified(lastSync);
// Update the pref so that reverting to the original bookmarks engine
// doesn't download records we've already applied.
- super.lastSync = lastSync;
+ await super.setLastSync(lastSync);
},
get lastSync() {
throw new TypeError("Use getLastSync");
},
set lastSync(value) {
throw new TypeError("Use setLastSync");
--- a/services/sync/modules/engines/clients.js
+++ b/services/sync/modules/engines/clients.js
@@ -80,31 +80,34 @@ Utils.deferGetSet(ClientsRec,
"version", "protocols",
"formfactor", "os", "appPackage", "application", "device",
"fxaDeviceId"]);
function ClientEngine(service) {
SyncEngine.call(this, "Clients", service);
- // Reset the last sync timestamp on every startup so that we fetch all clients
- this.resetLastSync();
this.fxAccounts = fxAccounts;
this.addClientCommandQueue = Promise.resolve();
Utils.defineLazyIDProperty(this, "localID", "services.sync.client.GUID");
}
ClientEngine.prototype = {
__proto__: SyncEngine.prototype,
_storeObj: ClientStore,
_recordObj: ClientsRec,
_trackerObj: ClientsTracker,
allowSkippedRecord: false,
_knownStaleFxADeviceIds: null,
_lastDeviceCounts: null,
+ async initialize() {
+ // Reset the last sync timestamp on every startup so that we fetch all clients
+ await this.resetLastSync();
+ },
+
// These two properties allow us to avoid replaying the same commands
// continuously if we cannot manage to upload our own record.
_localClientLastModified: 0,
get _lastModifiedOnProcessCommands() {
return Services.prefs.getIntPref(LAST_MODIFIED_ON_PROCESS_COMMAND_PREF, -1);
},
set _lastModifiedOnProcessCommands(value) {
@@ -366,17 +369,17 @@ ClientEngine.prototype = {
await this._tracker.addChangedID(this.localID);
this.lastRecordUpload = Date.now() / 1000;
}
return SyncEngine.prototype._syncStartup.call(this);
},
async _processIncoming() {
// Fetch all records from the server.
- this.lastSync = 0;
+ await this.setLastSync(0);
this._incomingClients = {};
try {
await SyncEngine.prototype._processIncoming.call(this);
// Refresh the known stale clients list at startup and when we receive
// "device connected/disconnected" push notifications.
if (!this._knownStaleFxADeviceIds) {
await this._refreshKnownStaleClients();
}
@@ -443,19 +446,20 @@ ClientEngine.prototype = {
for (let clientId of clientWithPendingCommands) {
if (this._store._remoteClients[clientId] || this.localID == clientId) {
this._modified.set(clientId, 0);
}
}
let updatedIDs = this._modified.ids();
await SyncEngine.prototype._uploadOutgoing.call(this);
// Record the response time as the server time for each item we uploaded.
+ let lastSync = await this.getLastSync();
for (let id of updatedIDs) {
if (id != this.localID) {
- this._store._remoteClients[id].serverLastModified = this.lastSync;
+ this._store._remoteClients[id].serverLastModified = lastSync;
}
}
},
async _onRecordsWritten(succeeded, failed) {
// Reconcile the status of the local records with what we just wrote on the
// server
for (let id of succeeded) {
--- a/services/sync/tests/unit/head_errorhandler_common.js
+++ b/services/sync/tests/unit/head_errorhandler_common.js
@@ -21,23 +21,27 @@ const EHTestsCommon = {
service_unavailable(request, response) {
let body = "Service Unavailable";
response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
response.setHeader("Retry-After", "42");
response.bodyOutputStream.write(body, body.length);
},
async sync_httpd_setup() {
+ let clientsEngine = Service.clientsEngine;
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+ let catapultEngine = Service.engineManager.get("catapult");
+ let catapultSyncID = await catapultEngine.resetLocalSyncID();
let global = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- engines: {clients: {version: Service.clientsEngine.version,
- syncID: Service.clientsEngine.syncID},
- catapult: {version: Service.engineManager.get("catapult").version,
- syncID: Service.engineManager.get("catapult").syncID}}
+ engines: {clients: {version: clientsEngine.version,
+ syncID: clientsSyncID},
+ catapult: {version: catapultEngine.version,
+ syncID: catapultSyncID}}
});
let clientsColl = new ServerCollection({}, true);
// Tracking info/collections.
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let handler_401 = httpd_handler(401, "Unauthorized");
--- a/services/sync/tests/unit/head_helpers.js
+++ b/services/sync/tests/unit/head_helpers.js
@@ -500,19 +500,20 @@ Utils.getDefaultDeviceName = function()
async function registerRotaryEngine() {
let {RotaryEngine} =
ChromeUtils.import("resource://testing-common/services/sync/rotaryengine.js", {});
await Service.engineManager.clear();
await Service.engineManager.register(RotaryEngine);
let engine = Service.engineManager.get("rotary");
+ let syncID = await engine.resetLocalSyncID();
engine.enabled = true;
- return { engine, tracker: engine._tracker };
+ return { engine, syncID, tracker: engine._tracker };
}
// Set the validation prefs to attempt validation every time to avoid non-determinism.
function enableValidationPrefs() {
Svc.Prefs.set("engine.bookmarks.validation.interval", 0);
Svc.Prefs.set("engine.bookmarks.validation.percentageChance", 100);
Svc.Prefs.set("engine.bookmarks.validation.maxRecords", -1);
Svc.Prefs.set("engine.bookmarks.validation.enabled", true);
@@ -522,37 +523,37 @@ async function serverForEnginesWithKeys(
// Generate and store a fake default key bundle to avoid resetting the client
// before the first sync.
let wbo = await Service.collectionKeys.generateNewKeysWBO();
let modified = new_timestamp();
Service.collectionKeys.setContents(wbo.cleartext, modified);
let allEngines = [Service.clientsEngine].concat(engines);
- let globalEngines = allEngines.reduce((entries, engine) => {
- let { name, version, syncID } = engine;
- entries[name] = { version, syncID };
- return entries;
- }, {});
+ let globalEngines = {};
+ for (let engine of allEngines) {
+ let syncID = await engine.resetLocalSyncID();
+ globalEngines[engine.name] = { version: engine.version, syncID };
+ }
- let contents = allEngines.reduce((collections, engine) => {
- collections[engine.name] = {};
- return collections;
- }, {
+ let contents = {
meta: {
global: {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: globalEngines,
},
},
crypto: {
keys: encryptPayload(wbo.cleartext),
},
- });
+ };
+ for (let engine of allEngines) {
+ contents[engine.name] = {};
+ }
return serverForUsers(users, contents, callback);
}
async function serverForFoo(engine, callback) {
// The bookmarks engine *always* tracks changes, meaning we might try
// and sync due to the bookmarks we ourselves create! Worse, because we
// do an engine sync only, there's no locking - so we end up with multiple
--- a/services/sync/tests/unit/test_412.js
+++ b/services/sync/tests/unit/test_412.js
@@ -27,17 +27,18 @@ add_task(async function test_412_not_tre
// create a new record that should be uploaded and arrange for our lastSync
// timestamp to be wrong so we get a 412.
engine._store.items = {new: "new record"};
await engine._tracker.addChangedID("new", 0);
let saw412 = false;
let _uploadOutgoing = engine._uploadOutgoing;
engine._uploadOutgoing = async () => {
- engine.lastSync -= 2;
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 2);
try {
await _uploadOutgoing.call(engine);
} catch (ex) {
saw412 = ex.status == 412;
throw ex;
}
};
_("Second sync - expecting a 412");
--- a/services/sync/tests/unit/test_addons_engine.js
+++ b/services/sync/tests/unit/test_addons_engine.js
@@ -14,16 +14,17 @@ ChromeUtils.import("resource://services-
ChromeUtils.defineModuleGetter(this, "Preferences", "resource://gre/modules/Preferences.jsm");
const prefs = new Preferences();
prefs.set("extensions.getAddons.get.url",
"http://localhost:8888/search/guid:%IDS%");
prefs.set("extensions.install.requireSecureOrigin", false);
let engine;
+let syncID;
let reconciler;
let tracker;
async function resetReconciler() {
reconciler._addons = {};
reconciler._changes = [];
await reconciler.saveState();
@@ -32,16 +33,17 @@ async function resetReconciler() {
}
add_task(async function setup() {
loadAddonTestFunctions();
startupManager();
await Service.engineManager.register(AddonsEngine);
engine = Service.engineManager.get("addons");
+ syncID = await engine.resetLocalSyncID();
reconciler = engine._reconciler;
tracker = engine._tracker;
reconciler.startListening();
// Don't flush to disk in the middle of an event listener!
// This causes test hangs on WinXP.
reconciler._shouldPersist = false;
@@ -172,18 +174,17 @@ add_task(async function test_disabled_in
let server = new SyncServer();
server.start();
await SyncTestingInfrastructure(server, USER, PASSWORD);
await generateNewKeys(Service.collectionKeys);
let contents = {
- meta: {global: {engines: {addons: {version: engine.version,
- syncID: engine.syncID}}}},
+ meta: {global: {engines: {addons: {version: engine.version, syncID}}}},
crypto: {},
addons: {}
};
server.registerUser(USER, "password");
server.createContents(USER, contents);
let amoServer = new HttpServer();
--- a/services/sync/tests/unit/test_bookmark_duping.js
+++ b/services/sync/tests/unit/test_bookmark_duping.js
@@ -154,17 +154,18 @@ add_task(async function test_dupe_bookma
equal(oldVal, bmk1_guid);
equal(source, PlacesUtils.bookmarks.SOURCE_SYNC);
onItemChangedObserved = true;
}
};
PlacesUtils.bookmarks.addObserver(obs, false);
_("Syncing so new dupe record is processed");
- engine.lastSync = engine.lastSync - 5;
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 5);
await engine.sync();
// We should have logically deleted the dupe record.
equal(collection.count(), 7);
ok(collection.cleartext(bmk1_guid).deleted);
// and physically removed from the local store.
await promiseNoLocalItem(bmk1_guid);
// Parent should still only have 1 item.
@@ -215,17 +216,18 @@ add_task(async function test_dupe_repare
title: "Get Firefox!",
parentName: "Folder 1",
parentid: folder2_guid,
};
collection.insert(newGUID, encryptPayload(to_apply), Date.now() / 1000 + 500);
_("Syncing so new dupe record is processed");
- engine.lastSync = engine.lastSync - 5;
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 5);
await engine.sync();
// We should have logically deleted the dupe record.
equal(collection.count(), 8);
ok(collection.cleartext(bmk1_guid).deleted);
// and physically removed from the local store.
await promiseNoLocalItem(bmk1_guid);
// The original folder no longer has the item
@@ -295,17 +297,17 @@ add_task(async function test_dupe_repare
guid: bmk1_guid,
syncChangeCounter: 1,
lastModified: Date.now() + (deltaSeconds + 10) * 1000,
});
_("Syncing so new dupe record is processed");
// We need to take care to only sync the one new record - if we also see
// our local item as incoming the test fails - bug 1368608.
- engine.lastSync = newWBO.modified - 0.000001;
+ await engine.setLastSync(newWBO.modified - 0.000001);
engine.lastModified = null;
await engine.sync();
// We should have logically deleted the dupe record.
equal(collection.count(), 8);
ok(collection.cleartext(bmk1_guid).deleted);
// and physically removed from the local store.
await promiseNoLocalItem(bmk1_guid);
@@ -387,17 +389,18 @@ add_task(async function test_dupe_repare
title: "Get Firefox!",
parentName: "Folder 1",
parentid: newParentGUID,
tags: [],
}), Date.now() / 1000 + 500);
_("Syncing so new records are processed.");
- engine.lastSync = engine.lastSync - 5;
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 5);
await engine.sync();
// Everything should be parented correctly.
equal((await getFolderChildrenIDs(folder1_id)).length, 0);
let newParentID = await store.idForGUID(newParentGUID);
let newID = await store.idForGUID(newGUID);
deepEqual(await getFolderChildrenIDs(newParentID), [newID]);
@@ -463,17 +466,18 @@ add_task(async function test_dupe_repare
title: "A second folder",
parentName: "Bookmarks Toolbar",
parentid: "toolbar",
children: [newParentGUID],
tags: [],
}), Date.now() / 1000 + 500);
_("Syncing so out-of-order records are processed.");
- engine.lastSync = engine.lastSync - 5;
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 5);
await engine.sync();
// The intended parent did end up existing, so it should be parented
// correctly after de-duplication.
equal((await getFolderChildrenIDs(folder1_id)).length, 0);
let newParentID = await store.idForGUID(newParentGUID);
let newID = await store.idForGUID(newGUID);
deepEqual(await getFolderChildrenIDs(newParentID), [newID]);
@@ -519,17 +523,20 @@ add_task(async function test_dupe_repare
title: "Get Firefox!",
parentName: "Folder 1",
parentid: newParentGUID,
tags: [],
dateAdded: Date.now() - 10000
}), Date.now() / 1000 + 500);
_("Syncing so new dupe record is processed");
- engine.lastSync = engine.lastSync - 5;
+ {
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 5);
+ }
await engine.sync();
// We should have logically deleted the dupe record.
equal(collection.count(), 8);
ok(collection.cleartext(bmk1_guid).deleted);
// and physically removed from the local store.
await promiseNoLocalItem(bmk1_guid);
// The intended parent doesn't exist, so it remains in the original folder
@@ -577,17 +584,20 @@ add_task(async function test_dupe_repare
parentid: "toolbar",
children: [newParentGUID],
tags: [],
dateAdded: Date.now() - 11000,
}), Date.now() / 1000 + 500);
_("Syncing so missing parent appears");
- engine.lastSync = engine.lastSync - 5;
+ {
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 5);
+ }
await engine.sync();
// The intended parent now does exist, so it should have been reparented.
equal((await getFolderChildrenIDs(folder1_id)).length, 0);
let newParentID = await store.idForGUID(newParentGUID);
let newID = await store.idForGUID(newGUID);
deepEqual(await getFolderChildrenIDs(newParentID), [newID]);
@@ -634,17 +644,18 @@ add_task(async function test_dupe_empty_
type: "folder",
title: "Folder 1",
parentName: BookmarksToolbarTitle,
parentid: "toolbar",
children: [],
}), Date.now() / 1000 + 500);
_("Syncing so new dupe records are processed");
- engine.lastSync = engine.lastSync - 5;
+ let lastSync = await engine.getLastSync();
+ await engine.setLastSync(lastSync - 5);
await engine.sync();
await validate(collection);
// Collection now has one additional record - the logically deleted dupe.
equal(collection.count(), 6);
// original folder should be logically deleted.
ok(collection.cleartext(folder1_guid).deleted);
--- a/services/sync/tests/unit/test_bookmark_order.js
+++ b/services/sync/tests/unit/test_bookmark_order.js
@@ -7,29 +7,31 @@ ChromeUtils.import("resource://services-
ChromeUtils.import("resource://services-sync/main.js");
ChromeUtils.import("resource://services-sync/service.js");
ChromeUtils.import("resource://services-sync/util.js");
async function serverForFoo(engine) {
await generateNewKeys(Service.collectionKeys);
let clientsEngine = Service.clientsEngine;
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+ let engineSyncID = await engine.resetLocalSyncID();
return serverForUsers({"foo": "password"}, {
meta: {
global: {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {
clients: {
version: clientsEngine.version,
- syncID: clientsEngine.syncID,
+ syncID: clientsSyncID,
},
[engine.name]: {
version: engine.version,
- syncID: engine.syncID,
+ syncID: engineSyncID,
},
},
},
},
crypto: {
keys: encryptPayload({
id: "keys",
// Generate a fake default key bundle to avoid resetting the client
@@ -726,11 +728,12 @@ add_task(async function test_bookmark_or
guid: id20,
index: 2,
}],
}, {
guid: PlacesUtils.bookmarks.mobileGuid,
index: 4,
}], "Move 20 back to front -> update 20, f30");
- engine.resetClient();
+ await engine.wipeClient();
+ await Service.startOver();
await engine.finalize();
});
--- a/services/sync/tests/unit/test_bookmark_repair.js
+++ b/services/sync/tests/unit/test_bookmark_repair.js
@@ -343,17 +343,17 @@ add_task(async function test_bookmark_re
}]);
await revalidationPromise;
ok(!Services.prefs.prefHasUserValue("services.sync.repairs.bookmarks.state"),
"Should clear repair pref after successfully completing repair");
} finally {
await cleanup(server);
clientsEngine = Service.clientsEngine = new ClientEngine(Service);
clientsEngine.ignoreLastModifiedOnProcessCommands = true;
- clientsEngine.initialize();
+ await clientsEngine.initialize();
}
});
add_task(async function test_repair_client_missing() {
enableValidationPrefs();
_("Ensure that a record missing from the client only will get re-downloaded from the server");
--- a/services/sync/tests/unit/test_bookmark_smart_bookmarks.js
+++ b/services/sync/tests/unit/test_bookmark_smart_bookmarks.js
@@ -195,84 +195,87 @@ add_task(async function test_smart_bookm
await sync_engine_and_validate_telem(engine, false);
let idForNewGUID = await PlacesUtils.promiseItemId(record.id);
equal(idForOldGUID, idForNewGUID);
}
_("Verify that queries with the same anno and different URL dupe");
{
+ let lastSync = await engine.getLastSync();
let info = await newSmartBookmark(PlacesUtils.bookmarks.menuGuid,
"place:bar", -1, title,
"MostVisited");
let idForOldGUID = await PlacesUtils.promiseItemId(info.guid);
let record = await store.createRecord(info.guid);
record.id = Utils.makeGUID();
- collection.insert(record.id, encryptPayload(record.cleartext), engine.lastSync + 1);
+ collection.insert(record.id, encryptPayload(record.cleartext), lastSync + 1);
collection.insert("menu", encryptPayload({
id: "menu",
parentid: "places",
type: "folder",
title: "Bookmarks Menu",
children: [record.id],
- }), engine.lastSync + 1);
+ }), lastSync + 1);
engine.lastModified = collection.timestamp;
await sync_engine_and_validate_telem(engine, false);
let idForNewGUID = await PlacesUtils.promiseItemId(record.id);
equal(idForOldGUID, idForNewGUID);
}
_("Verify that different annos don't dupe.");
{
+ let lastSync = await engine.getLastSync();
let info = await newSmartBookmark(PlacesUtils.bookmarks.unfiledGuid,
"place:foo", -1, title, "LeastVisited");
let idForOldGUID = await PlacesUtils.promiseItemId(info.guid);
let other = await store.createRecord(info.guid);
other.id = "abcdefabcdef";
other.queryId = "MostVisited";
- collection.insert(other.id, encryptPayload(other.cleartext), engine.lastSync + 1);
+ collection.insert(other.id, encryptPayload(other.cleartext), lastSync + 1);
collection.insert("unfiled", encryptPayload({
id: "unfiled",
parentid: "places",
type: "folder",
title: "Other Bookmarks",
children: [other.id],
- }), engine.lastSync + 1);
+ }), lastSync + 1);
engine.lastModified = collection.timestamp;
await sync_engine_and_validate_telem(engine, false);
let idForNewGUID = await PlacesUtils.promiseItemId(other.id);
notEqual(idForOldGUID, idForNewGUID);
}
_("Handle records without a queryId entry.");
{
+ let lastSync = await engine.getLastSync();
let info = await newSmartBookmark(PlacesUtils.bookmarks.mobileGuid, url,
-1, title, "MostVisited");
let idForOldGUID = await PlacesUtils.promiseItemId(info.guid);
let record = await store.createRecord(info.guid);
record.id = Utils.makeGUID();
delete record.queryId;
- collection.insert(record.id, encryptPayload(record.cleartext), engine.lastSync + 1);
+ collection.insert(record.id, encryptPayload(record.cleartext), lastSync + 1);
collection.insert("mobile", encryptPayload({
id: "mobile",
parentid: "places",
type: "folder",
title: "Mobile Bookmarks",
children: [record.id],
- }), engine.lastSync + 1);
+ }), lastSync + 1);
engine.lastModified = collection.timestamp;
await sync_engine_and_validate_telem(engine, false);
let idForNewGUID = await PlacesUtils.promiseItemId(record.id);
equal(idForOldGUID, idForNewGUID);
}
--- a/services/sync/tests/unit/test_bookmark_tracker.js
+++ b/services/sync/tests/unit/test_bookmark_tracker.js
@@ -59,17 +59,17 @@ function promiseSpinningly(promise) {
}
return rv;
}
async function cleanup() {
- engine.lastSync = 0;
+ await engine.setLastSync(0);
engine._needWeakUpload.clear();
await store.wipe();
await resetTracker();
await tracker.stop();
}
// startTracking is a signal that the test wants to notice things that happen
// after this is called (ie, things already tracked should be discarded.)
--- a/services/sync/tests/unit/test_clients_engine.js
+++ b/services/sync/tests/unit/test_clients_engine.js
@@ -791,17 +791,17 @@ add_task(async function test_command_syn
_("Checking record was uploaded.");
notEqual(clientWBO(engine.localID).payload, undefined);
ok(engine.lastRecordUpload > 0);
notEqual(clientWBO(remoteId).payload, undefined);
Svc.Prefs.set("client.GUID", remoteId);
- engine._resetClient();
+ await engine._resetClient();
equal(engine.localID, remoteId);
_("Performing sync on resetted client.");
await syncClientsEngine(server);
notEqual(engine.localCommands, undefined);
equal(engine.localCommands.length, 1);
let command = engine.localCommands[0];
equal(command.command, "wipeAll");
@@ -1381,17 +1381,17 @@ add_task(async function test_keep_cleare
localRemoteRecord = collection.cleartext(deviceBID);
deepEqual(localRemoteRecord.commands, [], "Should be empty");
} finally {
await cleanup();
// Reset service (remove mocks)
engine = Service.clientsEngine = new ClientEngine(Service);
await engine.initialize();
- engine._resetClient();
+ await engine._resetClient();
try {
server.deleteCollections("foo");
} finally {
await promiseStopServer(server);
}
}
});
@@ -1719,18 +1719,18 @@ add_task(async function test_duplicate_c
- cleanup();
+ await cleanup();
await promiseStopServer(server);
}
});
add_task(async function test_other_clients_notified_on_first_sync() {
_("Ensure that other clients are notified when we upload our client record for the first time.");
- engine.resetLastSync();
- engine._store.wipe();
+ await engine.resetLastSync();
+ await engine._store.wipe();
await generateNewKeys(Service.collectionKeys);
let server = await serverForFoo(engine);
await SyncTestingInfrastructure(server);
const fxAccounts = engine.fxAccounts;
let calls = 0;
engine.fxAccounts = {
--- a/services/sync/tests/unit/test_engine_abort.js
+++ b/services/sync/tests/unit/test_engine_abort.js
@@ -19,20 +19,20 @@ add_task(async function test_processInco
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
await generateNewKeys(Service.collectionKeys);
_("Create some server data.");
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
_("Fake applyIncoming to abort.");
engine._store.applyIncoming = async function(record) {
let ex = {code: SyncEngine.prototype.eEngineAbortApplyIncoming,
cause: "Nooo"};
_("Throwing: " + JSON.stringify(ex));
throw ex;
};
--- a/services/sync/tests/unit/test_engine_changes_during_sync.js
+++ b/services/sync/tests/unit/test_engine_changes_during_sync.js
@@ -264,33 +264,34 @@ add_task(async function test_forms_chang
}
});
add_task(async function test_bookmark_change_during_sync() {
_("Ensure that we track bookmark changes made during a sync.");
enableValidationPrefs();
+ let engine = Service.engineManager.get("bookmarks");
+ let server = await serverForEnginesWithKeys({"foo": "password"}, [engine]);
+ await SyncTestingInfrastructure(server);
+
// Already-tracked bookmarks that shouldn't be uploaded during the first sync.
let bzBmk = await PlacesUtils.bookmarks.insert({
parentGuid: PlacesUtils.bookmarks.menuGuid,
url: "https://bugzilla.mozilla.org/",
title: "Bugzilla",
});
_(`Bugzilla GUID: ${bzBmk.guid}`);
await PlacesTestUtils.setBookmarkSyncFields({
guid: bzBmk.guid,
syncChangeCounter: 0,
syncStatus: PlacesUtils.bookmarks.SYNC_STATUS.NORMAL,
});
- let engine = Service.engineManager.get("bookmarks");
- let server = await serverForEnginesWithKeys({"foo": "password"}, [engine]);
- await SyncTestingInfrastructure(server);
let collection = server.user("foo").collection("bookmarks");
let bmk3; // New child of Folder 1, created locally during sync.
let uploadOutgoing = engine._uploadOutgoing;
engine._uploadOutgoing = async function() {
engine._uploadOutgoing = uploadOutgoing;
try {
--- a/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
+++ b/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
@@ -23,18 +23,18 @@ CatapultEngine.prototype = {
}
};
async function sync_httpd_setup() {
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let catapultEngine = engineManager.get("catapult");
- let engines = {catapult: {version: catapultEngine.version,
- syncID: catapultEngine.syncID}};
+ let syncID = await catapultEngine.resetLocalSyncID();
+ let engines = {catapult: {version: catapultEngine.version, syncID}};
// Track these using the collections helper, which keeps modified times
// up-to-date.
let clientsColl = new ServerCollection({}, true);
let keysWBO = new ServerWBO("keys");
let globalWBO = new ServerWBO("global", {storageVersion: STORAGE_VERSION,
syncID: Utils.makeGUID(),
engines});
--- a/services/sync/tests/unit/test_fxa_node_reassignment.js
+++ b/services/sync/tests/unit/test_fxa_node_reassignment.js
@@ -205,24 +205,23 @@ add_task(async function test_single_toke
add_task(async function test_momentary_401_engine() {
enableValidationPrefs();
_("Test a failure for engine URLs that's resolved by reassignment.");
let server = await prepareServer();
let john = server.user("johndoe");
_("Enabling the Rotary engine.");
- let { engine, tracker } = await registerRotaryEngine();
+ let { engine, syncID, tracker } = await registerRotaryEngine();
// We need the server to be correctly set up prior to experimenting. Do this
// through a sync.
let global = {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- rotary: {version: engine.version,
- syncID: engine.syncID}};
+ rotary: {version: engine.version, syncID}};
john.createCollection("meta").insert("global", global);
_("First sync to prepare server contents.");
await Service.sync();
_("Setting up Rotary collection to 401.");
let rotary = john.createCollection("rotary");
let oldHandler = rotary.collectionHandler;
--- a/services/sync/tests/unit/test_history_engine.js
+++ b/services/sync/tests/unit/test_history_engine.js
@@ -53,34 +53,34 @@ add_task(async function test_history_dow
}],
}), lastSync + 1 + i);
wbo.sortindex = 15 - i;
collection.insertWBO(wbo);
}
// We have 15 records on the server since the last sync, but our download
// limit is 5 records at a time. We should eventually fetch all 15.
- engine.lastSync = lastSync;
+ await engine.setLastSync(lastSync);
engine.downloadBatchSize = 4;
engine.downloadLimit = 5;
// Don't actually fetch any backlogged records, so that we can inspect
// the backlog between syncs.
engine.guidFetchBatchSize = 0;
let ping = await sync_engine_and_validate_telem(engine, false);
deepEqual(ping.engines[0].incoming, { applied: 5 });
let backlogAfterFirstSync = Array.from(engine.toFetch).sort();
deepEqual(backlogAfterFirstSync, ["place0000000", "place0000001",
"place0000002", "place0000003", "place0000004", "place0000005",
"place0000006", "place0000007", "place0000008", "place0000009"]);
// We should have fast-forwarded the last sync time.
- equal(engine.lastSync, lastSync + 15);
+ equal(await engine.getLastSync(), lastSync + 15);
engine.lastModified = collection.modified;
ping = await sync_engine_and_validate_telem(engine, false);
ok(!ping.engines[0].incoming);
// After the second sync, our backlog still contains the same GUIDs: we
// weren't able to make progress on fetching them, since our
// `guidFetchBatchSize` is 0.
@@ -103,17 +103,17 @@ add_task(async function test_history_dow
engine.lastModified = collection.modified;
ping = await sync_engine_and_validate_telem(engine, false);
deepEqual(ping.engines[0].incoming, { applied: 1 });
// Our backlog should remain the same.
let backlogAfterThirdSync = Array.from(engine.toFetch).sort();
deepEqual(backlogAfterSecondSync, backlogAfterThirdSync);
- equal(engine.lastSync, lastSync + 20);
+ equal(await engine.getLastSync(), lastSync + 20);
// Bump the fetch batch size to let the backlog make progress. We should
// make 3 requests to fetch 5 backlogged GUIDs.
engine.guidFetchBatchSize = 2;
engine.lastModified = collection.modified;
ping = await sync_engine_and_validate_telem(engine, false);
deepEqual(ping.engines[0].incoming, { applied: 5 });
@@ -171,17 +171,17 @@ add_task(async function test_history_vis
// exist.
cleartext.visits.push({
date: (Date.now() - oneHourMS / 2) * 1000,
type: PlacesUtils.history.TRANSITIONS.LINK
});
}, Date.now() / 1000 + 10);
// Force a remote sync.
- engine.lastSync = Date.now() / 1000 - 30;
+ await engine.setLastSync(Date.now() / 1000 - 30);
await sync_engine_and_validate_telem(engine, false);
// Make sure that we didn't duplicate the visit when inserting. (Prior to bug
// 1423395, we would insert a duplicate visit, where the timestamp was
// effectively `Math.round(microsecondTimestamp / 1000) * 1000`.)
visits = await PlacesSyncUtils.history.fetchVisitsForURL("https://www.example.com");
equal(visits.length, 2);
@@ -235,17 +235,17 @@ add_task(async function test_history_vis
type: PlacesUtils.history.TRANSITIONS.LINK
}, {
date: Date.UTC(2017, 11, 5) * 1000, // Dec 5, 2017
type: PlacesUtils.history.TRANSITIONS.LINK
}
);
}, Date.now() / 1000 + 10);
- engine.lastSync = Date.now() / 1000 - 30;
+ await engine.setLastSync(Date.now() / 1000 - 30);
await sync_engine_and_validate_telem(engine, false);
allVisits = (await PlacesUtils.history.fetch("https://www.example.com", {
includeVisits: true
})).visits;
equal(allVisits.length, 28);
ok(allVisits.find(x => x.date.getTime() === Date.UTC(2017, 11, 4)),
--- a/services/sync/tests/unit/test_hmac_error.js
+++ b/services/sync/tests/unit/test_hmac_error.js
@@ -17,28 +17,30 @@ var hmacErrorCount = 0;
})();
async function shared_setup() {
enableValidationPrefs();
syncTestLogging();
hmacErrorCount = 0;
+ let clientsEngine = Service.clientsEngine;
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
+
// Make sure RotaryEngine is the only one we sync.
- let { engine, tracker } = await registerRotaryEngine();
- engine.lastSync = 123; // Needs to be non-zero so that tracker is queried.
+ let { engine, syncID, tracker } = await registerRotaryEngine();
+ await engine.setLastSync(123); // Needs to be non-zero so that tracker is queried.
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman"};
await tracker.addChangedID("scotsman", 0);
Assert.equal(1, Service.engineManager.getEnabled().length);
- let engines = {rotary: {version: engine.version,
- syncID: engine.syncID},
- clients: {version: Service.clientsEngine.version,
- syncID: Service.clientsEngine.syncID}};
+ let engines = {rotary: {version: engine.version, syncID},
+ clients: {version: clientsEngine.version,
+ syncID: clientsSyncID}};
// Common server objects.
let global = new ServerWBO("global", {engines});
let keysWBO = new ServerWBO("keys");
let rotaryColl = new ServerCollection({}, true);
let clientsColl = new ServerCollection({}, true);
return [engine, rotaryColl, clientsColl, keysWBO, global, tracker];
@@ -78,17 +80,17 @@ add_task(async function hmac_error_durin
await Service.login();
try {
_("Syncing.");
await sync_and_validate_telem();
_("Partially resetting client, as if after a restart, and forcing redownload.");
Service.collectionKeys.clear();
- engine.lastSync = 0; // So that we redownload records.
+ await engine.setLastSync(0); // So that we redownload records.
key404Counter = 1;
_("---------------------------");
await sync_and_validate_telem();
_("---------------------------");
// Two rotary items, one client record... no errors.
Assert.equal(hmacErrorCount, 0);
} finally {
@@ -201,40 +203,38 @@ add_task(async function hmac_error_durin
_("== Second (automatic) sync done.");
let hasData = rotaryColl.wbo("flying") ||
rotaryColl.wbo("scotsman");
let hasKeys = keysWBO.modified;
Assert.ok(!hasData == !hasKeys);
// Kick off another sync. Can't just call it, because we're inside the
// lock...
- CommonUtils.nextTick(function() {
+ (async () => {
+ await Async.promiseYield();
_("Now a fresh sync will get no HMAC errors.");
_("Partially resetting client, as if after a restart, and forcing redownload.");
Service.collectionKeys.clear();
- engine.lastSync = 0;
+ await engine.setLastSync(0);
hmacErrorCount = 0;
onSyncFinished = async function() {
// Two rotary items, one client record... no errors.
Assert.equal(hmacErrorCount, 0);
Svc.Obs.remove("weave:service:sync:finish", obs);
Svc.Obs.remove("weave:service:sync:error", obs);
- (async () => {
- await tracker.clearChangedIDs();
- await Service.engineManager.unregister(engine);
- Svc.Prefs.resetBranch("");
- Service.recordManager.clearCache();
- server.stop(resolve);
- })();
+ await tracker.clearChangedIDs();
+ await Service.engineManager.unregister(engine);
+ Svc.Prefs.resetBranch("");
+ Service.recordManager.clearCache();
+ server.stop(resolve);
};
- Service.sync();
+ await Service.sync();
- },
- this);
+ })().catch(Cu.reportError);
};
};
});
await onwards();
await callbacksPromise;
});
--- a/services/sync/tests/unit/test_interval_triggers.js
+++ b/services/sync/tests/unit/test_interval_triggers.js
@@ -8,22 +8,23 @@ ChromeUtils.import("resource://services-
ChromeUtils.import("resource://services-sync/util.js");
Svc.Prefs.set("registerEngines", "");
ChromeUtils.import("resource://services-sync/service.js");
let scheduler;
let clientsEngine;
-function sync_httpd_setup() {
+async function sync_httpd_setup() {
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
let global = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {clients: {version: clientsEngine.version,
- syncID: clientsEngine.syncID}}
+ syncID: clientsSyncID}}
});
let clientsColl = new ServerCollection({}, true);
// Tracking info/collections.
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
return httpd_setup({
@@ -60,17 +61,17 @@ add_task(async function test_successful_
_("Test successful sync calling adjustSyncInterval");
let syncSuccesses = 0;
function onSyncFinish() {
_("Sync success.");
syncSuccesses++;
}
Svc.Obs.add("weave:service:sync:finish", onSyncFinish);
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Confirm defaults
Assert.ok(!scheduler.idle);
Assert.equal(false, scheduler.numClients > 1);
Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
Assert.ok(!scheduler.hasIncomingItems);
@@ -166,17 +167,17 @@ add_task(async function test_unsuccessfu
syncFailures++;
}
Svc.Obs.add("weave:service:sync:error", onSyncError);
_("Test unsuccessful sync calls adjustSyncInterval");
// Force sync to fail.
Svc.Prefs.set("firstSync", "notReady");
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Confirm defaults
Assert.ok(!scheduler.idle);
Assert.equal(false, scheduler.numClients > 1);
Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
Assert.ok(!scheduler.hasIncomingItems);
@@ -261,17 +262,17 @@ add_task(async function test_unsuccessfu
await Service.startOver();
Svc.Obs.remove("weave:service:sync:error", onSyncError);
await promiseStopServer(server);
});
add_task(async function test_back_triggers_sync() {
enableValidationPrefs();
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Single device: no sync triggered.
scheduler.idle = true;
scheduler.observe(null, "active", Svc.Prefs.get("scheduler.idleTime"));
Assert.ok(!scheduler.idle);
// Multiple devices: sync is triggered.
@@ -292,17 +293,17 @@ add_task(async function test_back_trigge
await Service.startOver();
await promiseStopServer(server);
});
add_task(async function test_adjust_interval_on_sync_error() {
enableValidationPrefs();
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
let syncFailures = 0;
function onSyncError() {
_("Sync error.");
syncFailures++;
}
Svc.Obs.add("weave:service:sync:error", onSyncError);
@@ -330,17 +331,17 @@ add_task(async function test_adjust_inte
add_task(async function test_bug671378_scenario() {
enableValidationPrefs();
// Test scenario similar to bug 671378. This bug appeared when a score
// update occurred that wasn't large enough to trigger a sync so
// scheduleNextSync() was called without a time interval parameter,
// setting nextSync to a non-zero value and preventing the timer from
// being adjusted in the next call to scheduleNextSync().
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
let syncSuccesses = 0;
function onSyncFinish() {
_("Sync success.");
syncSuccesses++;
}
Svc.Obs.add("weave:service:sync:finish", onSyncFinish);
--- a/services/sync/tests/unit/test_node_reassignment.js
+++ b/services/sync/tests/unit/test_node_reassignment.js
@@ -129,24 +129,23 @@ async function syncAndExpectNodeReassign
add_task(async function test_momentary_401_engine() {
enableValidationPrefs();
_("Test a failure for engine URLs that's resolved by reassignment.");
let server = await prepareServer();
let john = server.user("johndoe");
_("Enabling the Rotary engine.");
- let { engine, tracker } = await registerRotaryEngine();
+ let { engine, syncID, tracker } = await registerRotaryEngine();
// We need the server to be correctly set up prior to experimenting. Do this
// through a sync.
let global = {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- rotary: {version: engine.version,
- syncID: engine.syncID}};
+ rotary: {version: engine.version, syncID}};
john.createCollection("meta").insert("global", global);
_("First sync to prepare server contents.");
await Service.sync();
_("Setting up Rotary collection to 401.");
let rotary = john.createCollection("rotary");
let oldHandler = rotary.collectionHandler;
@@ -362,34 +361,33 @@ add_task(async function test_loop_avoida
enableValidationPrefs();
_("Test that a repeated 401 in an engine doesn't result in a sync loop " +
"if node reassignment cannot resolve the failure.");
let server = await prepareServer();
let john = server.user("johndoe");
_("Enabling the Rotary engine.");
- let { engine, tracker } = await registerRotaryEngine();
+ let { engine, syncID, tracker } = await registerRotaryEngine();
let deferred = PromiseUtils.defer();
let getTokenCount = 0;
let mockTSC = { // TokenServerClient
getTokenFromBrowserIDAssertion(uri, assertion) {
getTokenCount++;
return {endpoint: server.baseURI + "1.1/johndoe/"};
},
};
Service.identity._tokenServerClient = mockTSC;
// We need the server to be correctly set up prior to experimenting. Do this
// through a sync.
let global = {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- rotary: {version: engine.version,
- syncID: engine.syncID}};
+ rotary: {version: engine.version, syncID}};
john.createCollection("meta").insert("global", global);
_("First sync to prepare server contents.");
await Service.sync();
_("Setting up Rotary collection to 401.");
let rotary = john.createCollection("rotary");
let oldHandler = rotary.collectionHandler;
--- a/services/sync/tests/unit/test_service_sync_remoteSetup.js
+++ b/services/sync/tests/unit/test_service_sync_remoteSetup.js
@@ -150,21 +150,22 @@ add_task(async function run_test() {
_("Checking that remoteSetup returns true.");
Assert.ok((await Service._remoteSetup()));
_("Verify that the meta record was uploaded.");
Assert.equal(meta_global.data.syncID, Service.syncID);
Assert.equal(meta_global.data.storageVersion, STORAGE_VERSION);
Assert.equal(meta_global.data.engines.clients.version, Service.clientsEngine.version);
- Assert.equal(meta_global.data.engines.clients.syncID, Service.clientsEngine.syncID);
+ Assert.equal(meta_global.data.engines.clients.syncID, await Service.clientsEngine.getSyncID());
_("Set the collection info hash so that sync() will remember the modified times for future runs.");
- collections.meta = Service.clientsEngine.lastSync;
- collections.clients = Service.clientsEngine.lastSync;
+ let lastSync = await Service.clientsEngine.getLastSync();
+ collections.meta = lastSync;
+ collections.clients = lastSync;
await Service.sync();
_("Sync again and verify that meta/global wasn't downloaded again");
meta_global.wasCalled = false;
await Service.sync();
Assert.ok(!meta_global.wasCalled);
_("Fake modified records. This will cause a redownload, but not reupload since it hasn't changed.");
--- a/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
+++ b/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
@@ -145,21 +145,21 @@ add_task(async function test_enabledLoca
});
add_task(async function test_disabledLocally() {
enableValidationPrefs();
_("Test: Engine is enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
+ let syncID = await engine.resetLocalSyncID();
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- engines: {steam: {syncID: engine.syncID,
- version: engine.version}}
+ engines: {steam: {syncID, version: engine.version}}
});
let steamCollection = new ServerWBO("steam", PAYLOAD);
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": steamCollection.handler()
});
await setUp(server);
@@ -189,21 +189,21 @@ add_task(async function test_disabledLoc
});
add_task(async function test_disabledLocally_wipe503() {
enableValidationPrefs();
_("Test: Engine is enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
+ let syncID = await engine.resetLocalSyncID();
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- engines: {steam: {syncID: engine.syncID,
- version: engine.version}}
+ engines: {steam: {syncID, version: engine.version}}
});
function service_unavailable(request, response) {
let body = "Service Unavailable";
response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
response.setHeader("Retry-After", "23");
response.bodyOutputStream.write(body, body.length);
}
@@ -232,21 +232,21 @@ add_task(async function test_disabledLoc
});
add_task(async function test_enabledRemotely() {
enableValidationPrefs();
_("Test: Engine is disabled locally and enabled on a remote client");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
+ let syncID = await engine.resetLocalSyncID();
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- engines: {steam: {syncID: engine.syncID,
- version: engine.version}}
+ engines: {steam: {syncID, version: engine.version}}
});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global":
upd("meta", metaWBO.handler()),
"/1.1/johndoe/storage/steam":
upd("steam", new ServerWBO("steam", {}).handler())
});
@@ -265,17 +265,17 @@ add_task(async function test_enabledRemo
_("Sync.");
await Service.sync();
_("Engine is enabled.");
Assert.ok(engine.enabled);
_("Meta record still present.");
- Assert.equal(metaWBO.data.engines.steam.syncID, engine.syncID);
+ Assert.equal(metaWBO.data.engines.steam.syncID, await engine.getSyncID());
} finally {
await Service.startOver();
await promiseStopServer(server);
}
});
add_task(async function test_disabledRemotelyTwoClients() {
enableValidationPrefs();
@@ -394,23 +394,25 @@ add_task(async function test_dependentEn
});
add_task(async function test_dependentEnginesDisabledLocally() {
enableValidationPrefs();
_("Test: Two dependent engines are enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let steamEngine = Service.engineManager.get("steam");
+ let steamSyncID = await steamEngine.resetLocalSyncID();
let stirlingEngine = Service.engineManager.get("stirling");
+ let stirlingSyncID = await stirlingEngine.resetLocalSyncID();
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
- engines: {steam: {syncID: steamEngine.syncID,
+ engines: {steam: {syncID: steamSyncID,
version: steamEngine.version},
- stirling: {syncID: stirlingEngine.syncID,
+ stirling: {syncID: stirlingSyncID,
version: stirlingEngine.version}}
});
let steamCollection = new ServerWBO("steam", PAYLOAD);
let stirlingCollection = new ServerWBO("stirling", PAYLOAD);
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
--- a/services/sync/tests/unit/test_syncengine.js
+++ b/services/sync/tests/unit/test_syncengine.js
@@ -69,53 +69,52 @@ add_task(async function test_url_attribu
add_task(async function test_syncID() {
_("SyncEngine.syncID corresponds to preference");
await SyncTestingInfrastructure(server);
let engine = await makeSteamEngine();
try {
// Ensure pristine environment
Assert.equal(Svc.Prefs.get("steam.syncID"), undefined);
+ Assert.equal(await engine.getSyncID(), "");
// Performing the first get on the attribute will generate a new GUID.
- Assert.equal(engine.syncID, "fake-guid-00");
+ Assert.equal(await engine.resetLocalSyncID(), "fake-guid-00");
Assert.equal(Svc.Prefs.get("steam.syncID"), "fake-guid-00");
Svc.Prefs.set("steam.syncID", Utils.makeGUID());
Assert.equal(Svc.Prefs.get("steam.syncID"), "fake-guid-01");
- Assert.equal(engine.syncID, "fake-guid-01");
+ Assert.equal(await engine.getSyncID(), "fake-guid-01");
} finally {
Svc.Prefs.resetBranch("");
}
});
add_task(async function test_lastSync() {
- _("SyncEngine.lastSync and SyncEngine.lastSyncLocal correspond to preferences");
+ _("SyncEngine.lastSync corresponds to preferences");
await SyncTestingInfrastructure(server);
let engine = await makeSteamEngine();
try {
// Ensure pristine environment
Assert.equal(Svc.Prefs.get("steam.lastSync"), undefined);
- Assert.equal(engine.lastSync, 0);
- Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
- Assert.equal(engine.lastSyncLocal, 0);
+ Assert.equal(await engine.getLastSync(), 0);
// Floats are properly stored as floats and synced with the preference
- engine.lastSync = 123.45;
- Assert.equal(engine.lastSync, 123.45);
+ await engine.setLastSync(123.45);
+ Assert.equal(await engine.getLastSync(), 123.45);
Assert.equal(Svc.Prefs.get("steam.lastSync"), "123.45");
// Integer is properly stored
- engine.lastSyncLocal = 67890;
- Assert.equal(engine.lastSyncLocal, 67890);
- Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), "67890");
+ await engine.setLastSync(67890);
+ Assert.equal(await engine.getLastSync(), 67890);
+ Assert.equal(Svc.Prefs.get("steam.lastSync"), "67890");
// resetLastSync() resets the value (and preference) to 0
- engine.resetLastSync();
- Assert.equal(engine.lastSync, 0);
+ await engine.resetLastSync();
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(Svc.Prefs.get("steam.lastSync"), "0");
} finally {
Svc.Prefs.resetBranch("");
}
});
add_task(async function test_toFetch() {
_("SyncEngine.toFetch corresponds to file on disk");
@@ -224,27 +223,24 @@ add_task(async function test_previousFai
add_task(async function test_resetClient() {
_("SyncEngine.resetClient resets lastSync and toFetch");
await SyncTestingInfrastructure(server);
let engine = await makeSteamEngine();
try {
// Ensure pristine environment
Assert.equal(Svc.Prefs.get("steam.lastSync"), undefined);
- Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
Assert.equal(engine.toFetch.size, 0);
- engine.lastSync = 123.45;
- engine.lastSyncLocal = 67890;
+ await engine.setLastSync(123.45);
engine.toFetch = guidSetOfSize(4);
engine.previousFailed = guidSetOfSize(3);
await engine.resetClient();
- Assert.equal(engine.lastSync, 0);
- Assert.equal(engine.lastSyncLocal, 0);
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(engine.toFetch.size, 0);
Assert.equal(engine.previousFailed.size, 0);
} finally {
Svc.Prefs.resetBranch("");
}
});
add_task(async function test_wipeServer() {
@@ -256,23 +252,23 @@ add_task(async function test_wipeServer(
let steamServer = httpd_setup({
"/1.1/foo/storage/steam": steamCollection.handler()
});
await SyncTestingInfrastructure(steamServer);
do_test_pending();
try {
// Some data to reset.
- engine.lastSync = 123.45;
+ await engine.setLastSync(123.45);
- engine.toFetch = guidSetOfSize(3),
+ engine.toFetch = guidSetOfSize(3);
_("Wipe server data and reset client.");
await engine.wipeServer();
Assert.equal(steamCollection.payload, undefined);
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(engine.toFetch.size, 0);
} finally {
steamServer.stop(do_test_finished);
Svc.Prefs.resetBranch("");
}
});
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -30,20 +30,20 @@ async function cleanAndGo(engine, server
async function promiseClean(engine, server) {
await clean(engine);
await promiseStopServer(server);
}
async function createServerAndConfigureClient() {
let engine = new RotaryEngine(Service);
+ let syncID = await engine.resetLocalSyncID();
let contents = {
- meta: {global: {engines: {rotary: {version: engine.version,
- syncID: engine.syncID}}}},
+ meta: {global: {engines: {rotary: {version: engine.version, syncID}}}},
crypto: {},
rotary: {}
};
const USER = "foo";
let server = new SyncServer();
server.registerUser(USER, "password");
server.createContents(USER, contents);
@@ -100,30 +100,29 @@ add_task(async function test_syncStartup
// Confirm initial environment
const changes = await engine._tracker.getChangedIDs();
Assert.equal(changes.rekolok, undefined);
let metaGlobal = await Service.recordManager.get(engine.metaURL);
Assert.equal(metaGlobal.payload.engines, undefined);
Assert.ok(!!collection.payload("flying"));
Assert.ok(!!collection.payload("scotsman"));
- engine.lastSync = Date.now() / 1000;
- engine.lastSyncLocal = Date.now();
+ await engine.setLastSync(Date.now() / 1000);
// Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
// so it has nothing to check.
await engine._syncStartup();
// The meta/global WBO has been filled with data about the engine
let engineData = metaGlobal.payload.engines.rotary;
Assert.equal(engineData.version, engine.version);
- Assert.equal(engineData.syncID, engine.syncID);
+ Assert.equal(engineData.syncID, await engine.getSyncID());
// Sync was reset and server data was wiped
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(collection.payload("flying"), undefined);
Assert.equal(collection.payload("scotsman"), undefined);
} finally {
await cleanAndGo(engine, server);
}
});
@@ -168,29 +167,28 @@ add_task(async function test_syncStartup
let global = new ServerWBO("global",
{engines: {rotary: {version: engine.version,
syncID: "foobar"}}});
server.registerPathHandler("/1.1/foo/storage/meta/global", global.handler());
try {
// Confirm initial environment
- Assert.equal(engine.syncID, "fake-guid-00");
+ Assert.equal(await engine.getSyncID(), "");
const changes = await engine._tracker.getChangedIDs();
Assert.equal(changes.rekolok, undefined);
- engine.lastSync = Date.now() / 1000;
- engine.lastSyncLocal = Date.now();
+ await engine.setLastSync(Date.now() / 1000);
await engine._syncStartup();
// The engine has assumed the server's syncID
- Assert.equal(engine.syncID, "foobar");
+ Assert.equal(await engine.getSyncID(), "foobar");
// Sync was reset
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
} finally {
await cleanAndGo(engine, server);
}
});
add_task(async function test_processIncoming_emptyServer() {
@@ -203,17 +201,17 @@ add_task(async function test_processInco
await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
try {
// Merely ensure that this code path is run without any errors
await engine._processIncoming();
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
} finally {
await cleanAndGo(engine, server);
}
});
add_task(async function test_processIncoming_createFromServer() {
@@ -239,35 +237,35 @@ add_task(async function test_processInco
"/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
});
await SyncTestingInfrastructure(server);
await generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
// Confirm initial environment
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(engine.lastModified, null);
Assert.equal(engine._store.items.flying, undefined);
Assert.equal(engine._store.items.scotsman, undefined);
Assert.equal(engine._store.items["../pathological"], undefined);
await engine._syncStartup();
await engine._processIncoming();
// Timestamps of last sync and last server modification are set.
- Assert.ok(engine.lastSync > 0);
+ Assert.ok((await engine.getLastSync()) > 0);
Assert.ok(engine.lastModified > 0);
// Local records have been created from the server data.
Assert.equal(engine._store.items.flying, "LNER Class A3 4472");
Assert.equal(engine._store.items.scotsman, "Flying Scotsman");
Assert.equal(engine._store.items["../pathological"], "Pathological Case");
} finally {
@@ -332,37 +330,37 @@ add_task(async function test_processInco
original: "Original Entry",
long_original: "Long Original Entry",
nukeme: "Nuke me!"};
// Make this record 1 min old, thus older than the one on the server
await engine._tracker.addChangedID("newerserver", Date.now() / 1000 - 60);
// This record has been changed 2 mins later than the one on the server
await engine._tracker.addChangedID("olderidentical", Date.now() / 1000);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
// Confirm initial environment
Assert.equal(engine._store.items.newrecord, undefined);
Assert.equal(engine._store.items.newerserver, "New data, but not as new as server!");
Assert.equal(engine._store.items.olderidentical, "Older but identical");
Assert.equal(engine._store.items.updateclient, "Got data?");
Assert.equal(engine._store.items.nukeme, "Nuke me!");
let changes = await engine._tracker.getChangedIDs();
Assert.ok(changes.olderidentical > 0);
await engine._syncStartup();
await engine._processIncoming();
// Timestamps of last sync and last server modification are set.
- Assert.ok(engine.lastSync > 0);
+ Assert.ok((await engine.getLastSync()) > 0);
Assert.ok(engine.lastModified > 0);
// The new record is created.
Assert.equal(engine._store.items.newrecord, "New stuff...");
// The 'newerserver' record is updated since the server data is newer.
Assert.equal(engine._store.items.newerserver, "New data!");
@@ -390,17 +388,17 @@ add_task(async function test_processInco
add_task(async function test_processIncoming_reconcile_local_deleted() {
_("Ensure local, duplicate ID is deleted on server.");
// When a duplicate is resolved, the local ID (which is never taken) should
// be deleted on the server.
let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
- engine.lastSync = now;
+ await engine.setLastSync(now);
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
record = encryptPayload({id: "DUPE_LOCAL", denomination: "local"});
wbo = new ServerWBO("DUPE_LOCAL", record, now - 1);
@@ -423,17 +421,17 @@ add_task(async function test_processInco
});
add_task(async function test_processIncoming_reconcile_equivalent() {
_("Ensure proper handling of incoming records that match local.");
let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
- engine.lastSync = now;
+ await engine.setLastSync(now);
engine.lastModified = now + 1;
let record = encryptPayload({id: "entry", denomination: "denomination"});
let wbo = new ServerWBO("entry", record, now + 2);
server.insertWBO(user, "rotary", wbo);
engine._store.items = {entry: "denomination"};
Assert.ok((await engine._store.itemExists("entry")));
@@ -450,17 +448,17 @@ add_task(async function test_processInco
// This is a somewhat complicated test. It ensures that if a client receives
// a modified record for an item that is deleted locally but with a different
// ID that the incoming record is ignored. This is a corner case for record
// handling, but it needs to be supported.
let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
- engine.lastSync = now;
+ await engine.setLastSync(now);
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
// Simulate a locally-deleted item.
engine._store.items = {};
@@ -489,17 +487,17 @@ add_task(async function test_processInco
_("Ensure locally deleted duplicate record older than incoming is restored.");
// This is similar to the above test except it tests the condition where the
// incoming record is newer than the local deletion, therefore overriding it.
let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
- engine.lastSync = now;
+ await engine.setLastSync(now);
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
// Simulate a locally-deleted item.
engine._store.items = {};
@@ -525,17 +523,17 @@ add_task(async function test_processInco
});
add_task(async function test_processIncoming_reconcile_changed_dupe() {
_("Ensure that locally changed duplicate record is handled properly.");
let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
- engine.lastSync = now;
+ await engine.setLastSync(now);
engine.lastModified = now + 1;
// The local record is newer than the incoming one, so it should be retained.
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
await engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
@@ -565,17 +563,17 @@ add_task(async function test_processInco
add_task(async function test_processIncoming_reconcile_changed_dupe_new() {
_("Ensure locally changed duplicate record older than incoming is ignored.");
// This test is similar to the above except the incoming record is younger
// than the local record. The incoming record should be authoritative.
let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
- engine.lastSync = now;
+ await engine.setLastSync(now);
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
await engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
await engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
@@ -625,30 +623,30 @@ add_task(async function test_processInco
}
collection.wbo("flying").modified =
collection.wbo("scotsman").modified = LASTSYNC - 10;
collection._wbos.rekolok.modified = LASTSYNC + 10;
// Time travel 10 seconds into the future but still download the above WBOs.
let engine = makeRotaryEngine();
- engine.lastSync = LASTSYNC;
+ await engine.setLastSync(LASTSYNC);
engine.toFetch = new SerializableSet(["flying", "scotsman"]);
engine.previousFailed = new SerializableSet(["failed0", "failed1", "failed2"]);
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
// Confirm initial environment
Assert.equal(engine._store.items.flying, undefined);
Assert.equal(engine._store.items.scotsman, undefined);
Assert.equal(engine._store.items.rekolok, undefined);
await engine._syncStartup();
@@ -695,23 +693,23 @@ add_task(async function test_processInco
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
// Confirm initial environment.
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(engine.toFetch.size, 0);
Assert.equal(engine.previousFailed.size, 0);
do_check_empty(engine._store.items);
let called = 0;
let counts;
function onApplied(count) {
_("Called with " + JSON.stringify(counts));
@@ -784,23 +782,23 @@ add_task(async function test_processInco
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
// Confirm initial environment.
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(engine.toFetch.size, 0);
Assert.equal(engine.previousFailed.size, 0);
do_check_empty(engine._store.items);
// Initial failed items in previousFailed to be reset.
let previousFailed = new SerializableSet([Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()]);
engine.previousFailed = previousFailed;
Assert.equal(engine.previousFailed, previousFailed);
@@ -892,25 +890,25 @@ add_task(async function test_processInco
};
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": recording_handler(collection)
});
await SyncTestingInfrastructure(server);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
// Confirm initial environment
- Assert.equal(engine.lastSync, 0);
+ Assert.equal(await engine.getLastSync(), 0);
Assert.equal(engine.toFetch.size, 0);
Assert.equal(engine.previousFailed.size, 0);
do_check_empty(engine._store.items);
let observerSubject;
let observerData;
Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
@@ -993,35 +991,35 @@ add_task(async function test_processInco
nodecrypt: "Valid ciphertext"};
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
// Confirm initial state
Assert.equal(engine.toFetch.size, 0);
Assert.equal(engine.previousFailed.size, 0);
let observerSubject;
let observerData;
Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
observerSubject = subject;
observerData = data;
});
- engine.lastSync = collection.wbo("nojson").modified - 1;
+ await engine.setLastSync(collection.wbo("nojson").modified - 1);
let ping = await sync_engine_and_validate_telem(engine, true);
Assert.equal(ping.engines[0].incoming.applied, 2);
Assert.equal(ping.engines[0].incoming.failed, 4);
Assert.equal(ping.engines[0].incoming.newFailed, 4);
Assert.equal(engine.previousFailed.size, 4);
Assert.ok(engine.previousFailed.has("nojson"));
Assert.ok(engine.previousFailed.has("nojson2"));
@@ -1051,40 +1049,36 @@ add_task(async function test_uploadOutgo
"/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
"/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
});
await SyncTestingInfrastructure(server);
await generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
- engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman"};
// Mark one of these records as changed
await engine._tracker.addChangedID("scotsman", 0);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
// Confirm initial environment
- Assert.equal(engine.lastSyncLocal, 0);
Assert.equal(collection.payload("flying"), undefined);
Assert.equal(collection.payload("scotsman"), undefined);
await engine._syncStartup();
await engine._uploadOutgoing();
- // Local timestamp has been set.
- Assert.ok(engine.lastSyncLocal > 0);
-
// Ensure the marked record ('scotsman') has been uploaded and is
// no longer marked.
Assert.equal(collection.payload("flying"), undefined);
Assert.ok(!!collection.payload("scotsman"));
Assert.equal(collection.cleartext("scotsman").id, "scotsman");
const changes = await engine._tracker.getChangedIDs();
Assert.equal(changes.scotsman, undefined);
@@ -1108,30 +1102,30 @@ async function test_uploadOutgoing_max_r
"/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler(),
});
await SyncTestingInfrastructure(server);
await generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
engine.allowSkippedRecord = allowSkippedRecord;
- engine.lastSync = 1;
engine._store.items = { flying: "a".repeat(1024 * 1024), scotsman: "abcd" };
await engine._tracker.addChangedID("flying", 1000);
await engine._tracker.addChangedID("scotsman", 1000);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
+ await engine.setLastSync(1); // needs to be non-zero so that tracker is queried
+
// Confirm initial environment
- Assert.equal(engine.lastSyncLocal, 0);
Assert.equal(collection.payload("flying"), undefined);
Assert.equal(collection.payload("scotsman"), undefined);
await engine._syncStartup();
await engine._uploadOutgoing();
if (!allowSkippedRecord) {
do_throw("should not get here");
@@ -1183,49 +1177,45 @@ add_task(async function test_uploadOutgo
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
- engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman",
peppercorn: "Peppercorn Class"};
// Mark these records as changed
const FLYING_CHANGED = 12345;
const SCOTSMAN_CHANGED = 23456;
const PEPPERCORN_CHANGED = 34567;
await engine._tracker.addChangedID("flying", FLYING_CHANGED);
await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
await engine._tracker.addChangedID("peppercorn", PEPPERCORN_CHANGED);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
// Confirm initial environment
- Assert.equal(engine.lastSyncLocal, 0);
Assert.equal(collection.payload("flying"), undefined);
let changes = await engine._tracker.getChangedIDs();
Assert.equal(changes.flying, FLYING_CHANGED);
Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
Assert.equal(changes.peppercorn, PEPPERCORN_CHANGED);
engine.enabled = true;
await sync_engine_and_validate_telem(engine, true);
- // Local timestamp has been set.
- Assert.ok(engine.lastSyncLocal > 0);
-
// Ensure the 'flying' record has been uploaded and is no longer marked.
Assert.ok(!!collection.payload("flying"));
changes = await engine._tracker.getChangedIDs();
Assert.equal(changes.flying, undefined);
// The 'scotsman' and 'peppercorn' records couldn't be uploaded so
// they weren't cleared from the tracker.
Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
@@ -1252,34 +1242,34 @@ async function createRecordFailTelemetry
engine.allowSkippedRecord = allowSkippedRecord;
let oldCreateRecord = engine._store.createRecord;
engine._store.createRecord = async (id, col) => {
if (id != "flying") {
throw new Error("oops");
}
return oldCreateRecord.call(engine._store, id, col);
};
- engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman"};
// Mark these records as changed
const FLYING_CHANGED = 12345;
const SCOTSMAN_CHANGED = 23456;
await engine._tracker.addChangedID("flying", FLYING_CHANGED);
await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
let ping;
try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
+
// Confirm initial environment
- Assert.equal(engine.lastSyncLocal, 0);
Assert.equal(collection.payload("flying"), undefined);
let changes = await engine._tracker.getChangedIDs();
Assert.equal(changes.flying, FLYING_CHANGED);
Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
engine.enabled = true;
ping = await sync_engine_and_validate_telem(engine, true, onErrorPing => {
ping = onErrorPing;
@@ -1298,19 +1288,16 @@ async function createRecordFailTelemetry
do_throw("should not get here");
}
// Ensure the 'flying' record has not been uploaded and is still marked
Assert.ok(!collection.payload("flying"));
const changes = await engine._tracker.getChangedIDs();
Assert.ok(changes.flying);
} finally {
- // Local timestamp has been set.
- Assert.ok(engine.lastSyncLocal > 0);
-
// We reported in telemetry that we failed a record
Assert.equal(ping.engines[0].outgoing[0].failed, 1);
// In any case, the 'scotsman' record couldn't be created so it wasn't
// uploaded nor it was not cleared from the tracker.
Assert.ok(!collection.payload("scotsman"));
const changes = await engine._tracker.getChangedIDs();
Assert.equal(changes.scotsman, SCOTSMAN_CHANGED);
@@ -1336,21 +1323,20 @@ add_task(async function test_uploadOutgo
let collection = new ServerCollection();
let engine = makeRotaryEngine();
engine.allowSkippedRecord = false;
engine._store.items["large-item"] = "Y".repeat(Service.getMaxRecordPayloadSize() * 2);
await engine._tracker.addChangedID("large-item", 0);
collection.insert("large-item");
-
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
try {
@@ -1487,18 +1473,16 @@ add_task(async function test_sync_partia
let oldServerConfiguration = Service.serverConfiguration;
Service.serverConfiguration = {
max_post_records: 100
};
await SyncTestingInfrastructure(server);
await generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
- engine.lastSync = 123; // needs to be non-zero so that tracker is queried
- engine.lastSyncLocal = 456;
// Let the third upload fail completely
var noOfUploads = 0;
collection.post = (function(orig) {
return function() {
if (noOfUploads == 2)
throw new Error("FAIL!");
noOfUploads++;
@@ -1512,36 +1496,34 @@ add_task(async function test_sync_partia
engine._store.items[id] = "Record No. " + i;
await engine._tracker.addChangedID(id, i);
// Let two items in the first upload batch fail.
if ((i != 23) && (i != 42)) {
collection.insert(id);
}
}
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
engine.enabled = true;
let error;
try {
await sync_engine_and_validate_telem(engine, true);
} catch (ex) {
error = ex;
}
ok(!!error);
- // The timestamp has been updated.
- Assert.ok(engine.lastSyncLocal > 456);
-
const changes = await engine._tracker.getChangedIDs();
for (let i = 0; i < 234; i++) {
let id = "record-no-" + i;
// Ensure failed records are back in the tracker:
// * records no. 23 and 42 were rejected by the server,
// * records after the third batch and higher couldn't be uploaded because
// we failed hard on the 3rd upload.
if ((i == 23) || (i == 42) || (i >= 200))
@@ -1622,20 +1604,20 @@ add_task(async function test_syncapplied
}
let server = httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
let numApplyCalls = 0;
let engine_name;
let count;
function onApplied(subject, data) {
numApplyCalls++;
engine_name = data;
count = subject;
--- a/services/sync/tests/unit/test_syncscheduler.js
+++ b/services/sync/tests/unit/test_syncscheduler.js
@@ -21,22 +21,23 @@ CatapultEngine.prototype = {
async _sync() {
throw this.exception;
}
};
var scheduler = new SyncScheduler(Service);
let clientsEngine;
-function sync_httpd_setup() {
+async function sync_httpd_setup() {
+ let clientsSyncID = await clientsEngine.resetLocalSyncID();
let global = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {clients: {version: clientsEngine.version,
- syncID: clientsEngine.syncID}}
+ syncID: clientsSyncID}}
});
let clientsColl = new ServerCollection({}, true);
// Tracking info/collections.
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
return httpd_setup({
@@ -208,17 +209,17 @@ add_task(async function test_masterpassw
};
let oldVerifyLogin = Service.verifyLogin;
Service.verifyLogin = async function() {
Status.login = MASTER_PASSWORD_LOCKED;
return false;
};
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
await Service.sync();
Assert.ok(loginFailed);
Assert.equal(Status.login, MASTER_PASSWORD_LOCKED);
Assert.ok(rescheduleInterval);
@@ -250,17 +251,17 @@ add_task(async function test_calculateBa
await cleanUpAndGo();
});
add_task(async function test_scheduleNextSync_nowOrPast() {
enableValidationPrefs();
let promiseObserved = promiseOneObserver("weave:service:sync:finish");
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// We're late for a sync...
scheduler.scheduleNextSync(-1);
await promiseObserved;
await cleanUpAndGo(server);
});
@@ -365,17 +366,17 @@ add_task(async function test_scheduleNex
Assert.equal(scheduler.syncTimer.delay, Status.backoffInterval);
await cleanUpAndGo();
});
add_task(async function test_handleSyncError() {
enableValidationPrefs();
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Force sync to fail.
Svc.Prefs.set("firstSync", "notReady");
_("Ensure expected initial environment.");
Assert.equal(scheduler._syncErrors, 0);
Assert.ok(!Status.enforceBackoff);
@@ -430,17 +431,17 @@ add_task(async function test_handleSyncE
scheduler.scheduleNextSync(-1);
await promiseObserved;
await cleanUpAndGo(server);
});
add_task(async function test_client_sync_finish_updateClientMode() {
enableValidationPrefs();
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Confirm defaults.
Assert.equal(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
Assert.equal(scheduler.syncInterval, scheduler.singleDeviceInterval);
Assert.ok(!scheduler.idle);
// Trigger a change in interval & threshold by adding a client.
@@ -474,17 +475,17 @@ add_task(async function test_client_sync
});
add_task(async function test_autoconnect_nextSync_past() {
enableValidationPrefs();
let promiseObserved = promiseOneObserver("weave:service:sync:finish");
// nextSync will be 0 by default, so it's way in the past.
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
scheduler.delayedAutoConnect(0);
await promiseObserved;
await cleanUpAndGo(server);
});
add_task(async function test_autoconnect_nextSync_future() {
@@ -509,17 +510,17 @@ add_task(async function test_autoconnect
Assert.equal(scheduler.nextSync, expectedSync);
Assert.ok(scheduler.syncTimer.delay >= expectedInterval);
Svc.Obs.remove("weave:service:login:start", onLoginStart);
await cleanUpAndGo();
});
add_task(async function test_autoconnect_mp_locked() {
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Pretend user did not unlock master password.
let origLocked = Utils.mpLocked;
Utils.mpLocked = () => true;
let origEnsureMPUnlocked = Utils.ensureMPUnlocked;
@@ -551,17 +552,17 @@ add_task(async function test_autoconnect
Utils.mpLocked = origLocked;
Utils.ensureMPUnlocked = origEnsureMPUnlocked;
Service.identity._fxaService = origFxA;
await cleanUpAndGo(server);
});
add_task(async function test_no_autoconnect_during_wizard() {
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Simulate the Sync setup wizard.
Svc.Prefs.set("firstSync", "notReady");
// Ensure we don't actually try to sync (or log in for that matter).
function onLoginStart() {
do_throw("Should not get here!");
@@ -570,17 +571,17 @@ add_task(async function test_no_autoconn
scheduler.delayedAutoConnect(0);
await promiseZeroTimer();
Svc.Obs.remove("weave:service:login:start", onLoginStart);
await cleanUpAndGo(server);
});
add_task(async function test_no_autoconnect_status_not_ok() {
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
Status.__authManager = Service.identity = new BrowserIDManager();
// Ensure we don't actually try to sync (or log in for that matter).
function onLoginStart() {
do_throw("Should not get here!");
}
Svc.Obs.add("weave:service:login:start", onLoginStart);
@@ -596,17 +597,17 @@ add_task(async function test_no_autoconn
add_task(async function test_autoconnectDelay_pref() {
enableValidationPrefs();
let promiseObserved = promiseOneObserver("weave:service:sync:finish");
Svc.Prefs.set("autoconnectDelay", 1);
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
Svc.Obs.notify("weave:service:ready");
// autoconnectDelay pref is multiplied by 1000.
Assert.equal(scheduler._autoTimer.delay, 1000);
Assert.equal(Status.service, STATUS_OK);
await promiseObserved;
@@ -711,17 +712,17 @@ add_task(async function test_back_deboun
await cleanUpAndGo();
});
add_task(async function test_no_sync_node() {
enableValidationPrefs();
// Test when Status.sync == NO_SYNC_NODE_FOUND
// it is not overwritten on sync:finish
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
let oldfc = Service.identity._findCluster;
Service.identity._findCluster = () => null;
Service.clusterURL = "";
try {
await Service.sync();
Assert.equal(Status.sync, NO_SYNC_NODE_FOUND);
@@ -733,17 +734,17 @@ add_task(async function test_no_sync_nod
}
});
add_task(async function test_sync_failed_partial_500s() {
enableValidationPrefs();
_("Test a 5xx status calls handleSyncError.");
scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
let engine = Service.engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 500};
Assert.equal(Status.sync, SYNC_SUCCEEDED);
Assert.ok(await setUp(server));
@@ -759,17 +760,17 @@ add_task(async function test_sync_failed
Assert.ok(scheduler.nextSync <= (Date.now() + maxInterval));
Assert.ok(scheduler.syncTimer.delay <= maxInterval);
await cleanUpAndGo(server);
});
add_task(async function test_sync_failed_partial_noresync() {
enableValidationPrefs();
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
let engine = Service.engineManager.get("catapult");
engine.enabled = true;
engine.exception = "Bad news";
engine._tracker._score = 10;
Assert.equal(Status.sync, SYNC_SUCCEEDED);
@@ -793,17 +794,17 @@ add_task(async function test_sync_failed
await cleanUpAndGo(server);
});
add_task(async function test_sync_failed_partial_400s() {
enableValidationPrefs();
_("Test a non-5xx status doesn't call handleSyncError.");
scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
let engine = Service.engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 400};
// Have multiple devices for an active interval.
await clientsEngine._store.create(
{ id: "foo", cleartext: { os: "mobile", version: "0.01", type: "desktop" } }
@@ -825,17 +826,17 @@ add_task(async function test_sync_failed
Assert.ok(scheduler.syncTimer.delay <= scheduler.activeInterval);
await cleanUpAndGo(server);
});
add_task(async function test_sync_X_Weave_Backoff() {
enableValidationPrefs();
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Use an odd value on purpose so that it doesn't happen to coincide with one
// of the sync intervals.
const BACKOFF = 7337;
// Extend info/collections so that we can put it into server maintenance mode.
const INFO_COLLECTIONS = "/1.1/johndoe@mozilla.com/info/collections";
@@ -884,17 +885,17 @@ add_task(async function test_sync_X_Weav
Assert.ok(scheduler.syncTimer.delay >= minimumExpectedDelay);
await cleanUpAndGo(server);
});
add_task(async function test_sync_503_Retry_After() {
enableValidationPrefs();
- let server = sync_httpd_setup();
+ let server = await sync_httpd_setup();
await setUp(server);
// Use an odd value on purpose so that it doesn't happen to coincide with one
// of the sync intervals.
const BACKOFF = 7337;
// Extend info/collections so that we can put it into server maintenance mode.
const INFO_COLLECTIONS = "/1.1/johndoe@mozilla.com/info/collections";
--- a/services/sync/tests/unit/test_tab_engine.js
+++ b/services/sync/tests/unit/test_tab_engine.js
@@ -69,20 +69,20 @@ add_task(async function test_tab_engine_
_("Setting up Sync server");
let server = sync_httpd_setup({
"/1.1/foo/storage/tabs": collection.handler()
});
await SyncTestingInfrastructure(server);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {tabs: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {tabs: {version: engine.version, syncID}};
await generateNewKeys(Service.collectionKeys);
let promiseFinished = new Promise(resolve => {
let syncFinish = engine._syncFinish;
engine._syncFinish = async function() {
equal(applied.length, 1, "Remote client record was applied");
equal(applied[0].id, remoteID, "Remote client ID matches");
--- a/services/sync/tests/unit/test_telemetry.js
+++ b/services/sync/tests/unit/test_telemetry.js
@@ -116,17 +116,17 @@ add_task(async function test_processInco
// network error which in turn provokes an exception in _processIncoming.
const BOGUS_GUID = "zzzzzzzzzzzz";
let bogus_record = collection.insert(BOGUS_GUID, "I'm a bogus record!");
bogus_record.get = function get() {
throw new Error("Sync this!");
};
// Make the 10 minutes old so it will only be synced in the toFetch phase.
bogus_record.modified = Date.now() / 1000 - 60 * 10;
- engine.lastSync = Date.now() / 1000 - 60;
+ await engine.setLastSync(Date.now() / 1000 - 60);
engine.toFetch = new SerializableSet([BOGUS_GUID]);
let error, pingPayload, fullPing;
try {
await sync_engine_and_validate_telem(engine, true, (errPing, fullErrPing) => {
pingPayload = errPing;
fullPing = fullErrPing;
});
@@ -205,45 +205,44 @@ add_task(async function test_upload_fail
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
await configureIdentity({ username: "foo" }, server);
let engine = new RotaryEngine(Service);
- engine.lastSync = 123; // needs to be non-zero so that tracker is queried
- engine.lastSyncLocal = 456;
engine._store.items = {
flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman",
peppercorn: "Peppercorn Class"
};
const FLYING_CHANGED = 12345;
const SCOTSMAN_CHANGED = 23456;
const PEPPERCORN_CHANGED = 34567;
await engine._tracker.addChangedID("flying", FLYING_CHANGED);
await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
await engine._tracker.addChangedID("peppercorn", PEPPERCORN_CHANGED);
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL, new WBORecord(engine.metaURL));
- meta_global.payload.engines = { rotary: { version: engine.version, syncID: engine.syncID } };
+ meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
try {
+ await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
let changes = await engine._tracker.getChangedIDs();
_(`test_upload_failed: Rotary tracker contents at first sync: ${
JSON.stringify(changes)}`);
engine.enabled = true;
let ping = await sync_engine_and_validate_telem(engine, true);
ok(!!ping);
equal(ping.engines.length, 1);
equal(ping.engines[0].incoming, null);
deepEqual(ping.engines[0].outgoing, [{ sent: 3, failed: 2 }]);
- engine.lastSync = 123;
- engine.lastSyncLocal = 456;
+ await engine.setLastSync(123);
changes = await engine._tracker.getChangedIDs();
_(`test_upload_failed: Rotary tracker contents at second sync: ${
JSON.stringify(changes)}`);
ping = await sync_engine_and_validate_telem(engine, true);
ok(!!ping);
equal(ping.engines.length, 1);
equal(ping.engines[0].incoming.reconciled, 1);
@@ -259,35 +258,33 @@ add_task(async function test_sync_partia
let collection = new ServerCollection();
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
await SyncTestingInfrastructure(server);
await generateNewKeys(Service.collectionKeys);
let engine = new RotaryEngine(Service);
- engine.lastSync = 123;
- engine.lastSyncLocal = 456;
-
+ await engine.setLastSync(123);
// Create a bunch of records (and server side handlers)
for (let i = 0; i < 234; i++) {
let id = "record-no-" + i;
engine._store.items[id] = "Record No. " + i;
await engine._tracker.addChangedID(id, i);
// Let two items in the first upload batch fail.
if (i != 23 && i != 42) {
collection.insert(id);
}
}
+ let syncID = await engine.resetLocalSyncID();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
+ meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
try {
let changes = await engine._tracker.getChangedIDs();
_(`test_sync_partialUpload: Rotary tracker contents at first sync: ${
JSON.stringify(changes)}`);
engine.enabled = true;
let ping = await sync_engine_and_validate_telem(engine, true);
@@ -300,18 +297,17 @@ add_task(async function test_sync_partia
deepEqual(ping.engines[0].outgoing, [{ sent: 234, failed: 2 }]);
collection.post = function() { throw new Error("Failure"); };
engine._store.items["record-no-1000"] = "Record No. 1000";
await engine._tracker.addChangedID("record-no-1000", 1000);
collection.insert("record-no-1000", 1000);
- engine.lastSync = 123;
- engine.lastSyncLocal = 456;
+ await engine.setLastSync(123);
ping = null;
changes = await engine._tracker.getChangedIDs();
_(`test_sync_partialUpload: Rotary tracker contents at second sync: ${
JSON.stringify(changes)}`);
try {
// should throw
await sync_engine_and_validate_telem(engine, true, errPing => ping = errPing);