--- a/services/sync/modules-testing/utils.js
+++ b/services/sync/modules-testing/utils.js
@@ -255,67 +255,61 @@ this.configureIdentity = async function(
let config = makeIdentityConfig(identityOverrides, server);
let ns = {};
Cu.import("resource://services-sync/service.js", ns);
if (server) {
ns.Service.serverURL = server.baseURI;
}
+ ns.Service._clusterManager = ns.Service.identity.createClusterManager(ns.Service);
+
if (ns.Service.identity instanceof BrowserIDManager) {
// do the FxAccounts thang...
// If a server was specified, ensure FxA has a correct cluster URL available.
if (server && !config.fxaccount.token.endpoint) {
let ep = server.baseURI;
if (!ep.endsWith("/")) {
ep += "/";
}
ep += "1.1/" + config.username + "/";
config.fxaccount.token.endpoint = ep;
}
configureFxAccountIdentity(ns.Service.identity, config);
await ns.Service.identity.initializeWithCurrentIdentity();
- // need to wait until this identity manager is readyToAuthenticate.
- await ns.Service.identity.whenReadyToAuthenticate.promise;
+ // and cheat to avoid requiring each test to do an explicit login - give it
+ // a cluster URL.
+ if (config.fxaccount.token.endpoint) {
+ ns.Service.clusterURL = config.fxaccount.token.endpoint;
+ }
return;
}
// old style identity provider.
+ if (server) {
+ ns.Service.clusterURL = server.baseURI + "/";
+ }
+ ns.Service.identity.username = config.username;
+ ns.Service._updateCachedURLs();
setBasicCredentials(config.username, config.sync.password, config.sync.syncKey);
}
-this.SyncTestingInfrastructure = function (server, username, password, syncKey) {
+this.SyncTestingInfrastructure = async function (server, username, password) {
let ns = {};
Cu.import("resource://services-sync/service.js", ns);
- ensureLegacyIdentityManager();
- let config = makeIdentityConfig();
- // XXX - hacks for the sync identity provider.
- if (username)
- config.username = username;
- if (password)
- config.sync.password = password;
- if (syncKey)
- config.sync.syncKey = syncKey;
- let cb = Async.makeSpinningCallback();
- configureIdentity(config).then(cb, cb);
- cb.wait();
-
- let i = server.identity;
- let uri = i.primaryScheme + "://" + i.primaryHost + ":" +
- i.primaryPort + "/";
-
- ns.Service.serverURL = uri;
- ns.Service.clusterURL = uri;
-
- this.logStats = initTestLogging();
- this.fakeFilesystem = new FakeFilesystemService({});
- this.fakeGUIDService = new FakeGUIDService();
- this.fakeCryptoService = new FakeCryptoService();
+ let config = makeIdentityConfig({ username, password });
+ await configureIdentity(config, server);
+ return {
+ logStats: initTestLogging(),
+ fakeFilesystem: new FakeFilesystemService({}),
+ fakeGUIDService: new FakeGUIDService(),
+ fakeCryptoService: new FakeCryptoService(),
+ }
}
/**
* Turn WBO cleartext into fake "encrypted" payload as it goes over the wire.
*/
this.encryptPayload = function encryptPayload(cleartext) {
if (typeof cleartext == "object") {
cleartext = JSON.stringify(cleartext);
--- a/services/sync/tests/unit/head_errorhandler_common.js
+++ b/services/sync/tests/unit/head_errorhandler_common.js
@@ -38,35 +38,35 @@ const EHTestsCommon = {
upd("crypto", (new ServerWBO("keys")).handler()),
"/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler()),
// Credentials are wrong or node reallocated.
"/1.1/janedoe/storage/meta/global": handler_401,
"/1.1/janedoe/info/collections": handler_401,
// Maintenance or overloaded (503 + Retry-After) at info/collections.
- "/maintenance/1.1/broken.info/info/collections": EHTestsCommon.service_unavailable,
+ "/1.1/broken.info/info/collections": EHTestsCommon.service_unavailable,
// Maintenance or overloaded (503 + Retry-After) at meta/global.
- "/maintenance/1.1/broken.meta/storage/meta/global": EHTestsCommon.service_unavailable,
- "/maintenance/1.1/broken.meta/info/collections": collectionsHelper.handler,
+ "/1.1/broken.meta/storage/meta/global": EHTestsCommon.service_unavailable,
+ "/1.1/broken.meta/info/collections": collectionsHelper.handler,
// Maintenance or overloaded (503 + Retry-After) at crypto/keys.
- "/maintenance/1.1/broken.keys/storage/meta/global": upd("meta", global.handler()),
- "/maintenance/1.1/broken.keys/info/collections": collectionsHelper.handler,
- "/maintenance/1.1/broken.keys/storage/crypto/keys": EHTestsCommon.service_unavailable,
+ "/1.1/broken.keys/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/broken.keys/info/collections": collectionsHelper.handler,
+ "/1.1/broken.keys/storage/crypto/keys": EHTestsCommon.service_unavailable,
// Maintenance or overloaded (503 + Retry-After) at wiping collection.
- "/maintenance/1.1/broken.wipe/info/collections": collectionsHelper.handler,
- "/maintenance/1.1/broken.wipe/storage/meta/global": upd("meta", global.handler()),
- "/maintenance/1.1/broken.wipe/storage/crypto/keys":
+ "/1.1/broken.wipe/info/collections": collectionsHelper.handler,
+ "/1.1/broken.wipe/storage/meta/global": upd("meta", global.handler()),
+ "/1.1/broken.wipe/storage/crypto/keys":
upd("crypto", (new ServerWBO("keys")).handler()),
- "/maintenance/1.1/broken.wipe/storage": EHTestsCommon.service_unavailable,
- "/maintenance/1.1/broken.wipe/storage/clients": upd("clients", clientsColl.handler()),
- "/maintenance/1.1/broken.wipe/storage/catapult": EHTestsCommon.service_unavailable
+ "/1.1/broken.wipe/storage": EHTestsCommon.service_unavailable,
+ "/1.1/broken.wipe/storage/clients": upd("clients", clientsColl.handler()),
+ "/1.1/broken.wipe/storage/catapult": EHTestsCommon.service_unavailable
});
},
CatapultEngine: (function() {
function CatapultEngine() {
SyncEngine.call(this, "Catapult", Service);
}
CatapultEngine.prototype = {
@@ -88,19 +88,17 @@ const EHTestsCommon = {
// the keys with a different Sync Key, without changing the local one.
let newSyncKeyBundle = new SyncKeyBundle("johndoe", "23456234562345623456234562");
let keys = Service.collectionKeys.asWBO();
keys.encrypt(newSyncKeyBundle);
keys.upload(Service.resource(Service.cryptoKeysURL));
},
async setUp(server) {
- await configureIdentity({ username: "johndoe" });
- Service.serverURL = server.baseURI + "/";
- Service.clusterURL = server.baseURI + "/";
+ await configureIdentity({ username: "johndoe" }, server);
return EHTestsCommon.generateAndUploadKeys()
},
generateAndUploadKeys() {
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
return serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success;
--- a/services/sync/tests/unit/head_helpers.js
+++ b/services/sync/tests/unit/head_helpers.js
@@ -290,17 +290,17 @@ function assert_valid_ping(record) {
}
}
// Asserts that `ping` is a ping that doesn't contain any failure information
function assert_success_ping(ping) {
ok(!!ping);
assert_valid_ping(ping);
ping.syncs.forEach(record => {
- ok(!record.failureReason);
+ ok(!record.failureReason, JSON.stringify(record.failureReason));
equal(undefined, record.status);
greater(record.engines.length, 0);
for (let e of record.engines) {
ok(!e.failureReason);
equal(undefined, e.status);
if (e.validation) {
equal(undefined, e.validation.problems);
equal(undefined, e.validation.failureReason);
--- a/services/sync/tests/unit/test_addons_engine.js
+++ b/services/sync/tests/unit/test_addons_engine.js
@@ -138,30 +138,29 @@ add_test(function test_get_changed_ids()
changes = engine.getChangedIDs();
_(JSON.stringify(changes));
do_check_eq(0, Object.keys(changes).length);
advance_test();
});
-add_test(function test_disabled_install_semantics() {
+add_task(async function test_disabled_install_semantics() {
_("Ensure that syncing a disabled add-on preserves proper state.");
// This is essentially a test for bug 712542, which snuck into the original
// add-on sync drop. It ensures that when an add-on is installed that the
// disabled state and incoming syncGUID is preserved, even on the next sync.
const USER = "foo";
const PASSWORD = "password";
- const PASSPHRASE = "abcdeabcdeabcdeabcdeabcdea";
const ADDON_ID = "addon1@tests.mozilla.org";
let server = new SyncServer();
server.start();
- new SyncTestingInfrastructure(server.server, USER, PASSWORD, PASSPHRASE);
+ await SyncTestingInfrastructure(server, USER, PASSWORD);
generateNewKeys(Service.collectionKeys);
let contents = {
meta: {global: {engines: {addons: {version: engine.version,
syncID: engine.syncID}}}},
crypto: {},
addons: {}
@@ -194,19 +193,17 @@ add_test(function test_disabled_install_
server.insertWBO(USER, "addons", wbo);
_("Performing sync of add-ons engine.");
engine._sync();
// At this point the non-restartless extension should be staged for install.
// Don't need this server any more.
- let cb = Async.makeSpinningCallback();
- amoServer.stop(cb);
- cb.wait();
+ await promiseStopServer(amoServer);
// We ensure the reconciler has recorded the proper ID and enabled state.
let addon = reconciler.getAddonStateFromSyncGUID(id);
do_check_neq(null, addon);
do_check_eq(false, addon.enabled);
// We fake an app restart and perform another sync, just to make sure things
// are sane.
@@ -219,17 +216,17 @@ add_test(function test_disabled_install_
let collection = server.getCollection(USER, "addons");
do_check_eq(1, collection.count());
let payload = collection.payloads()[0];
do_check_neq(null, collection.wbo(id));
do_check_eq(ADDON_ID, payload.addonID);
do_check_false(payload.enabled);
- server.stop(advance_test);
+ promiseStopServer(server);
});
add_test(function cleanup() {
// There's an xpcom-shutdown hook for this, but let's give this a shot.
reconciler.stopListening();
run_next_test();
});
--- a/services/sync/tests/unit/test_bookmark_duping.js
+++ b/services/sync/tests/unit/test_bookmark_duping.js
@@ -18,29 +18,35 @@ const bms = PlacesUtils.bookmarks;
Service.engineManager.register(BookmarksEngine);
const engine = new BookmarksEngine(Service);
const store = engine._store;
store._log.level = Log.Level.Trace;
engine._log.level = Log.Level.Trace;
-function setup() {
+async function setup() {
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {bookmarks: {version: engine.version,
syncID: engine.syncID}}}},
bookmarks: {},
});
generateNewKeys(Service.collectionKeys);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let collection = server.user("foo").collection("bookmarks");
+ // The bookmarks engine *always* tracks changes, meaning we might try
+ // and sync due to the bookmarks we ourselves create! Worse, because we
+ // do an engine sync only, there's no locking - so we end up with multiple
+ // syncs running. Neuter that by making the threshold very large.
+ Service.scheduler.syncThreshold = 10000000;
+
Svc.Obs.notify("weave:engine:start-tracking"); // We skip usual startup...
return { server, collection };
}
async function cleanup(server) {
Svc.Obs.notify("weave:engine:stop-tracking");
Services.prefs.setBoolPref("services.sync-testing.startOverKeepIdentity", true);
@@ -125,17 +131,17 @@ async function validate(collection, expe
do_print("Local bookmark tree:\n" + JSON.stringify(tree, undefined, 2));
ok(false);
}
}
add_task(async function test_dupe_bookmark() {
_("Ensure that a bookmark we consider a dupe is handled correctly.");
- let { server, collection } = this.setup();
+ let { server, collection } = await this.setup();
try {
// The parent folder and one bookmark in it.
let {id: folder1_id, guid: folder1_guid } = createFolder(bms.toolbarFolder, "Folder 1");
let {id: bmk1_id, guid: bmk1_guid} = createBookmark(folder1_id, "http://getfirefox.com/", "Get Firefox!");
engine.sync();
@@ -176,17 +182,17 @@ add_task(async function test_dupe_bookma
} finally {
await cleanup(server);
}
});
add_task(async function test_dupe_reparented_bookmark() {
_("Ensure that a bookmark we consider a dupe from a different parent is handled correctly");
- let { server, collection } = this.setup();
+ let { server, collection } = await this.setup();
try {
// The parent folder and one bookmark in it.
let {id: folder1_id, guid: folder1_guid } = createFolder(bms.toolbarFolder, "Folder 1");
let {id: bmk1_id, guid: bmk1_guid} = createBookmark(folder1_id, "http://getfirefox.com/", "Get Firefox!");
// Another parent folder *with the same name*
let {id: folder2_id, guid: folder2_guid } = createFolder(bms.toolbarFolder, "Folder 1");
@@ -242,17 +248,17 @@ add_task(async function test_dupe_repare
} finally {
await cleanup(server);
}
});
add_task(async function test_dupe_reparented_locally_changed_bookmark() {
_("Ensure that a bookmark with local changes we consider a dupe from a different parent is handled correctly");
- let { server, collection } = this.setup();
+ let { server, collection } = await this.setup();
try {
// The parent folder and one bookmark in it.
let {id: folder1_id, guid: folder1_guid } = createFolder(bms.toolbarFolder, "Folder 1");
let {id: bmk1_id, guid: bmk1_guid} = createBookmark(folder1_id, "http://getfirefox.com/", "Get Firefox!");
// Another parent folder *with the same name*
let {id: folder2_id, guid: folder2_guid } = createFolder(bms.toolbarFolder, "Folder 1");
@@ -319,17 +325,17 @@ add_task(async function test_dupe_repare
await cleanup(server);
}
});
add_task(async function test_dupe_reparented_to_earlier_appearing_parent_bookmark() {
_("Ensure that a bookmark we consider a dupe from a different parent that " +
"appears in the same sync before the dupe item");
- let { server, collection } = this.setup();
+ let { server, collection } = await this.setup();
try {
// The parent folder and one bookmark in it.
let {id: folder1_id, guid: folder1_guid } = createFolder(bms.toolbarFolder, "Folder 1");
let {id: bmk1_id, guid: bmk1_guid} = createBookmark(folder1_id, "http://getfirefox.com/", "Get Firefox!");
// One more folder we'll use later.
let {id: folder2_id, guid: folder2_guid} = createFolder(bms.toolbarFolder, "A second folder");
@@ -396,17 +402,17 @@ add_task(async function test_dupe_repare
await cleanup(server);
}
});
add_task(async function test_dupe_reparented_to_later_appearing_parent_bookmark() {
_("Ensure that a bookmark we consider a dupe from a different parent that " +
"doesn't exist locally as we process the child, but does appear in the same sync");
- let { server, collection } = this.setup();
+ let { server, collection } = await this.setup();
try {
// The parent folder and one bookmark in it.
let {id: folder1_id, guid: folder1_guid } = createFolder(bms.toolbarFolder, "Folder 1");
let {id: bmk1_id, guid: bmk1_guid} = createBookmark(folder1_id, "http://getfirefox.com/", "Get Firefox!");
// One more folder we'll use later.
let {id: folder2_id, guid: folder2_guid} = createFolder(bms.toolbarFolder, "A second folder");
@@ -473,17 +479,17 @@ add_task(async function test_dupe_repare
await cleanup(server);
}
});
add_task(async function test_dupe_reparented_to_future_arriving_parent_bookmark() {
_("Ensure that a bookmark we consider a dupe from a different parent that " +
"doesn't exist locally and doesn't appear in this Sync is handled correctly");
- let { server, collection } = this.setup();
+ let { server, collection } = await this.setup();
try {
// The parent folder and one bookmark in it.
let {id: folder1_id, guid: folder1_guid } = createFolder(bms.toolbarFolder, "Folder 1");
let {id: bmk1_id, guid: bmk1_guid} = createBookmark(folder1_id, "http://getfirefox.com/", "Get Firefox!");
// One more folder we'll use later.
let {id: folder2_id, guid: folder2_guid} = createFolder(bms.toolbarFolder, "A second folder");
@@ -593,17 +599,17 @@ add_task(async function test_dupe_repare
await cleanup(server);
}
});
add_task(async function test_dupe_empty_folder() {
_("Ensure that an empty folder we consider a dupe is handled correctly.");
// Empty folders aren't particularly interesting in practice (as that seems
// an edge-case) but duping folders with items is broken - bug 1293163.
- let { server, collection } = this.setup();
+ let { server, collection } = await this.setup();
try {
// The folder we will end up duping away.
let {id: folder1_id, guid: folder1_guid } = createFolder(bms.toolbarFolder, "Folder 1");
engine.sync();
// We've added 1 folder, "menu", "toolbar", "unfiled", and "mobile".
--- a/services/sync/tests/unit/test_bookmark_engine.js
+++ b/services/sync/tests/unit/test_bookmark_engine.js
@@ -46,17 +46,17 @@ async function fetchAllSyncIds() {
add_task(async function test_delete_invalid_roots_from_server() {
_("Ensure that we delete the Places and Reading List roots from the server.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let tracker = engine._tracker;
let server = serverForFoo(engine);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let collection = server.user("foo").collection("bookmarks");
Svc.Obs.notify("weave:engine:start-tracking");
try {
collection.insert("places", encryptPayload(store.createRecord("places").cleartext));
@@ -103,17 +103,17 @@ add_task(async function test_delete_inva
add_task(async function test_change_during_sync() {
_("Ensure that we track changes made during a sync.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let tracker = engine._tracker;
let server = serverForFoo(engine);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let collection = server.user("foo").collection("bookmarks");
let bz_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarksMenuFolderId, Utils.makeURI("https://bugzilla.mozilla.org/"),
PlacesUtils.bookmarks.DEFAULT_INDEX, "Bugzilla");
let bz_guid = await PlacesUtils.promiseItemGuid(bz_id);
_(`Bugzilla GUID: ${bz_guid}`);
@@ -251,17 +251,17 @@ add_task(async function test_change_duri
await promiseStopServer(server);
Svc.Obs.notify("weave:engine:stop-tracking");
}
});
add_task(async function bad_record_allIDs() {
let server = new SyncServer();
server.start();
- let syncTesting = new SyncTestingInfrastructure(server.server);
+ let syncTesting = await SyncTestingInfrastructure(server);
_("Ensure that bad Places queries don't cause an error in getAllIDs.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let badRecordID = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder,
Utils.makeURI("place:folder=1138"),
PlacesUtils.bookmarks.DEFAULT_INDEX,
@@ -280,30 +280,36 @@ add_task(async function bad_record_allID
_("Clean up.");
PlacesUtils.bookmarks.removeItem(badRecordID);
await PlacesSyncUtils.bookmarks.reset();
await promiseStopServer(server);
});
function serverForFoo(engine) {
+ // The bookmarks engine *always* tracks changes, meaning we might try
+ // and sync due to the bookmarks we ourselves create! Worse, because we
+ // do an engine sync only, there's no locking - so we end up with multiple
+ // syncs running. Neuter that by making the threshold very large.
+ Service.scheduler.syncThreshold = 10000000;
+
return serverForUsers({"foo": "password"}, {
meta: {global: {engines: {bookmarks: {version: engine.version,
syncID: engine.syncID}}}},
bookmarks: {}
});
}
add_task(async function test_processIncoming_error_orderChildren() {
_("Ensure that _orderChildren() is called even when _processIncoming() throws an error.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let collection = server.user("foo").collection("bookmarks");
try {
let folder1_id = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder 1", 0);
let folder1_guid = store.GUIDForId(folder1_id);
@@ -363,17 +369,17 @@ add_task(async function test_processInco
}
});
add_task(async function test_restorePromptsReupload() {
_("Ensure that restoring from a backup will reupload all records.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let collection = server.user("foo").collection("bookmarks");
Svc.Obs.notify("weave:engine:start-tracking"); // We skip usual startup...
try {
let folder1_id = PlacesUtils.bookmarks.createFolder(
@@ -533,17 +539,17 @@ add_task(async function test_mismatched_
"oT74WwV8_j4P", "IztsItWVSo3-"],
"parentid": "toolbar"
};
newRecord.cleartext = newRecord;
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
_("GUID: " + store.GUIDForId(6, true));
try {
let bms = PlacesUtils.bookmarks;
let oldR = new FakeRecord(BookmarkFolder, oldRecord);
let newR = new FakeRecord(Livemark, newRecord);
oldR.parentid = PlacesUtils.bookmarks.toolbarGuid;
@@ -578,17 +584,17 @@ add_task(async function test_mismatched_
add_task(async function test_bookmark_guidMap_fail() {
_("Ensure that failures building the GUID map cause early death.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
let coll = server.user("foo").collection("bookmarks");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
// Add one item to the server.
let itemID = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder 1", 0);
let itemGUID = store.GUIDForId(itemID);
let itemPayload = store.createRecord(itemGUID).cleartext;
coll.insert(itemGUID, encryptPayload(itemPayload));
--- a/services/sync/tests/unit/test_bookmark_smart_bookmarks.js
+++ b/services/sync/tests/unit/test_bookmark_smart_bookmarks.js
@@ -54,17 +54,17 @@ function serverForFoo(engine) {
bookmarks: {}
});
}
// Verify that Places smart bookmarks have their annotation uploaded and
// handled locally.
add_task(async function test_annotation_uploaded() {
let server = serverForFoo(engine);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let startCount = smartBookmarkCount();
_("Start count is " + startCount);
if (startCount > 0) {
// This can happen in XULRunner.
clearBookmarks();
@@ -163,23 +163,23 @@ add_task(async function test_annotation_
newID, SMART_BOOKMARKS_ANNO));
} finally {
// Clean up.
store.wipe();
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_smart_bookmarks_duped() {
+add_task(async function test_smart_bookmarks_duped() {
let server = serverForFoo(engine);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let parent = PlacesUtils.toolbarFolderId;
let uri =
Utils.makeURI("place:sort=" +
Ci.nsINavHistoryQueryOptions.SORT_BY_VISITCOUNT_DESCENDING +
"&maxResults=10");
let title = "Most Visited";
let mostVisitedID = newSmartBookmark(parent, uri, -1, title, "MostVisited");
@@ -214,17 +214,17 @@ add_test(function test_smart_bookmarks_d
delete record.queryId;
do_check_eq(mostVisitedGUID, engine._mapDupe(record));
engine._syncFinish();
} finally {
// Clean up.
store.wipe();
- server.stop(do_test_finished);
+ await promiseStopServer(server);
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
}
});
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Bookmarks").level = Log.Level.Trace;
--- a/services/sync/tests/unit/test_clients_engine.js
+++ b/services/sync/tests/unit/test_clients_engine.js
@@ -198,30 +198,30 @@ add_test(function test_properties() {
engine.lastRecordUpload = now / 1000;
equal(engine.lastRecordUpload, Math.floor(now / 1000));
} finally {
Svc.Prefs.resetBranch("");
run_next_test();
}
});
-add_test(function test_full_sync() {
+add_task(async function test_full_sync() {
_("Ensure that Clients engine fetches all records for each sync.");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let activeID = Utils.makeGUID();
server.insertWBO("foo", "clients", new ServerWBO(activeID, encryptPayload({
id: activeID,
name: "Active client",
type: "desktop",
commands: [],
@@ -266,34 +266,34 @@ add_test(function test_full_sync() {
"Deleted client should be removed on next sync");
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_sync() {
+add_task(async function test_sync() {
_("Ensure that Clients engine uploads a new client record once a week.");
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
function clientWBO() {
return user.collection("clients").wbo(engine.localID);
}
try {
@@ -321,17 +321,17 @@ add_test(function test_sync() {
let yesterday = engine.lastRecordUpload;
engine._sync();
equal(clientWBO().payload, undefined);
equal(engine.lastRecordUpload, yesterday);
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
add_test(function test_client_name_change() {
_("Ensure client name change incurs a client record update.");
let tracker = engine._tracker;
@@ -519,30 +519,30 @@ add_test(function test_process_incoming_
};
Svc.Obs.add(ev, handler);
// logout command causes processIncomingCommands to return explicit false.
ok(!engine.processIncomingCommands());
});
-add_test(function test_filter_duplicate_names() {
+add_task(async function test_filter_duplicate_names() {
_("Ensure that we exclude clients with identical names that haven't synced in a week.");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
// Synced recently.
let recentID = Utils.makeGUID();
server.insertWBO("foo", "clients", new ServerWBO(recentID, encryptPayload({
id: recentID,
name: "My Phone",
type: "mobile",
@@ -670,35 +670,35 @@ add_test(function test_filter_duplicate_
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_command_sync() {
+add_task(async function test_command_sync() {
_("Ensure that commands are synced across clients.");
engine._store.wipe();
generateNewKeys(Service.collectionKeys);
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let user = server.user("foo");
let remoteId = Utils.makeGUID();
function clientWBO(id) {
return user.collection("clients").wbo(id);
}
@@ -748,17 +748,17 @@ add_test(function test_command_sync() {
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
try {
let collection = server.getCollection("foo", "clients");
collection.remove(remoteId);
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
add_test(function test_send_uri_to_client_for_display() {
_("Ensure sendURIToClientForDisplay() sends command properly.");
let tracker = engine._tracker;
@@ -875,30 +875,30 @@ add_test(function test_optional_client_f
// We don't currently populate device or formfactor.
// See Bug 1100722, Bug 1100723.
engine._resetClient();
run_next_test();
});
-add_test(function test_merge_commands() {
+add_task(async function test_merge_commands() {
_("Verifies local commands for remote clients are merged with the server's");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let desktopID = Utils.makeGUID();
server.insertWBO("foo", "clients", new ServerWBO(desktopID, encryptPayload({
id: desktopID,
name: "Desktop client",
type: "desktop",
commands: [{
@@ -949,35 +949,35 @@ add_test(function test_merge_commands()
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
engine._resetClient();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_duplicate_remote_commands() {
+add_task(async function test_duplicate_remote_commands() {
_("Verifies local commands for remote clients are sent only once (bug 1289287)");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let desktopID = Utils.makeGUID();
server.insertWBO("foo", "clients", new ServerWBO(desktopID, encryptPayload({
id: desktopID,
name: "Desktop client",
type: "desktop",
commands: [],
@@ -1019,35 +1019,35 @@ add_test(function test_duplicate_remote_
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
engine._resetClient();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_upload_after_reboot() {
+add_task(async function test_upload_after_reboot() {
_("Multiple downloads, reboot, then upload (bug 1289287)");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let deviceBID = Utils.makeGUID();
let deviceCID = Utils.makeGUID();
server.insertWBO("foo", "clients", new ServerWBO(deviceBID, encryptPayload({
id: deviceBID,
name: "Device B",
type: "desktop",
@@ -1110,35 +1110,35 @@ add_test(function test_upload_after_rebo
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
engine._resetClient();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_keep_cleared_commands_after_reboot() {
+add_task(async function test_keep_cleared_commands_after_reboot() {
_("Download commands, fail upload, reboot, then apply new commands (bug 1289287)");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let deviceBID = Utils.makeGUID();
let deviceCID = Utils.makeGUID();
server.insertWBO("foo", "clients", new ServerWBO(engine.localID, encryptPayload({
id: engine.localID,
name: "Device A",
type: "desktop",
@@ -1228,35 +1228,35 @@ add_test(function test_keep_cleared_comm
// Reset service (remove mocks)
engine = Service.clientsEngine = new ClientEngine(Service);
engine._resetClient();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_deleted_commands() {
+add_task(async function test_deleted_commands() {
_("Verifies commands for a deleted client are discarded");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let activeID = Utils.makeGUID();
server.insertWBO("foo", "clients", new ServerWBO(activeID, encryptPayload({
id: activeID,
name: "Active client",
type: "desktop",
commands: [],
@@ -1297,35 +1297,35 @@ add_test(function test_deleted_commands(
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
engine._resetClient();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_send_uri_ack() {
+add_task(async function test_send_uri_ack() {
_("Ensure a sent URI is deleted when the client syncs");
let now = Date.now() / 1000;
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
let user = server.user("foo");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
try {
let fakeSenderID = Utils.makeGUID();
_("Initial sync for empty clients collection");
engine._sync();
let collection = server.getCollection("foo", "clients");
@@ -1360,35 +1360,35 @@ add_test(function test_send_uri_ack() {
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
engine._resetClient();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
-add_test(function test_command_sync() {
+add_task(async function test_command_sync() {
_("Notify other clients when writing their record.");
engine._store.wipe();
generateNewKeys(Service.collectionKeys);
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let server = serverForUsers({"foo": "password"}, contents);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let user = server.user("foo");
let collection = server.getCollection("foo", "clients");
let remoteId = Utils.makeGUID();
let remoteId2 = Utils.makeGUID();
function clientWBO(id) {
return user.collection("clients").wbo(id);
@@ -1432,17 +1432,17 @@ add_test(function test_command_sync() {
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
try {
server.deleteCollections("foo");
} finally {
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
}
});
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Clients").level = Log.Level.Trace;
run_next_test();
--- a/services/sync/tests/unit/test_collections_recovery.js
+++ b/services/sync/tests/unit/test_collections_recovery.js
@@ -19,34 +19,32 @@ add_identity_test(this, async function t
response.setStatusLine(request.httpVersion, 200, "OK");
response.bodyOutputStream.write(body, body.length);
} else {
handler(request, response);
}
};
}
- await configureIdentity({username: "johndoe"});
-
let handlers = {
"/1.1/johndoe/info/collections": maybe_empty(johnHelper.handler),
"/1.1/johndoe/storage/crypto/keys": johnU("crypto", new ServerWBO("keys").handler()),
"/1.1/johndoe/storage/meta/global": johnU("meta", new ServerWBO("global").handler())
};
let collections = ["clients", "bookmarks", "forms", "history",
"passwords", "prefs", "tabs"];
// Disable addon sync because AddonManager won't be initialized here.
Service.engineManager.unregister("addons");
for (let coll of collections) {
handlers["/1.1/johndoe/storage/" + coll] =
johnU(coll, new ServerCollection({}, true).handler());
}
let server = httpd_setup(handlers);
- Service.serverURL = server.baseURI;
+ await configureIdentity({username: "johndoe"}, server);
try {
let fresh = 0;
let orig = Service._freshStart;
Service._freshStart = function() {
_("Called _freshStart.");
orig.call(Service);
fresh++;
--- a/services/sync/tests/unit/test_engine_abort.js
+++ b/services/sync/tests/unit/test_engine_abort.js
@@ -3,30 +3,30 @@
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/rotaryengine.js");
Cu.import("resource://testing-common/services/sync/utils.js");
-add_test(function test_processIncoming_abort() {
+add_task(async function test_processIncoming_abort() {
_("An abort exception, raised in applyIncoming, will abort _processIncoming.");
let engine = new RotaryEngine(Service);
let collection = new ServerCollection();
let id = Utils.makeGUID();
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- new SyncTestingInfrastructure(server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
_("Create some server data.");
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
_("Fake applyIncoming to abort.");
@@ -54,16 +54,16 @@ add_test(function test_processIncoming_a
// This will quietly fail.
engine.sync();
} catch (ex) {
err = ex;
}
do_check_eq(err, undefined);
- server.stop(run_next_test);
+ await promiseStopServer(server);
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
});
function run_test() {
run_next_test();
}
--- a/services/sync/tests/unit/test_errorhandler_1.js
+++ b/services/sync/tests/unit/test_errorhandler_1.js
@@ -94,17 +94,17 @@ add_identity_test(this, async function t
Service.startOver();
server.stop(deferred.resolve);
});
}
Svc.Obs.add("weave:service:login:error", onLoginError);
}
// Make sync fail due to login rejected.
- await configureIdentity({username: "janedoe"});
+ await configureIdentity({username: "janedoe"}, server);
Service._updateCachedURLs();
_("Starting first sync.");
let ping = await sync_and_validate_telem(true);
deepEqual(ping.failureReason, { name: "httperror", code: 401 });
_("First sync done.");
await deferred.promise;
});
@@ -443,18 +443,19 @@ add_identity_test(this, async function t
deepEqual(ping.failureReason, {
name: "unexpectederror",
error: "Error: Aborting sync, remote setup failed"
});
await promiseObserved;
do_check_eq(Status.sync, CREDENTIALS_CHANGED);
// If we clean this tick, telemetry won't get the right error
+ await promiseNextTick();
+ clean();
await promiseStopServer(server);
- clean();
});
add_task(async function test_login_syncAndReportErrors_prolonged_non_network_error() {
// Test prolonged, non-network errors are
// reported when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
Service.identity.resetSyncKey();
@@ -491,18 +492,19 @@ add_identity_test(this, async function t
deepEqual(ping.failureReason, {
name: "unexpectederror",
error: "Error: Aborting sync, remote setup failed"
});
await promiseObserved;
do_check_eq(Status.sync, CREDENTIALS_CHANGED);
// If we clean this tick, telemetry won't get the right error
+ await promiseNextTick();
+ clean();
await promiseStopServer(server);
- clean();
});
add_identity_test(this, async function test_login_syncAndReportErrors_network_error() {
// Test network errors are reported when calling syncAndReportErrors.
await configureIdentity({username: "broken.wipe"});
Service.serverURL = fakeServerUrl;
Service.clusterURL = fakeServerUrl;
@@ -609,18 +611,18 @@ add_task(async function test_sync_prolon
equal(ping.status.sync, PROLONGED_SYNC_FAILURE);
deepEqual(ping.failureReason, {
name: "unexpectederror",
error: "Error: Aborting sync, remote setup failed"
});
await promiseObserved;
do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
do_check_true(errorHandler.didReportProlongedError);
+ clean();
await promiseStopServer(server);
- clean();
});
add_identity_test(this, async function test_login_prolonged_network_error() {
// Test prolonged, network errors are reported
await configureIdentity({username: "johndoe"});
Service.serverURL = fakeServerUrl;
Service.clusterURL = fakeServerUrl;
@@ -756,29 +758,26 @@ add_identity_test(this, async function t
equal(ping.status.sync, SERVER_MAINTENANCE);
deepEqual(ping.engines.find(e => e.failureReason).failureReason, { name: "httperror", code: 503 })
await promiseObserved;
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
do_check_eq(Status.sync, SERVER_MAINTENANCE);
do_check_false(errorHandler.didReportProlongedError);
+ clean();
await promiseStopServer(server);
- clean();
});
add_identity_test(this, async function test_info_collections_login_server_maintenance_error() {
// Test info/collections server maintenance errors are not reported.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- Service.username = "broken.info";
- await configureIdentity({username: "broken.info"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.info"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
function onUIUpdate() {
@@ -806,19 +805,17 @@ add_identity_test(this, async function t
await promiseStopServer(server);
});
add_identity_test(this, async function test_meta_global_login_server_maintenance_error() {
// Test meta/global server maintenance errors are not reported.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.meta"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.meta"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
function onUIUpdate() {
--- a/services/sync/tests/unit/test_errorhandler_2.js
+++ b/services/sync/tests/unit/test_errorhandler_2.js
@@ -64,19 +64,17 @@ function clean() {
}
add_identity_test(this, async function test_crypto_keys_login_server_maintenance_error() {
Status.resetSync();
// Test crypto/keys server maintenance errors are not reported.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.keys"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.keys"}, server);
// Force re-download of keys
Service.collectionKeys.clear();
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
@@ -137,19 +135,17 @@ add_task(async function test_sync_prolon
clean();
});
add_identity_test(this, async function test_info_collections_login_prolonged_server_maintenance_error(){
// Test info/collections prolonged server maintenance errors are reported.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.info"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.info"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -171,19 +167,17 @@ add_identity_test(this, async function t
await promiseStopServer(server);
});
add_identity_test(this, async function test_meta_global_login_prolonged_server_maintenance_error(){
// Test meta/global prolonged server maintenance errors are reported.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.meta"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.meta"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -205,19 +199,17 @@ add_identity_test(this, async function t
await promiseStopServer(server);
});
add_identity_test(this, async function test_download_crypto_keys_login_prolonged_server_maintenance_error(){
// Test crypto/keys prolonged server maintenance errors are reported.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.keys"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.keys"}, server);
// Force re-download of keys
Service.collectionKeys.clear();
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
@@ -240,19 +232,17 @@ add_identity_test(this, async function t
await promiseStopServer(server);
});
add_identity_test(this, async function test_upload_crypto_keys_login_prolonged_server_maintenance_error(){
// Test crypto/keys prolonged server maintenance errors are reported.
let server = EHTestsCommon.sync_httpd_setup();
// Start off with an empty account, do not upload a key.
- await configureIdentity({username: "broken.keys"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.keys"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -275,19 +265,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_wipeServer_login_prolonged_server_maintenance_error(){
// Test that we report prolonged server maintenance errors that occur whilst
// wiping the server.
let server = EHTestsCommon.sync_httpd_setup();
// Start off with an empty account, do not upload a key.
- await configureIdentity({username: "broken.wipe"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.wipe"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -310,19 +298,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_wipeRemote_prolonged_server_maintenance_error(){
// Test that we report prolonged server maintenance errors that occur whilst
// wiping all remote devices.
let server = EHTestsCommon.sync_httpd_setup();
server.registerPathHandler("/1.1/broken.wipe/storage/catapult", EHTestsCommon.service_unavailable);
- await configureIdentity({username: "broken.wipe"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.wipe"}, server);
EHTestsCommon.generateAndUploadKeys();
let engine = engineManager.get("catapult");
engine.exception = null;
engine.enabled = true;
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
@@ -380,19 +366,17 @@ add_task(async function test_sync_syncAn
});
add_identity_test(this, async function test_info_collections_login_syncAndReportErrors_server_maintenance_error() {
// Test info/collections server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.info"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.info"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -415,19 +399,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_meta_global_login_syncAndReportErrors_server_maintenance_error() {
// Test meta/global server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.meta"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.meta"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -450,19 +432,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_download_crypto_keys_login_syncAndReportErrors_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.keys"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.keys"}, server);
// Force re-download of keys
Service.collectionKeys.clear();
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
@@ -487,19 +467,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_upload_crypto_keys_login_syncAndReportErrors_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
// Start off with an empty account, do not upload a key.
- await configureIdentity({username: "broken.keys"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.keys"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -522,19 +500,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_wipeServer_login_syncAndReportErrors_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
// Start off with an empty account, do not upload a key.
- await configureIdentity({username: "broken.wipe"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.wipe"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -556,19 +532,17 @@ add_identity_test(this, async function t
await promiseStopServer(server);
});
add_identity_test(this, async function test_wipeRemote_syncAndReportErrors_server_maintenance_error(){
// Test that we report prolonged server maintenance errors that occur whilst
// wiping all remote devices.
let server = EHTestsCommon.sync_httpd_setup();
- await configureIdentity({username: "broken.wipe"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.wipe"}, server);
EHTestsCommon.generateAndUploadKeys();
let engine = engineManager.get("catapult");
engine.exception = null;
engine.enabled = true;
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
@@ -628,19 +602,17 @@ add_task(async function test_sync_syncAn
});
add_identity_test(this, async function test_info_collections_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test info/collections server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.info"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.info"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -665,19 +637,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_meta_global_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test meta/global server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.meta"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.meta"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -702,19 +672,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_download_crypto_keys_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
await EHTestsCommon.setUp(server);
- await configureIdentity({username: "broken.keys"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.keys"}, server);
// Force re-download of keys
Service.collectionKeys.clear();
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
@@ -741,19 +709,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_upload_crypto_keys_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
// Start off with an empty account, do not upload a key.
- await configureIdentity({username: "broken.keys"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.keys"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
@@ -778,19 +744,17 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_wipeServer_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = EHTestsCommon.sync_httpd_setup();
// Start off with an empty account, do not upload a key.
- await configureIdentity({username: "broken.wipe"});
- Service.serverURL = server.baseURI + "/maintenance/";
- Service.clusterURL = server.baseURI + "/maintenance/";
+ await configureIdentity({username: "broken.wipe"}, server);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
let promiseObserved = promiseOneObserver("weave:ui:login:error");
--- a/services/sync/tests/unit/test_errorhandler_eol.js
+++ b/services/sync/tests/unit/test_errorhandler_eol.js
@@ -39,19 +39,17 @@ function handler200(eolCode) {
function sync_httpd_setup(infoHandler) {
let handlers = {
"/1.1/johndoe/info/collections": infoHandler,
};
return httpd_setup(handlers);
}
async function setUp(server) {
- await configureIdentity({username: "johndoe"});
- Service.serverURL = server.baseURI + "/";
- Service.clusterURL = server.baseURI + "/";
+ await configureIdentity({username: "johndoe"}, server);
new FakeCryptoService();
}
function run_test() {
run_next_test();
}
function do_check_soft_eol(eh, start) {
--- a/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
+++ b/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
@@ -49,19 +49,17 @@ function sync_httpd_setup() {
"/1.1/johndoe/storage/meta/global": upd("meta", globalWBO.handler()),
"/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler()),
"/1.1/johndoe/storage/crypto/keys": upd("crypto", keysWBO.handler())
};
return httpd_setup(handlers);
}
async function setUp(server) {
- await configureIdentity({username: "johndoe"});
- Service.serverURL = server.baseURI + "/";
- Service.clusterURL = server.baseURI + "/";
+ await configureIdentity({username: "johndoe"}, server);
new FakeCryptoService();
}
function generateAndUploadKeys(server) {
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
let res = Service.resource(server.baseURI + "/1.1/johndoe/storage/crypto/keys");
--- a/services/sync/tests/unit/test_history_engine.js
+++ b/services/sync/tests/unit/test_history_engine.js
@@ -11,17 +11,17 @@ Cu.import("resource://services-sync/util
Cu.import("resource://testing-common/services/sync/utils.js");
Service.engineManager.clear();
add_test(function test_setup() {
PlacesTestUtils.clearHistory().then(run_next_test);
});
-add_test(function test_processIncoming_mobile_history_batched() {
+add_task(async function test_processIncoming_mobile_history_batched() {
_("SyncEngine._processIncoming works on history engine.");
let FAKE_DOWNLOAD_LIMIT = 100;
Svc.Prefs.set("client.type", "mobile");
Service.engineManager.register(HistoryEngine);
// A collection that logs each GET
@@ -32,17 +32,17 @@ add_test(function test_processIncoming_m
this.get_log.push(options);
return this._get(options);
};
let server = sync_httpd_setup({
"/1.1/foo/storage/history": collection.handler()
});
- new SyncTestingInfrastructure(server);
+ await SyncTestingInfrastructure(server);
// Let's create some 234 server side history records. They're all at least
// 10 minutes old.
let visitType = Ci.nsINavHistoryService.TRANSITION_LINK;
for (var i = 0; i < 234; i++) {
let id = 'record-no' + ("00" + i).slice(-3);
let modified = Date.now()/1000 - 60*(i+10);
let payload = encryptPayload({
@@ -127,21 +127,20 @@ add_test(function test_processIncoming_m
do_check_eq(collection.get_log[j].limit, undefined);
if (i < Math.floor((234 - 50) / MOBILE_BATCH_SIZE))
do_check_eq(collection.get_log[j].ids.length, MOBILE_BATCH_SIZE);
else
do_check_eq(collection.get_log[j].ids.length, 234 % MOBILE_BATCH_SIZE);
}
} finally {
- PlacesTestUtils.clearHistory().then(() => {
- server.stop(do_test_finished);
- Svc.Prefs.resetBranch("");
- Service.recordManager.clearCache();
- });
+ await PlacesTestUtils.clearHistory();
+ await promiseStopServer(server);
+ Svc.Prefs.resetBranch("");
+ Service.recordManager.clearCache();
}
});
function run_test() {
generateNewKeys(Service.collectionKeys);
run_next_test();
}
--- a/services/sync/tests/unit/test_interval_triggers.js
+++ b/services/sync/tests/unit/test_interval_triggers.js
@@ -36,19 +36,17 @@ function sync_httpd_setup() {
"/1.1/johndoe/info/collections": collectionsHelper.handler,
"/1.1/johndoe/storage/crypto/keys":
upd("crypto", (new ServerWBO("keys")).handler()),
"/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler())
});
}
async function setUp(server) {
- await configureIdentity({username: "johndoe"});
- Service.serverURL = server.baseURI + "/";
- Service.clusterURL = server.baseURI + "/";
+ await configureIdentity({username: "johndoe"}, server);
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
serverKeys.upload(Service.resource(Service.cryptoKeysURL));
}
function run_test() {
initTestLogging("Trace");
--- a/services/sync/tests/unit/test_score_triggers.js
+++ b/services/sync/tests/unit/test_score_triggers.js
@@ -38,18 +38,18 @@ function sync_httpd_setup() {
let cl = new ServerCollection();
handlers["/1.1/johndoe/storage/clients"] =
upd("clients", cl.handler());
return httpd_setup(handlers);
}
-function setUp(server) {
- new SyncTestingInfrastructure(server, "johndoe", "ilovejane", "sekrit");
+async function setUp(server) {
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
}
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Service").level = Log.Level.Trace;
run_next_test();
@@ -73,77 +73,72 @@ add_test(function test_tracker_score_upd
do_check_eq(scoreUpdated, 1);
} finally {
Svc.Obs.remove("weave:engine:score:updated", onScoreUpdated);
tracker.resetScore();
run_next_test();
}
});
-add_test(function test_sync_triggered() {
+add_task(async function test_sync_triggered() {
let server = sync_httpd_setup();
- setUp(server);
+ await setUp(server);
Service.login();
Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
- Svc.Obs.add("weave:service:sync:finish", function onSyncFinish() {
- Svc.Obs.remove("weave:service:sync:finish", onSyncFinish);
- _("Sync completed!");
- server.stop(run_next_test);
- });
+
do_check_eq(Status.login, LOGIN_SUCCEEDED);
tracker.score += SCORE_INCREMENT_XLARGE;
+
+ await promiseOneObserver("weave:service:sync:finish");
+ await promiseStopServer(server);
});
-add_test(function test_clients_engine_sync_triggered() {
+add_task(async function test_clients_engine_sync_triggered() {
_("Ensure that client engine score changes trigger a sync.");
// The clients engine is not registered like other engines. Therefore,
// it needs special treatment throughout the code. Here, we verify the
// global score tracker gives it that treatment. See bug 676042 for more.
let server = sync_httpd_setup();
- setUp(server);
+ await setUp(server);
Service.login();
- const TOPIC = "weave:service:sync:finish";
- Svc.Obs.add(TOPIC, function onSyncFinish() {
- Svc.Obs.remove(TOPIC, onSyncFinish);
- _("Sync due to clients engine change completed.");
- server.stop(run_next_test);
- });
-
Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
do_check_eq(Status.login, LOGIN_SUCCEEDED);
Service.clientsEngine._tracker.score += SCORE_INCREMENT_XLARGE;
+
+ await promiseOneObserver("weave:service:sync:finish");
+ _("Sync due to clients engine change completed.");
+ await promiseStopServer(server);
});
-add_test(function test_incorrect_credentials_sync_not_triggered() {
+add_task(async function test_incorrect_credentials_sync_not_triggered() {
_("Ensure that score changes don't trigger a sync if Status.login != LOGIN_SUCCEEDED.");
let server = sync_httpd_setup();
- setUp(server);
+ await setUp(server);
// Ensure we don't actually try to sync.
function onSyncStart() {
do_throw("Should not get here!");
}
Svc.Obs.add("weave:service:sync:start", onSyncStart);
+ // Faking incorrect credentials to prevent score update.
+ Status.login = LOGIN_FAILED_LOGIN_REJECTED;
+ tracker.score += SCORE_INCREMENT_XLARGE;
+
// First wait >100ms (nsITimers can take up to that much time to fire, so
// we can account for the timer in delayedAutoconnect) and then one event
// loop tick (to account for a possible call to weave:service:sync:start).
- Utils.namedTimer(function() {
- Utils.nextTick(function() {
- Svc.Obs.remove("weave:service:sync:start", onSyncStart);
+ await promiseNamedTimer(150, {}, "timer");
+ await promiseNextTick();
- do_check_eq(Status.login, LOGIN_FAILED_LOGIN_REJECTED);
+ Svc.Obs.remove("weave:service:sync:start", onSyncStart);
- Service.startOver();
- server.stop(run_next_test);
- });
- }, 150, {}, "timer");
+ do_check_eq(Status.login, LOGIN_FAILED_LOGIN_REJECTED);
- // Faking incorrect credentials to prevent score update.
- Status.login = LOGIN_FAILED_LOGIN_REJECTED;
- tracker.score += SCORE_INCREMENT_XLARGE;
+ Service.startOver();
+ await promiseStopServer(server);
});
--- a/services/sync/tests/unit/test_service_sync_401.js
+++ b/services/sync/tests/unit/test_service_sync_401.js
@@ -4,62 +4,63 @@
Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/policies.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
function login_handling(handler) {
return function (request, response) {
- if (basic_auth_matches(request, "johndoe", "ilovejane")) {
+ if (request.hasHeader("Authorization") &&
+ request.getHeader("Authorization").includes('Hawk id="id"')) {
handler(request, response);
} else {
let body = "Unauthorized";
response.setStatusLine(request.httpVersion, 401, "Unauthorized");
response.bodyOutputStream.write(body, body.length);
}
};
}
-function run_test() {
+add_task(async function run_test() {
let logger = Log.repository.rootLogger;
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let collections = collectionsHelper.collections;
- do_test_pending();
let server = httpd_setup({
"/1.1/johndoe/storage/crypto/keys": upd("crypto", new ServerWBO("keys").handler()),
"/1.1/johndoe/storage/meta/global": upd("meta", new ServerWBO("global").handler()),
"/1.1/johndoe/info/collections": login_handling(collectionsHelper.handler)
});
const GLOBAL_SCORE = 42;
try {
_("Set up test fixtures.");
- new SyncTestingInfrastructure(server, "johndoe", "ilovejane", "foo");
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
Service.scheduler.globalScore = GLOBAL_SCORE;
// Avoid daily ping
Svc.Prefs.set("lastPing", Math.floor(Date.now() / 1000));
let threw = false;
Svc.Obs.add("weave:service:sync:error", function (subject, data) {
threw = true;
});
_("Initial state: We're successfully logged in.");
Service.login();
do_check_true(Service.isLoggedIn);
do_check_eq(Service.status.login, LOGIN_SUCCEEDED);
_("Simulate having changed the password somewhere else.");
- Service.identity.basicPassword = "ilovejosephine";
+ Service.identity._token.id = "somethingelse";
+ Service.identity.unlockAndVerifyAuthState = () => Promise.resolve(LOGIN_FAILED_LOGIN_REJECTED);
_("Let's try to sync.");
Service.sync();
_("Verify that sync() threw an exception.");
do_check_true(threw);
_("We're no longer logged in.");
@@ -74,11 +75,11 @@ function run_test() {
try {
Service.sync();
} catch (ex) {
}
do_check_eq(Service.status.login, LOGIN_FAILED_LOGIN_REJECTED);
} finally {
Svc.Prefs.resetBranch("");
- server.stop(do_test_finished);
+ await promiseStopServer(server);
}
-}
+});
--- a/services/sync/tests/unit/test_service_sync_specified.js
+++ b/services/sync/tests/unit/test_service_sync_specified.js
@@ -52,109 +52,108 @@ function sync_httpd_setup(handlers) {
let cl = new ServerCollection();
handlers["/1.1/johndoe/storage/clients"] =
upd("clients", cl.handler());
return httpd_setup(handlers);
}
-function setUp() {
+async function setUp() {
syncedEngines = [];
let engine = Service.engineManager.get("steam");
engine.enabled = true;
engine.syncPriority = 1;
engine = Service.engineManager.get("stirling");
engine.enabled = true;
engine.syncPriority = 2;
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": new ServerWBO("global", {}).handler(),
});
- new SyncTestingInfrastructure(server, "johndoe", "ilovejane",
- "abcdeabcdeabcdeabcdeabcdea");
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
return server;
}
function run_test() {
initTestLogging("Trace");
validate_all_future_pings();
Log.repository.getLogger("Sync.Service").level = Log.Level.Trace;
Log.repository.getLogger("Sync.ErrorHandler").level = Log.Level.Trace;
run_next_test();
}
-add_test(function test_noEngines() {
+add_task(async function test_noEngines() {
_("Test: An empty array of engines to sync does nothing.");
- let server = setUp();
+ let server = await setUp();
try {
_("Sync with no engines specified.");
Service.sync([]);
deepEqual(syncedEngines, [], "no engines were synced");
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_oneEngine() {
+add_task(async function test_oneEngine() {
_("Test: Only one engine is synced.");
- let server = setUp();
+ let server = await setUp();
try {
_("Sync with 1 engine specified.");
Service.sync(["steam"]);
deepEqual(syncedEngines, ["steam"])
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_bothEnginesSpecified() {
+add_task(async function test_bothEnginesSpecified() {
_("Test: All engines are synced when specified in the correct order (1).");
- let server = setUp();
+ let server = await setUp();
try {
_("Sync with both engines specified.");
Service.sync(["steam", "stirling"]);
deepEqual(syncedEngines, ["steam", "stirling"])
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_bothEnginesSpecified() {
+add_task(async function test_bothEnginesSpecified() {
_("Test: All engines are synced when specified in the correct order (2).");
- let server = setUp();
+ let server = await setUp();
try {
_("Sync with both engines specified.");
Service.sync(["stirling", "steam"]);
deepEqual(syncedEngines, ["stirling", "steam"])
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_bothEnginesDefault() {
+add_task(async function test_bothEnginesDefault() {
_("Test: All engines are synced when nothing is specified.");
- let server = setUp();
+ let server = await setUp();
try {
Service.sync();
deepEqual(syncedEngines, ["steam", "stirling"])
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
--- a/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
+++ b/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
@@ -63,19 +63,18 @@ function sync_httpd_setup(handlers) {
let cl = new ServerCollection();
handlers["/1.1/johndoe/storage/clients"] =
upd("clients", cl.handler());
return httpd_setup(handlers);
}
-function setUp(server) {
- new SyncTestingInfrastructure(server, "johndoe", "ilovejane",
- "abcdeabcdeabcdeabcdeabcdea");
+async function setUp(server) {
+ await SyncTestingInfrastructure(server, "johndoe", "ilovejane");
// Ensure that the server has valid keys so that logging in will work and not
// result in a server wipe, rendering many of these tests useless.
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
return serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success;
}
@@ -86,90 +85,90 @@ function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Service").level = Log.Level.Trace;
Log.repository.getLogger("Sync.ErrorHandler").level = Log.Level.Trace;
validate_all_future_pings();
run_next_test();
}
-add_test(function test_newAccount() {
+add_task(async function test_newAccount() {
_("Test: New account does not disable locally enabled engines.");
let engine = Service.engineManager.get("steam");
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": new ServerWBO("global", {}).handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler()
});
- setUp(server);
+ await setUp(server);
try {
_("Engine is enabled from the beginning.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
_("Sync.");
Service.sync();
_("Engine continues to be enabled.");
do_check_true(engine.enabled);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_enabledLocally() {
+add_task(async function test_enabledLocally() {
_("Test: Engine is disabled on remote clients and enabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler()
});
- setUp(server);
+ await setUp(server);
try {
_("Enable engine locally.");
engine.enabled = true;
_("Sync.");
Service.sync();
_("Meta record now contains the new engine.");
do_check_true(!!metaWBO.data.engines.steam);
_("Engine continues to be enabled.");
do_check_true(engine.enabled);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_disabledLocally() {
+add_task(async function test_disabledLocally() {
_("Test: Engine is enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: engine.syncID,
version: engine.version}}
});
let steamCollection = new ServerWBO("steam", PAYLOAD);
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": steamCollection.handler()
});
- setUp(server);
+ await setUp(server);
try {
_("Disable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
engine.enabled = false;
@@ -181,21 +180,21 @@ add_test(function test_disabledLocally()
_("Server records are wiped.");
do_check_eq(steamCollection.payload, undefined);
_("Engine continues to be disabled.");
do_check_false(engine.enabled);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_disabledLocally_wipe503() {
+add_task(async function test_disabledLocally_wipe503() {
_("Test: Engine is enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: engine.syncID,
version: engine.version}}
@@ -208,55 +207,53 @@ add_test(function test_disabledLocally_w
response.setHeader("Retry-After", "23");
response.bodyOutputStream.write(body, body.length);
}
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": service_unavailable
});
- setUp(server);
+ await setUp(server);
_("Disable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
engine.enabled = false;
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
-
- do_check_eq(Service.status.sync, SERVER_MAINTENANCE);
-
- Service.startOver();
- server.stop(run_next_test);
- });
+ let promiseObserved = promiseOneObserver("weave:ui:sync:error");
_("Sync.");
Service.errorHandler.syncAndReportErrors();
+ await promiseObserved;
+ do_check_eq(Service.status.sync, SERVER_MAINTENANCE);
+
+ Service.startOver();
+ await promiseStopServer(server);
});
-add_test(function test_enabledRemotely() {
+add_task(async function test_enabledRemotely() {
_("Test: Engine is disabled locally and enabled on a remote client");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: engine.syncID,
version: engine.version}}
});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global":
upd("meta", metaWBO.handler()),
"/1.1/johndoe/storage/steam":
upd("steam", new ServerWBO("steam", {}).handler())
});
- setUp(server);
+ await setUp(server);
// We need to be very careful how we do this, so that we don't trigger a
// fresh start!
try {
_("Upload some keys to avoid a fresh start.");
let wbo = Service.collectionKeys.generateNewKeysWBO();
wbo.encrypt(Service.identity.syncKeyBundle);
do_check_eq(200, wbo.upload(Service.resource(Service.cryptoKeysURL)).status);
@@ -269,35 +266,35 @@ add_test(function test_enabledRemotely()
_("Engine is enabled.");
do_check_true(engine.enabled);
_("Meta record still present.");
do_check_eq(metaWBO.data.engines.steam.syncID, engine.syncID);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_disabledRemotelyTwoClients() {
+add_task(async function test_disabledRemotelyTwoClients() {
_("Test: Engine is enabled locally and disabled on a remote client... with two clients.");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global":
upd("meta", metaWBO.handler()),
"/1.1/johndoe/storage/steam":
upd("steam", new ServerWBO("steam", {}).handler())
});
- setUp(server);
+ await setUp(server);
try {
_("Enable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
_("Sync.");
@@ -313,65 +310,65 @@ add_test(function test_disabledRemotelyT
Service.clientsEngine._store._remoteClients["foobar"] = {name: "foobar", type: "desktop"};
Service.sync();
_("Engine is disabled.");
do_check_false(engine.enabled);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_disabledRemotely() {
+add_task(async function test_disabledRemotely() {
_("Test: Engine is enabled locally and disabled on a remote client");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler()
});
- setUp(server);
+ await setUp(server);
try {
_("Enable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
_("Sync.");
Service.sync();
_("Engine is not disabled: only one client.");
do_check_true(engine.enabled);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_dependentEnginesEnabledLocally() {
+add_task(async function test_dependentEnginesEnabledLocally() {
_("Test: Engine is disabled on remote clients and enabled locally");
Service.syncID = "abcdefghij";
let steamEngine = Service.engineManager.get("steam");
let stirlingEngine = Service.engineManager.get("stirling");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler(),
"/1.1/johndoe/storage/stirling": new ServerWBO("stirling", {}).handler()
});
- setUp(server);
+ await setUp(server);
try {
_("Enable engine locally. Doing it on one is enough.");
steamEngine.enabled = true;
_("Sync.");
Service.sync();
@@ -379,21 +376,21 @@ add_test(function test_dependentEnginesE
do_check_true(!!metaWBO.data.engines.steam);
do_check_true(!!metaWBO.data.engines.stirling);
_("Engines continue to be enabled.");
do_check_true(steamEngine.enabled);
do_check_true(stirlingEngine.enabled);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
-add_test(function test_dependentEnginesDisabledLocally() {
+add_task(async function test_dependentEnginesDisabledLocally() {
_("Test: Two dependent engines are enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let steamEngine = Service.engineManager.get("steam");
let stirlingEngine = Service.engineManager.get("stirling");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: steamEngine.syncID,
@@ -405,17 +402,17 @@ add_test(function test_dependentEnginesD
let steamCollection = new ServerWBO("steam", PAYLOAD);
let stirlingCollection = new ServerWBO("stirling", PAYLOAD);
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": steamCollection.handler(),
"/1.1/johndoe/storage/stirling": stirlingCollection.handler()
});
- setUp(server);
+ await setUp(server);
try {
_("Disable engines locally. Doing it on one is enough.");
Service._ignorePrefObserver = true;
steamEngine.enabled = true;
do_check_true(stirlingEngine.enabled);
Service._ignorePrefObserver = false;
steamEngine.enabled = false;
@@ -432,11 +429,11 @@ add_test(function test_dependentEnginesD
do_check_eq(steamCollection.payload, undefined);
do_check_eq(stirlingCollection.payload, undefined);
_("Engines continue to be disabled.");
do_check_false(steamEngine.enabled);
do_check_false(stirlingEngine.enabled);
} finally {
Service.startOver();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
});
--- a/services/sync/tests/unit/test_service_wipeServer.js
+++ b/services/sync/tests/unit/test_service_wipeServer.js
@@ -55,17 +55,17 @@ add_identity_test(this, async function t
let server = httpd_setup({
"/1.1/johndoe/storage/steam": steam_coll.handler(),
"/1.1/johndoe/storage/diesel": diesel_coll.handler(),
"/1.1/johndoe/storage/petrol": httpd_handler(404, "Not Found")
});
try {
await setUpTestFixtures(server);
- new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
_("Confirm initial environment.");
do_check_false(steam_coll.deleted);
do_check_false(diesel_coll.deleted);
_("wipeServer() will happily ignore the non-existent collection and use the timestamp of the last DELETE that was successful.");
let timestamp = Service.wipeServer(["steam", "diesel", "petrol"]);
do_check_eq(timestamp, diesel_coll.timestamp);
@@ -89,17 +89,17 @@ add_identity_test(this, async function t
let server = httpd_setup({
"/1.1/johndoe/storage/steam": steam_coll.handler(),
"/1.1/johndoe/storage/petrol": httpd_handler(503, "Service Unavailable"),
"/1.1/johndoe/storage/diesel": diesel_coll.handler()
});
try {
await setUpTestFixtures(server);
- new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
_("Confirm initial environment.");
do_check_false(steam_coll.deleted);
do_check_false(diesel_coll.deleted);
_("wipeServer() will happily ignore the non-existent collection, delete the 'steam' collection and abort after an receiving an error on the 'petrol' collection.");
let error;
try {
@@ -137,17 +137,17 @@ add_identity_test(this, async function t
}
let server = httpd_setup({
"/1.1/johndoe/storage": storageHandler
});
await setUpTestFixtures(server);
_("Try deletion.");
- new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
let returnedTimestamp = Service.wipeServer();
do_check_true(deleted);
do_check_eq(returnedTimestamp, serverTimestamp);
await promiseStopServer(server);
Svc.Prefs.resetBranch("");
});
@@ -169,17 +169,17 @@ add_identity_test(this, async function t
}
let server = httpd_setup({
"/1.1/johndoe/storage": storageHandler
});
await setUpTestFixtures(server);
_("Try deletion.");
- new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
let returnedTimestamp = Service.wipeServer();
do_check_true(deleted);
do_check_eq(returnedTimestamp, serverTimestamp);
await promiseStopServer(server);
Svc.Prefs.resetBranch("");
});
@@ -198,17 +198,17 @@ add_identity_test(this, async function t
let server = httpd_setup({
"/1.1/johndoe/storage": storageHandler
});
await setUpTestFixtures(server);
_("Try deletion.");
let error;
try {
- new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
+ await SyncTestingInfrastructure(server, "johndoe", "irrelevant");
Service.wipeServer();
do_throw("Should have thrown!");
} catch (ex) {
error = ex;
}
do_check_eq(error.status, 503);
await promiseStopServer(server);
--- a/services/sync/tests/unit/test_syncengine.js
+++ b/services/sync/tests/unit/test_syncengine.js
@@ -5,55 +5,56 @@ Cu.import("resource://services-sync/engi
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
function makeSteamEngine() {
return new SyncEngine('Steam', Service);
}
-var server;
+var server = httpd_setup({});
+
-function test_url_attributes() {
+add_task(async function test_url_attributes() {
_("SyncEngine url attributes");
- let syncTesting = new SyncTestingInfrastructure(server);
- Service.clusterURL = "https://cluster/";
+ let syncTesting = await SyncTestingInfrastructure(server);
+ Service.clusterURL = "https://cluster/1.1/foo/";
let engine = makeSteamEngine();
try {
do_check_eq(engine.storageURL, "https://cluster/1.1/foo/storage/");
do_check_eq(engine.engineURL, "https://cluster/1.1/foo/storage/steam");
do_check_eq(engine.metaURL, "https://cluster/1.1/foo/storage/meta/global");
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_syncID() {
+add_task(async function test_syncID() {
_("SyncEngine.syncID corresponds to preference");
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(Svc.Prefs.get("steam.syncID"), undefined);
// Performing the first get on the attribute will generate a new GUID.
do_check_eq(engine.syncID, "fake-guid-00");
do_check_eq(Svc.Prefs.get("steam.syncID"), "fake-guid-00");
Svc.Prefs.set("steam.syncID", Utils.makeGUID());
do_check_eq(Svc.Prefs.get("steam.syncID"), "fake-guid-01");
do_check_eq(engine.syncID, "fake-guid-01");
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_lastSync() {
+add_task(async function test_lastSync() {
_("SyncEngine.lastSync and SyncEngine.lastSyncLocal correspond to preferences");
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(Svc.Prefs.get("steam.lastSync"), undefined);
do_check_eq(engine.lastSync, 0);
do_check_eq(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
do_check_eq(engine.lastSyncLocal, 0);
@@ -69,21 +70,21 @@ function test_lastSync() {
// resetLastSync() resets the value (and preference) to 0
engine.resetLastSync();
do_check_eq(engine.lastSync, 0);
do_check_eq(Svc.Prefs.get("steam.lastSync"), "0");
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_toFetch() {
+add_task(async function test_toFetch() {
_("SyncEngine.toFetch corresponds to file on disk");
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
const filename = "weave/toFetch/steam.json";
let engine = makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(engine.toFetch.length, 0);
// Write file to disk
let toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
@@ -99,21 +100,21 @@ function test_toFetch() {
syncTesting.fakeFilesystem.fakeContents[filename] = JSON.stringify(toFetch);
engine.loadToFetch();
do_check_eq(engine.toFetch.length, 2);
do_check_eq(engine.toFetch[0], toFetch[0]);
do_check_eq(engine.toFetch[1], toFetch[1]);
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_previousFailed() {
+add_task(async function test_previousFailed() {
_("SyncEngine.previousFailed corresponds to file on disk");
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
const filename = "weave/failed/steam.json";
let engine = makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(engine.previousFailed.length, 0);
// Write file to disk
let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
@@ -129,21 +130,21 @@ function test_previousFailed() {
syncTesting.fakeFilesystem.fakeContents[filename] = JSON.stringify(previousFailed);
engine.loadPreviousFailed();
do_check_eq(engine.previousFailed.length, 2);
do_check_eq(engine.previousFailed[0], previousFailed[0]);
do_check_eq(engine.previousFailed[1], previousFailed[1]);
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_resetClient() {
+add_task(async function test_resetClient() {
_("SyncEngine.resetClient resets lastSync and toFetch");
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(Svc.Prefs.get("steam.lastSync"), undefined);
do_check_eq(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
do_check_eq(engine.toFetch.length, 0);
engine.lastSync = 123.45;
@@ -154,28 +155,28 @@ function test_resetClient() {
engine.resetClient();
do_check_eq(engine.lastSync, 0);
do_check_eq(engine.lastSyncLocal, 0);
do_check_eq(engine.toFetch.length, 0);
do_check_eq(engine.previousFailed.length, 0);
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_wipeServer() {
+add_task(async function test_wipeServer() {
_("SyncEngine.wipeServer deletes server data and resets the client.");
let engine = makeSteamEngine();
const PAYLOAD = 42;
let steamCollection = new ServerWBO("steam", PAYLOAD);
let server = httpd_setup({
"/1.1/foo/storage/steam": steamCollection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
do_test_pending();
try {
// Some data to reset.
engine.lastSync = 123.45;
engine.toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
_("Wipe server data and reset client.");
@@ -183,22 +184,13 @@ function test_wipeServer() {
do_check_eq(steamCollection.payload, undefined);
do_check_eq(engine.lastSync, 0);
do_check_eq(engine.toFetch.length, 0);
} finally {
server.stop(do_test_finished);
Svc.Prefs.resetBranch("");
}
-}
+});
-function run_test() {
- server = httpd_setup({});
- test_url_attributes();
- test_syncID();
- test_lastSync();
- test_toFetch();
- test_previousFailed();
- test_resetClient();
- test_wipeServer();
-
- server.stop(run_next_test);
-}
+add_task(async function finish() {
+ await promiseStopServer(server);
+});
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -16,52 +16,50 @@ function makeRotaryEngine() {
}
function clean() {
Svc.Prefs.resetBranch("");
Svc.Prefs.set("log.logger.engine.rotary", "Trace");
Service.recordManager.clearCache();
}
-function cleanAndGo(server) {
+async function cleanAndGo(server) {
clean();
- server.stop(run_next_test);
+ await promiseStopServer(server);
}
async function promiseClean(server) {
clean();
await promiseStopServer(server);
}
function configureService(server, username, password) {
Service.clusterURL = server.baseURI;
Service.identity.account = username || "foo";
Service.identity.basicPassword = password || "password";
}
-function createServerAndConfigureClient() {
+async function createServerAndConfigureClient() {
let engine = new RotaryEngine(Service);
let contents = {
meta: {global: {engines: {rotary: {version: engine.version,
syncID: engine.syncID}}}},
crypto: {},
rotary: {}
};
const USER = "foo";
let server = new SyncServer();
server.registerUser(USER, "password");
server.createContents(USER, contents);
server.start();
- Service.serverURL = server.baseURI;
- Service.clusterURL = server.baseURI;
- Service.identity.username = USER;
+ await SyncTestingInfrastructure(server, USER);
Service._updateCachedURLs();
return [engine, server, USER];
}
function run_test() {
generateNewKeys(Service.collectionKeys);
Svc.Prefs.set("log.logger.engine.rotary", "Trace");
@@ -77,33 +75,33 @@ function run_test() {
* - _processIncoming()
* - _uploadOutgoing()
* - _syncFinish()
*
* In the spirit of unit testing, these are tested individually for
* different scenarios below.
*/
-add_test(function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
+add_task(async function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
_("SyncEngine._syncStartup resets sync and wipes server data if there's no or an outdated global record");
// Some server side data that's going to be wiped
let collection = new ServerCollection();
collection.insert('flying',
encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
collection.insert('scotsman',
encryptPayload({id: 'scotsman',
denomination: "Flying Scotsman"}));
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
Service.identity.username = "foo";
let engine = makeRotaryEngine();
engine._store.items = {rekolok: "Rekonstruktionslokomotive"};
try {
// Confirm initial environment
do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
@@ -125,55 +123,55 @@ add_test(function test_syncStartup_empty
do_check_eq(engineData.syncID, engine.syncID);
// Sync was reset and server data was wiped
do_check_eq(engine.lastSync, 0);
do_check_eq(collection.payload("flying"), undefined);
do_check_eq(collection.payload("scotsman"), undefined);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_syncStartup_serverHasNewerVersion() {
+add_task(async function test_syncStartup_serverHasNewerVersion() {
_("SyncEngine._syncStartup ");
let global = new ServerWBO('global', {engines: {rotary: {version: 23456}}});
let server = httpd_setup({
"/1.1/foo/storage/meta/global": global.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
Service.identity.username = "foo";
let engine = makeRotaryEngine();
try {
// The server has a newer version of the data and our engine can
// handle. That should give us an exception.
let error;
try {
engine._syncStartup();
} catch (ex) {
error = ex;
}
do_check_eq(error.failureCode, VERSION_OUT_OF_DATE);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_syncStartup_syncIDMismatchResetsClient() {
+add_task(async function test_syncStartup_syncIDMismatchResetsClient() {
_("SyncEngine._syncStartup resets sync if syncIDs don't match");
let server = sync_httpd_setup({});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
Service.identity.username = "foo";
// global record with a different syncID than our engine has
let engine = makeRotaryEngine();
let global = new ServerWBO('global',
{engines: {rotary: {version: engine.version,
syncID: 'foobar'}}});
server.registerPathHandler("/1.1/foo/storage/meta/global", global.handler());
@@ -190,46 +188,46 @@ add_test(function test_syncStartup_syncI
// The engine has assumed the server's syncID
do_check_eq(engine.syncID, 'foobar');
// Sync was reset
do_check_eq(engine.lastSync, 0);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_emptyServer() {
+add_task(async function test_processIncoming_emptyServer() {
_("SyncEngine._processIncoming working with an empty server backend");
let collection = new ServerCollection();
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
Service.identity.username = "foo";
let engine = makeRotaryEngine();
try {
// Merely ensure that this code path is run without any errors
engine._processIncoming();
do_check_eq(engine.lastSync, 0);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_createFromServer() {
+add_task(async function test_processIncoming_createFromServer() {
_("SyncEngine._processIncoming creates new records from server data");
// Some server records that will be downloaded
let collection = new ServerCollection();
collection.insert('flying',
encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
collection.insert('scotsman',
@@ -242,17 +240,17 @@ add_test(function test_processIncoming_c
collection.insert('../pathological', pathologicalPayload);
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler(),
"/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
"/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
Service.identity.username = "foo";
generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
@@ -275,22 +273,22 @@ add_test(function test_processIncoming_c
do_check_true(engine.lastModified > 0);
// Local records have been created from the server data.
do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
do_check_eq(engine._store.items['../pathological'], "Pathological Case");
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_reconcile() {
+add_task(async function test_processIncoming_reconcile() {
_("SyncEngine._processIncoming updates local records");
let collection = new ServerCollection();
// This server record is newer than the corresponding client one,
// so it'll update its data.
collection.insert('newrecord',
encryptPayload({id: 'newrecord',
@@ -327,17 +325,17 @@ add_test(function test_processIncoming_r
encryptPayload({id: 'nukeme',
denomination: "Nuke me!",
deleted: true}));
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
Service.identity.username = "foo";
let engine = makeRotaryEngine();
engine._store.items = {newerserver: "New data, but not as new as server!",
olderidentical: "Older but identical",
updateclient: "Got data?",
original: "Original Entry",
long_original: "Long Original Entry",
@@ -386,26 +384,26 @@ add_test(function test_processIncoming_r
// The incoming ID is preferred.
do_check_eq(engine._store.items.original, undefined);
do_check_eq(engine._store.items.duplication, "Original Entry");
do_check_neq(engine._delete.ids.indexOf("original"), -1);
// The 'nukeme' record marked as deleted is removed.
do_check_eq(engine._store.items.nukeme, undefined);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_reconcile_local_deleted() {
+add_task(async function test_processIncoming_reconcile_local_deleted() {
_("Ensure local, duplicate ID is deleted on server.");
// When a duplicate is resolved, the local ID (which is never taken) should
// be deleted on the server.
- let [engine, server, user] = createServerAndConfigureClient();
+ let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
@@ -422,50 +420,50 @@ add_test(function test_processIncoming_r
do_check_attribute_count(engine._store.items, 1);
do_check_true("DUPE_INCOMING" in engine._store.items);
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
do_check_neq(undefined, collection.wbo("DUPE_INCOMING"));
- cleanAndGo(server);
+ await cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_equivalent() {
+add_task(async function test_processIncoming_reconcile_equivalent() {
_("Ensure proper handling of incoming records that match local.");
- let [engine, server, user] = createServerAndConfigureClient();
+ let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
let record = encryptPayload({id: "entry", denomination: "denomination"});
let wbo = new ServerWBO("entry", record, now + 2);
server.insertWBO(user, "rotary", wbo);
engine._store.items = {entry: "denomination"};
do_check_true(engine._store.itemExists("entry"));
engine._sync();
do_check_attribute_count(engine._store.items, 1);
- cleanAndGo(server);
+ await cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_locally_deleted_dupe_new() {
+add_task(async function test_processIncoming_reconcile_locally_deleted_dupe_new() {
_("Ensure locally deleted duplicate record newer than incoming is handled.");
// This is a somewhat complicated test. It ensures that if a client receives
// a modified record for an item that is deleted locally but with a different
// ID that the incoming record is ignored. This is a corner case for record
// handling, but it needs to be supported.
- let [engine, server, user] = createServerAndConfigureClient();
+ let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
@@ -484,26 +482,26 @@ add_test(function test_processIncoming_r
do_check_empty(engine._store.items);
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
do_check_neq(null, wbo);
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_true(payload.deleted);
- cleanAndGo(server);
+ await cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_locally_deleted_dupe_old() {
+add_task(async function test_processIncoming_reconcile_locally_deleted_dupe_old() {
_("Ensure locally deleted duplicate record older than incoming is restored.");
// This is similar to the above test except it tests the condition where the
// incoming record is newer than the local deletion, therefore overriding it.
- let [engine, server, user] = createServerAndConfigureClient();
+ let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
@@ -523,23 +521,23 @@ add_test(function test_processIncoming_r
do_check_eq("incoming", engine._store.items.DUPE_INCOMING);
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_eq("incoming", payload.denomination);
- cleanAndGo(server);
+ await cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_changed_dupe() {
+add_task(async function test_processIncoming_reconcile_changed_dupe() {
_("Ensure that locally changed duplicate record is handled properly.");
- let [engine, server, user] = createServerAndConfigureClient();
+ let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
// The local record is newer than the incoming one, so it should be retained.
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
@@ -560,25 +558,25 @@ add_test(function test_processIncoming_r
// have its payload set to what was in the local record.
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
do_check_neq(undefined, wbo);
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_eq("local", payload.denomination);
- cleanAndGo(server);
+ await cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_changed_dupe_new() {
+add_task(async function test_processIncoming_reconcile_changed_dupe_new() {
_("Ensure locally changed duplicate record older than incoming is ignored.");
// This test is similar to the above except the incoming record is younger
// than the local record. The incoming record should be authoritative.
- let [engine, server, user] = createServerAndConfigureClient();
+ let [engine, server, user] = await createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
@@ -597,20 +595,20 @@ add_test(function test_processIncoming_r
// On the server, the local ID should be deleted and the incoming ID should
// have its payload retained.
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
do_check_neq(undefined, wbo);
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_eq("incoming", payload.denomination);
- cleanAndGo(server);
+ await cleanAndGo(server);
});
-add_test(function test_processIncoming_mobile_batchSize() {
+add_task(async function test_processIncoming_mobile_batchSize() {
_("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");
Svc.Prefs.set("client.type", "mobile");
Service.identity.username = "foo";
// A collection that logs each GET
let collection = new ServerCollection();
collection.get_log = [];
@@ -629,17 +627,17 @@ add_test(function test_processIncoming_m
wbo.modified = Date.now()/1000 - 60*(i+10);
collection.insertWBO(wbo);
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
@@ -666,17 +664,17 @@ add_test(function test_processIncoming_m
do_check_eq(collection.get_log[i+1].limit, undefined);
if (i < Math.floor(234 / MOBILE_BATCH_SIZE))
do_check_eq(collection.get_log[i+1].ids.length, MOBILE_BATCH_SIZE);
else
do_check_eq(collection.get_log[i+1].ids.length, 234 % MOBILE_BATCH_SIZE);
}
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
add_task(async function test_processIncoming_store_toFetch() {
_("If processIncoming fails in the middle of a batch on mobile, state is saved in toFetch and lastSync.");
Service.identity.username = "foo";
Svc.Prefs.set("client.type", "mobile");
@@ -704,17 +702,17 @@ add_task(async function test_processInco
let engine = makeRotaryEngine();
engine.enabled = true;
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
@@ -738,17 +736,17 @@ add_task(async function test_processInco
do_check_eq(engine.lastSync, collection.wbo("record-no-99").modified);
} finally {
await promiseClean(server);
}
});
-add_test(function test_processIncoming_resume_toFetch() {
+add_task(async function test_processIncoming_resume_toFetch() {
_("toFetch and previousFailed items left over from previous syncs are fetched on the next sync, along with new items.");
Service.identity.username = "foo";
const LASTSYNC = Date.now() / 1000;
// Server records that will be downloaded
let collection = new ServerCollection();
collection.insert('flying',
@@ -777,17 +775,17 @@ add_test(function test_processIncoming_r
engine.lastSync = LASTSYNC;
engine.toFetch = ["flying", "scotsman"];
engine.previousFailed = ["failed0", "failed1", "failed2"];
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
@@ -802,22 +800,22 @@ add_test(function test_processIncoming_r
do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
do_check_eq(engine._store.items.rekolok, "Rekonstruktionslokomotive");
do_check_eq(engine._store.items.failed0, "Record No. 0");
do_check_eq(engine._store.items.failed1, "Record No. 1");
do_check_eq(engine._store.items.failed2, "Record No. 2");
do_check_eq(engine.previousFailed.length, 0);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_applyIncomingBatchSize_smaller() {
+add_task(async function test_processIncoming_applyIncomingBatchSize_smaller() {
_("Ensure that a number of incoming items less than applyIncomingBatchSize is still applied.");
Service.identity.username = "foo";
// Engine that doesn't like the first and last record it's given.
const APPLY_BATCH_SIZE = 10;
let engine = makeRotaryEngine();
engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
@@ -835,17 +833,17 @@ add_test(function test_processIncoming_a
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
@@ -857,22 +855,22 @@ add_test(function test_processIncoming_a
// Records have been applied and the expected failures have failed.
do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE - 1 - 2);
do_check_eq(engine.toFetch.length, 0);
do_check_eq(engine.previousFailed.length, 2);
do_check_eq(engine.previousFailed[0], "record-no-0");
do_check_eq(engine.previousFailed[1], "record-no-8");
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_applyIncomingBatchSize_multiple() {
+add_task(async function test_processIncoming_applyIncomingBatchSize_multiple() {
_("Ensure that incoming items are applied according to applyIncomingBatchSize.");
Service.identity.username = "foo";
const APPLY_BATCH_SIZE = 10;
// Engine that applies records in batches.
let engine = makeRotaryEngine();
engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
@@ -891,17 +889,17 @@ add_test(function test_processIncoming_a
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
@@ -910,22 +908,22 @@ add_test(function test_processIncoming_a
engine._syncStartup();
engine._processIncoming();
// Records have been applied in 3 batches.
do_check_eq(batchCalls, 3);
do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE * 3);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_notify_count() {
+add_task(async function test_processIncoming_notify_count() {
_("Ensure that failed records are reported only once.");
Service.identity.username = "foo";
const APPLY_BATCH_SIZE = 5;
const NUMBER_OF_RECORDS = 15;
// Engine that fails the first record.
let engine = makeRotaryEngine();
@@ -943,17 +941,17 @@ add_test(function test_processIncoming_n
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment.
do_check_eq(engine.lastSync, 0);
@@ -999,22 +997,22 @@ add_test(function test_processIncoming_n
do_check_eq(called, 2);
do_check_eq(counts.failed, 1);
do_check_eq(counts.applied, 3);
do_check_eq(counts.newFailed, 0);
do_check_eq(counts.succeeded, 2);
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_previousFailed() {
+add_task(async function test_processIncoming_previousFailed() {
_("Ensure that failed records are retried.");
Service.identity.username = "foo";
Svc.Prefs.set("client.type", "mobile");
const APPLY_BATCH_SIZE = 4;
const NUMBER_OF_RECORDS = 14;
// Engine that fails the first 2 records.
@@ -1033,17 +1031,17 @@ add_test(function test_processIncoming_p
let payload = encryptPayload({id: id, denomination: "Record No. " + i});
collection.insert(id, payload);
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment.
do_check_eq(engine.lastSync, 0);
@@ -1085,22 +1083,22 @@ add_test(function test_processIncoming_p
do_check_eq(engine.previousFailed[3], "record-no-9");
// Refetched items that didn't fail the second time are in engine._store.items.
do_check_eq(engine._store.items['record-no-4'], "Record No. 4");
do_check_eq(engine._store.items['record-no-5'], "Record No. 5");
do_check_eq(engine._store.items['record-no-12'], "Record No. 12");
do_check_eq(engine._store.items['record-no-13'], "Record No. 13");
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_processIncoming_failed_records() {
+add_task(async function test_processIncoming_failed_records() {
_("Ensure that failed records from _reconcile and applyIncomingBatch are refetched.");
Service.identity.username = "foo";
// Let's create three and a bit batches worth of server side records.
let collection = new ServerCollection();
const NUMBER_OF_RECORDS = MOBILE_BATCH_SIZE * 3 + 5;
for (let i = 0; i < NUMBER_OF_RECORDS; i++) {
let id = 'record-no-' + i;
@@ -1149,17 +1147,17 @@ add_test(function test_processIncoming_f
uris.push(req.path + "?" + req.queryString);
return h(req, res);
};
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": recording_handler(collection)
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
@@ -1219,17 +1217,17 @@ add_test(function test_processIncoming_f
// If we're on mobile, that limit is used by default.
_("Test batching with tiny mobile batch size.");
Svc.Prefs.set("client.type", "mobile");
engine.mobileGUIDFetchBatchSize = 2;
do_check_eq(batchDownload(BOGUS_RECORDS.length), 4);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
add_task(async function test_processIncoming_decrypt_failed() {
_("Ensure that records failing to decrypt are either replaced or refetched.");
Service.identity.username = "foo";
@@ -1262,17 +1260,17 @@ add_task(async function test_processInco
engine.enabled = true;
engine._store.items = {nojson: "Valid JSON",
nodecrypt: "Valid ciphertext"};
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial state
@@ -1305,31 +1303,31 @@ add_task(async function test_processInco
do_check_eq(observerSubject.failed, 4);
} finally {
await promiseClean(server);
}
});
-add_test(function test_uploadOutgoing_toEmptyServer() {
+add_task(async function test_uploadOutgoing_toEmptyServer() {
_("SyncEngine._uploadOutgoing uploads new records to server");
Service.identity.username = "foo";
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO('flying');
collection._wbos.scotsman = new ServerWBO('scotsman');
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler(),
"/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
"/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman"};
// Mark one of these records as changed
engine._tracker.addChangedID('scotsman', 0);
@@ -1359,33 +1357,33 @@ add_test(function test_uploadOutgoing_to
do_check_eq(JSON.parse(collection.wbo("scotsman").data.ciphertext).id,
"scotsman");
do_check_eq(engine._tracker.changedIDs["scotsman"], undefined);
// The 'flying' record wasn't marked so it wasn't uploaded
do_check_eq(collection.payload("flying"), undefined);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_uploadOutgoing_huge() {
+add_task(async function test_uploadOutgoing_huge() {
Service.identity.username = "foo";
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO('flying');
collection._wbos.scotsman = new ServerWBO('scotsman');
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler(),
"/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
engine.allowSkippedRecord = true;
engine.lastSync = 1;
engine._store.items = { flying: "a".repeat(1024 * 1024) };
engine._tracker.addChangedID("flying", 1000);
@@ -1424,17 +1422,17 @@ add_task(async function test_uploadOutgo
// We only define the "flying" WBO on the server, not the "scotsman"
// and "peppercorn" ones.
collection._wbos.flying = new ServerWBO('flying');
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman",
peppercorn: "Peppercorn Class"};
// Mark these records as changed
const FLYING_CHANGED = 12345;
@@ -1477,17 +1475,17 @@ add_task(async function test_uploadOutgo
await promiseClean(server);
}
});
/* A couple of "functional" tests to ensure we split records into appropriate
POST requests. More comprehensive unit-tests for this "batching" are in
test_postqueue.js.
*/
-add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
+add_task(async function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
_("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");
Service.identity.username = "foo";
let collection = new ServerCollection();
// Let's count how many times the client posts to the server
var noOfUploads = 0;
collection.post = (function(orig) {
@@ -1519,17 +1517,17 @@ add_test(function test_uploadOutgoing_MA
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
try {
// Confirm initial environment.
do_check_eq(noOfUploads, 0);
engine._syncStartup();
engine._uploadOutgoing();
@@ -1538,21 +1536,21 @@ add_test(function test_uploadOutgoing_MA
for (i = 0; i < 234; i++) {
do_check_true(!!collection.payload('record-no-' + i));
}
// Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS.
do_check_eq(noOfUploads, Math.ceil(234/MAX_UPLOAD_RECORDS));
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_uploadOutgoing_largeRecords() {
+add_task(async function test_uploadOutgoing_largeRecords() {
_("SyncEngine._uploadOutgoing throws on records larger than MAX_UPLOAD_BYTES");
Service.identity.username = "foo";
let collection = new ServerCollection();
let engine = makeRotaryEngine();
engine._store.items["large-item"] = "Y".repeat(MAX_UPLOAD_BYTES*2);
@@ -1564,51 +1562,51 @@ add_test(function test_uploadOutgoing_la
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
try {
engine._syncStartup();
let error = null;
try {
engine._uploadOutgoing();
} catch (e) {
error = e;
}
ok(!!error);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_syncFinish_noDelete() {
+add_task(async function test_syncFinish_noDelete() {
_("SyncEngine._syncFinish resets tracker's score");
let server = httpd_setup({});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
engine._delete = {}; // Nothing to delete
engine._tracker.score = 100;
// _syncFinish() will reset the engine's score.
engine._syncFinish();
do_check_eq(engine.score, 0);
server.stop(run_next_test);
});
-add_test(function test_syncFinish_deleteByIds() {
+add_task(async function test_syncFinish_deleteByIds() {
_("SyncEngine._syncFinish deletes server records slated for deletion (list of record IDs).");
Service.identity.username = "foo";
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO(
'flying', encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
collection._wbos.scotsman = new ServerWBO(
@@ -1616,39 +1614,39 @@ add_test(function test_syncFinish_delete
denomination: "Flying Scotsman"}));
collection._wbos.rekolok = new ServerWBO(
'rekolok', encryptPayload({id: 'rekolok',
denomination: "Rekonstruktionslokomotive"}));
let server = httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
try {
engine._delete = {ids: ['flying', 'rekolok']};
engine._syncFinish();
// The 'flying' and 'rekolok' records were deleted while the
// 'scotsman' one wasn't.
do_check_eq(collection.payload("flying"), undefined);
do_check_true(!!collection.payload("scotsman"));
do_check_eq(collection.payload("rekolok"), undefined);
// The deletion todo list has been reset.
do_check_eq(engine._delete.ids, undefined);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_syncFinish_deleteLotsInBatches() {
+add_task(async function test_syncFinish_deleteLotsInBatches() {
_("SyncEngine._syncFinish deletes server records in batches of 100 (list of record IDs).");
Service.identity.username = "foo";
let collection = new ServerCollection();
// Let's count how many times the client does a DELETE request to the server
var noOfUploads = 0;
collection.delete = (function(orig) {
@@ -1667,17 +1665,17 @@ add_test(function test_syncFinish_delete
wbo.modified = now / 1000 - 60 * (i + 110);
collection.insertWBO(wbo);
}
let server = httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
try {
// Confirm initial environment
do_check_eq(noOfUploads, 0);
// Declare what we want to have deleted: all records no. 100 and
@@ -1704,31 +1702,31 @@ add_test(function test_syncFinish_delete
// The deletion was done in batches
do_check_eq(noOfUploads, 2 + 1);
// The deletion todo list has been reset.
do_check_eq(engine._delete.ids, undefined);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
add_task(async function test_sync_partialUpload() {
_("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");
Service.identity.username = "foo";
let collection = new ServerCollection();
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let engine = makeRotaryEngine();
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine.lastSyncLocal = 456;
// Let the third upload fail completely
var noOfUploads = 0;
@@ -1784,71 +1782,71 @@ add_task(async function test_sync_partia
do_check_false(id in engine._tracker.changedIDs);
}
} finally {
await promiseClean(server);
}
});
-add_test(function test_canDecrypt_noCryptoKeys() {
+add_task(async function test_canDecrypt_noCryptoKeys() {
_("SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection.");
Service.identity.username = "foo";
// Wipe collection keys so we can test the desired scenario.
Service.collectionKeys.clear();
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO(
'flying', encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
try {
do_check_false(engine.canDecrypt());
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_canDecrypt_true() {
+add_task(async function test_canDecrypt_true() {
_("SyncEngine.canDecrypt returns true if the engine can decrypt the items on the server.");
Service.identity.username = "foo";
generateNewKeys(Service.collectionKeys);
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO(
'flying', encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let engine = makeRotaryEngine();
try {
do_check_true(engine.canDecrypt());
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
}
});
-add_test(function test_syncapplied_observer() {
+add_task(async function test_syncapplied_observer() {
Service.identity.username = "foo";
const NUMBER_OF_RECORDS = 10;
let engine = makeRotaryEngine();
// Create a batch of server side records.
let collection = new ServerCollection();
@@ -1857,17 +1855,17 @@ add_test(function test_syncapplied_obser
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
let server = httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
let numApplyCalls = 0;
let engine_name;
@@ -1890,13 +1888,13 @@ add_test(function test_syncapplied_obser
do_check_attribute_count(engine._store.items, 10);
do_check_eq(numApplyCalls, 1);
do_check_eq(engine_name, "rotary");
do_check_eq(count.applied, 10);
do_check_true(Service.scheduler.hasIncomingItems);
} finally {
- cleanAndGo(server);
+ await cleanAndGo(server);
Service.scheduler.hasIncomingItems = false;
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
}
});
--- a/services/sync/tests/unit/test_syncscheduler.js
+++ b/services/sync/tests/unit/test_syncscheduler.js
@@ -53,19 +53,17 @@ function sync_httpd_setup() {
"/1.1/johndoe/storage/crypto/keys":
upd("crypto", (new ServerWBO("keys")).handler()),
"/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler()),
"/user/1.0/johndoe/node/weave": httpd_handler(200, "OK", "null")
});
}
async function setUp(server) {
- await configureIdentity({username: "johndoe"});
-
- Service.clusterURL = server.baseURI + "/";
+ await configureIdentity({username: "johndoe"}, server);
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
let result = serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success;
return result;
}
@@ -687,23 +685,28 @@ add_identity_test(this, async function t
});
add_identity_test(this, async function test_no_sync_node() {
// Test when Status.sync == NO_SYNC_NODE_FOUND
// it is not overwritten on sync:finish
let server = sync_httpd_setup();
await setUp(server);
- Service.serverURL = server.baseURI + "/";
+  let oldfc = Service._clusterManager._findCluster;
+ Service._clusterManager._findCluster = () => null;
+ Service.clusterURL = "";
+ try {
+ Service.sync();
+ do_check_eq(Status.sync, NO_SYNC_NODE_FOUND);
+ do_check_eq(scheduler.syncTimer.delay, NO_SYNC_NODE_INTERVAL);
- Service.sync();
- do_check_eq(Status.sync, NO_SYNC_NODE_FOUND);
- do_check_eq(scheduler.syncTimer.delay, NO_SYNC_NODE_INTERVAL);
-
- await cleanUpAndGo(server);
+ await cleanUpAndGo(server);
+ } finally {
+ Service._clusterManager._findCluster = oldfc;
+ }
});
add_identity_test(this, async function test_sync_failed_partial_500s() {
_("Test a 5xx status calls handleSyncError.");
scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
let server = sync_httpd_setup();
let engine = Service.engineManager.get("catapult");
--- a/services/sync/tests/unit/test_tab_engine.js
+++ b/services/sync/tests/unit/test_tab_engine.js
@@ -44,17 +44,17 @@ add_test(function test_getOpenURLs() {
_(" test matching works (too long)");
matches = openurlsset.has(superLongURL);
ok(!matches);
run_next_test();
});
-add_test(function test_tab_engine_skips_incoming_local_record() {
+add_task(async function test_tab_engine_skips_incoming_local_record() {
_("Ensure incoming records that match local client ID are never applied.");
let [engine, store] = getMocks();
let localID = engine.service.clientsEngine.localID;
let apply = store.applyIncoming;
let applied = [];
store.applyIncoming = function (record) {
notEqual(record.id, localID, "Only apply tab records from remote clients");
@@ -73,37 +73,40 @@ add_test(function test_tab_engine_skips_
let remoteRecord = encryptPayload({id: remoteID, clientName: "not local"});
collection.insert(remoteID, remoteRecord);
_("Setting up Sync server");
let server = sync_httpd_setup({
"/1.1/foo/storage/tabs": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ let syncTesting = await SyncTestingInfrastructure(server);
Service.identity.username = "foo";
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {tabs: {version: engine.version,
syncID: engine.syncID}};
generateNewKeys(Service.collectionKeys);
- let syncFinish = engine._syncFinish;
- engine._syncFinish = function () {
- equal(applied.length, 1, "Remote client record was applied");
- equal(applied[0].id, remoteID, "Remote client ID matches");
+ let promiseFinished = new Promise(resolve => {
+ let syncFinish = engine._syncFinish;
+ engine._syncFinish = function () {
+ equal(applied.length, 1, "Remote client record was applied");
+ equal(applied[0].id, remoteID, "Remote client ID matches");
- syncFinish.call(engine);
- run_next_test();
- }
+ syncFinish.call(engine);
+ resolve();
+ }
+ });
_("Start sync");
engine._sync();
+ await promiseFinished;
});
add_test(function test_reconcile() {
let [engine, store] = getMocks();
_("Setup engine for reconciling");
engine._syncStartup();
--- a/services/sync/tests/unit/test_telemetry.js
+++ b/services/sync/tests/unit/test_telemetry.js
@@ -96,17 +96,17 @@ add_identity_test(this, async function t
add_task(async function test_processIncoming_error() {
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {bookmarks: {version: engine.version,
syncID: engine.syncID}}}},
bookmarks: {}
});
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let collection = server.user("foo").collection("bookmarks");
try {
// Create a bogus record that when synced down will provoke a
// network error which in turn provokes an exception in _processIncoming.
const BOGUS_GUID = "zzzzzzzzzzzz";
let bogus_record = collection.insert(BOGUS_GUID, "I'm a bogus record!");
bogus_record.get = function get() {
throw "Sync this!";
@@ -119,17 +119,17 @@ add_task(async function test_processInco
let error, ping;
try {
await sync_engine_and_validate_telem(engine, true, errPing => ping = errPing);
} catch(ex) {
error = ex;
}
ok(!!error);
ok(!!ping);
- equal(ping.uid, "0".repeat(32));
+  equal(ping.uid, "f".repeat(32)); // as set up by SyncTestingInfrastructure
deepEqual(ping.failureReason, {
name: "othererror",
error: "error.engine.reason.record_download_fail"
});
equal(ping.engines.length, 1);
equal(ping.engines[0].name, "bookmarks");
deepEqual(ping.engines[0].failureReason, {
@@ -146,17 +146,17 @@ add_task(async function test_processInco
add_task(async function test_uploading() {
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {bookmarks: {version: engine.version,
syncID: engine.syncID}}}},
bookmarks: {}
});
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let parent = PlacesUtils.toolbarFolderId;
let uri = Utils.makeURI("http://getfirefox.com/");
let title = "Get Firefox";
let bmk_id = PlacesUtils.bookmarks.insertBookmark(parent, uri,
PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
@@ -195,17 +195,17 @@ add_task(async function test_upload_fail
Service.identity.username = "foo";
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO('flying');
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ await SyncTestingInfrastructure(server);
let engine = new RotaryEngine(Service);
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine.lastSyncLocal = 456;
engine._store.items = {
flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman",
peppercorn: "Peppercorn Class"
@@ -243,17 +243,17 @@ add_task(async function test_upload_fail
add_task(async function test_sync_partialUpload() {
Service.identity.username = "foo";
let collection = new ServerCollection();
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
- let syncTesting = new SyncTestingInfrastructure(server);
+ await SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
let engine = new RotaryEngine(Service);
engine.lastSync = 123;
engine.lastSyncLocal = 456;
// Create a bunch of records (and server side handlers)
@@ -326,17 +326,17 @@ add_task(async function test_generic_eng
let engine = Service.engineManager.get("steam");
engine.enabled = true;
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {steam: {version: engine.version,
syncID: engine.syncID}}}},
steam: {}
});
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
let e = new Error("generic failure message")
engine._errToThrow = e;
try {
let ping = await sync_and_validate_telem(true);
equal(ping.status.service, SYNC_FAILED_PARTIAL);
deepEqual(ping.engines.find(e => e.name === "steam").failureReason, {
name: "unexpectederror",
@@ -353,17 +353,17 @@ add_task(async function test_engine_fail
let engine = Service.engineManager.get("steam");
engine.enabled = true;
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {steam: {version: engine.version,
syncID: engine.syncID}}}},
steam: {}
});
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
// create an IOError to re-throw as part of Sync.
try {
// (Note that fakeservices.js has replaced Utils.jsonMove etc, but for
// this test we need the real one so we get real exceptions from the
// filesystem.)
await Utils._real_jsonMove("file-does-not-exist", "anything", {});
} catch (ex) {
engine._errToThrow = ex;
@@ -393,17 +393,17 @@ add_task(async function test_initial_syn
// These are the only ones who actually have things to sync at startup.
let engineNames = ["clients", "bookmarks", "prefs", "tabs"];
let conf = { meta: { global: { engines } } };
for (let e of engineNames) {
engines[e] = { version: engine.version, syncID: engine.syncID };
conf[e] = {};
}
let server = serverForUsers({"foo": "password"}, conf);
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
try {
let ping = await wait_for_ping(() => Service.sync(), true);
equal(ping.engines.find(e => e.name === "clients").outgoing[0].sent, 1);
equal(ping.engines.find(e => e.name === "tabs").outgoing[0].sent, 1);
// for the rest we don't care about specifics
for (let e of ping.engines) {
@@ -426,17 +426,17 @@ add_task(async function test_nserror() {
let engine = Service.engineManager.get("steam");
engine.enabled = true;
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {steam: {version: engine.version,
syncID: engine.syncID}}}},
steam: {}
});
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
engine._errToThrow = Components.Exception("NS_ERROR_UNKNOWN_HOST", Cr.NS_ERROR_UNKNOWN_HOST);
try {
let ping = await sync_and_validate_telem(true);
deepEqual(ping.status, {
service: SYNC_FAILED_PARTIAL,
sync: LOGIN_FAILED_NETWORK_ERROR
});
let enginePing = ping.engines.find(e => e.name === "steam");
@@ -456,31 +456,30 @@ add_identity_test(this, async function t
let telem = get_sync_test_telemetry();
telem.maxPayloadCount = 2;
telem.submissionInterval = Infinity;
let oldSubmit = telem.submit;
let server;
try {
- await configureIdentity({ username: "johndoe" });
let handlers = {
"/1.1/johndoe/info/collections": helper.handler,
"/1.1/johndoe/storage/crypto/keys": upd("crypto", new ServerWBO("keys").handler()),
"/1.1/johndoe/storage/meta/global": upd("meta", new ServerWBO("global").handler())
};
let collections = ["clients", "bookmarks", "forms", "history", "passwords", "prefs", "tabs"];
for (let coll of collections) {
handlers["/1.1/johndoe/storage/" + coll] = upd(coll, new ServerCollection({}, true).handler());
}
server = httpd_setup(handlers);
- Service.serverURL = server.baseURI;
+ await configureIdentity({ username: "johndoe" }, server);
telem.submit = () => ok(false, "Submitted telemetry ping when we should not have");
for (let i = 0; i < 5; ++i) {
Service.sync();
}
telem.submit = oldSubmit;
telem.submissionInterval = -1;
let ping = await sync_and_validate_telem(true, true); // with this we've synced 6 times
@@ -501,17 +500,17 @@ add_task(async function test_no_foreign_
let engine = Service.engineManager.get("bogus");
engine.enabled = true;
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {bogus: {version: engine.version, syncID: engine.syncID}}}},
steam: {}
});
engine._errToThrow = new Error("Oh no!");
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
try {
let ping = await sync_and_validate_telem(true);
equal(ping.status.service, SYNC_FAILED_PARTIAL);
ok(ping.engines.every(e => e.name !== "bogus"));
} finally {
Service.engineManager.unregister(engine);
await cleanAndGo(server);
}
@@ -522,17 +521,17 @@ add_task(async function test_sql_error()
let engine = Service.engineManager.get("steam");
engine.enabled = true;
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {steam: {version: engine.version,
syncID: engine.syncID}}}},
steam: {}
});
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
engine._sync = function() {
// Just grab a DB connection and issue a bogus SQL statement synchronously.
let db = PlacesUtils.history.QueryInterface(Ci.nsPIPlacesDatabase).DBConnection;
Async.querySpinningly(db.createAsyncStatement("select bar from foo"));
};
try {
let ping = await sync_and_validate_telem(true);
let enginePing = ping.engines.find(e => e.name === "steam");
@@ -548,17 +547,17 @@ add_task(async function test_no_foreign_
let engine = Service.engineManager.get("bogus");
engine.enabled = true;
let store = engine._store;
let server = serverForUsers({"foo": "password"}, {
meta: {global: {engines: {bogus: {version: engine.version, syncID: engine.syncID}}}},
steam: {}
});
- new SyncTestingInfrastructure(server.server);
+ await SyncTestingInfrastructure(server);
try {
let ping = await sync_and_validate_telem();
ok(ping.engines.every(e => e.name !== "bogus"));
} finally {
Service.engineManager.unregister(engine);
await cleanAndGo(server);
}
});
\ No newline at end of file