--- a/services/common/async.js
+++ b/services/common/async.js
@@ -136,41 +136,56 @@ this.Async = {
* Check if the passed exception is one raised by checkAppReady. Typically
* this will be used in exception handlers to allow such exceptions to
* make their way to the top frame and allow the app to actually terminate.
*/
isShutdownException(exception) {
return exception && exception.appIsShuttingDown === true;
},
+ /*
+ * Wait for a promise to resolve while spinning an event loop.
+ * This is an interim step on the way to removing all nested event-loop
+ * spinning in Sync, and should eventually be removed.
+ */
+ promiseSpinningly: function(promise) {
+ let cb = Async.makeSpinningCallback();
+ promise.then(
+ result => cb(null, result),
+ error => cb(error)
+ );
+ return cb.wait();
+ },
+
/**
* Return the two things you need to make an asynchronous call synchronous
* by spinning the event loop.
*/
makeSpinningCallback: function makeSpinningCallback() {
let cb = Async.makeSyncCallback();
function callback(error, ret) {
if (error)
cb.throw(error);
else
cb(ret);
}
callback.wait = () => Async.waitForSyncCallback(cb);
return callback;
},
- // Prototype for mozIStorageCallback, used in querySpinningly.
+ // Prototype for mozIStorageCallback, used in promiseQuery.
// This allows us to define the handle* functions just once rather
- // than on every querySpinningly invocation.
+ // than on every invocation.
_storageCallbackPrototype: {
results: null,
- // These are set by queryAsync.
+ // These are set by promiseQuery.
names: null,
- syncCb: null,
+ resolve: null,
+ reject: null,
handleResult: function handleResult(results) {
if (!this.names) {
return;
}
if (!this.results) {
this.results = [];
}
@@ -179,36 +194,49 @@ this.Async = {
let item = {};
for each (let name in this.names) {
item[name] = row.getResultByName(name);
}
this.results.push(item);
}
},
handleError: function handleError(error) {
- this.syncCb.throw(error);
+ this.reject(error);
},
handleCompletion: function handleCompletion(reason) {
// If we got an error, handleError will also have been called, so don't
// call the callback! We never cancel statements, so we don't need to
// address that quandary.
if (reason == REASON_ERROR)
return;
// If we were called with column names but didn't find any results,
// the calling code probably still expects an array as a return value.
if (this.names && !this.results) {
this.results = [];
}
- this.syncCb(this.results);
+ this.resolve(this.results);
}
},
- querySpinningly: function querySpinningly(query, names) {
- // 'Synchronously' asyncExecute, fetching all results by name.
- let storageCallback = Object.create(Async._storageCallbackPrototype);
- storageCallback.names = names;
- storageCallback.syncCb = Async.makeSyncCallback();
- query.executeAsync(storageCallback);
- return Async.waitForSyncCallback(storageCallback.syncCb);
+ promiseQuery: function (query, names) {
+ // asyncExecute, fetching all results by name.
+ return new Promise((resolve, reject) => {
+ let storageCallback = Object.create(Async._storageCallbackPrototype);
+ storageCallback.names = names;
+ storageCallback.resolve = resolve;
+ storageCallback.reject = reject;
+ query.executeAsync(storageCallback);
+ });
},
+
+ /**
+ * A "tight loop" of promises can still lock up the browser for some time.
+ * Periodically waiting for a promise returned by this function will solve
+ * that.
+ **/
+ promiseYield() {
+ return new Promise(resolve => {
+ Services.tm.currentThread.dispatch(resolve, Ci.nsIThread.DISPATCH_NORMAL);
+    });
+ }
};
--- a/services/common/tests/unit/head_helpers.js
+++ b/services/common/tests/unit/head_helpers.js
@@ -101,16 +101,22 @@ function httpd_handler(statusCode, statu
response.setStatusLine(request.httpVersion, statusCode, status);
if (body) {
response.bodyOutputStream.write(body, body.length);
}
};
}
+function promiseStopServer(server) {
+ return new Promise(resolve => {
+ server.stop(resolve);
+ });
+}
+
/*
* Read bytes string from an nsIInputStream. If 'count' is omitted,
* all available input is read.
*/
function readBytesFromInputStream(inputStream, count) {
return CommonUtils.readBytesFromInputStream(inputStream, count);
}
--- a/services/common/tests/unit/test_async_querySpinningly.js
+++ b/services/common/tests/unit/test_async_querySpinningly.js
@@ -1,103 +1,108 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://services-common/async.js");
Cu.import("resource://services-common/utils.js");
-_("Make sure querySpinningly will synchronously fetch rows for a query asyncly");
+// XXX - note that this file used to test 'querySpinningly', but since the
+// move to a task/promise based sync, the equivalent routine, and what we are
+// testing here, is promiseQuery.
+_("Make sure promiseQuery will promisely fetch rows for a query asyncly");
const SQLITE_CONSTRAINT_VIOLATION = 19; // http://www.sqlite.org/c3ref/c_abort.html
var Svc = {};
XPCOMUtils.defineLazyServiceGetter(Svc, "Form",
"@mozilla.org/satchel/form-history;1",
"nsIFormHistory2");
-function querySpinningly(query, names) {
+function* promiseQuery(query, names) {
let q = Svc.Form.DBConnection.createStatement(query);
- let r = Async.querySpinningly(q, names);
+ let r = yield Async.promiseQuery(q, names);
q.finalize();
return r;
}
function run_test() {
initTestLogging("Trace");
+}
+add_task(function* () {
_("Make sure the call is async and allows other events to process");
let isAsync = false;
CommonUtils.nextTick(function() { isAsync = true; });
do_check_false(isAsync);
_("Empty out the formhistory table");
- let r0 = querySpinningly("DELETE FROM moz_formhistory");
+ let r0 = yield promiseQuery("DELETE FROM moz_formhistory");
do_check_eq(r0, null);
_("Make sure there's nothing there");
- let r1 = querySpinningly("SELECT 1 FROM moz_formhistory");
+ let r1 = yield promiseQuery("SELECT 1 FROM moz_formhistory");
do_check_eq(r1, null);
_("Insert a row");
- let r2 = querySpinningly("INSERT INTO moz_formhistory (fieldname, value) VALUES ('foo', 'bar')");
+ let r2 = yield promiseQuery("INSERT INTO moz_formhistory (fieldname, value) VALUES ('foo', 'bar')");
do_check_eq(r2, null);
_("Request a known value for the one row");
- let r3 = querySpinningly("SELECT 42 num FROM moz_formhistory", ["num"]);
+ let r3 = yield promiseQuery("SELECT 42 num FROM moz_formhistory", ["num"]);
do_check_eq(r3.length, 1);
do_check_eq(r3[0].num, 42);
_("Get multiple columns");
- let r4 = querySpinningly("SELECT fieldname, value FROM moz_formhistory", ["fieldname", "value"]);
+ let r4 = yield promiseQuery("SELECT fieldname, value FROM moz_formhistory", ["fieldname", "value"]);
do_check_eq(r4.length, 1);
do_check_eq(r4[0].fieldname, "foo");
do_check_eq(r4[0].value, "bar");
_("Get multiple columns with a different order");
- let r5 = querySpinningly("SELECT fieldname, value FROM moz_formhistory", ["value", "fieldname"]);
+ let r5 = yield promiseQuery("SELECT fieldname, value FROM moz_formhistory", ["value", "fieldname"]);
do_check_eq(r5.length, 1);
do_check_eq(r5[0].fieldname, "foo");
do_check_eq(r5[0].value, "bar");
_("Add multiple entries (sqlite doesn't support multiple VALUES)");
- let r6 = querySpinningly("INSERT INTO moz_formhistory (fieldname, value) SELECT 'foo', 'baz' UNION SELECT 'more', 'values'");
+ let r6 = yield promiseQuery("INSERT INTO moz_formhistory (fieldname, value) SELECT 'foo', 'baz' UNION SELECT 'more', 'values'");
do_check_eq(r6, null);
_("Get multiple rows");
- let r7 = querySpinningly("SELECT fieldname, value FROM moz_formhistory WHERE fieldname = 'foo'", ["fieldname", "value"]);
+ let r7 = yield promiseQuery("SELECT fieldname, value FROM moz_formhistory WHERE fieldname = 'foo'", ["fieldname", "value"]);
do_check_eq(r7.length, 2);
do_check_eq(r7[0].fieldname, "foo");
do_check_eq(r7[1].fieldname, "foo");
_("Make sure updates work");
- let r8 = querySpinningly("UPDATE moz_formhistory SET value = 'updated' WHERE fieldname = 'more'");
+ let r8 = yield promiseQuery("UPDATE moz_formhistory SET value = 'updated' WHERE fieldname = 'more'");
do_check_eq(r8, null);
_("Get the updated");
- let r9 = querySpinningly("SELECT value, fieldname FROM moz_formhistory WHERE fieldname = 'more'", ["fieldname", "value"]);
+ let r9 = yield promiseQuery("SELECT value, fieldname FROM moz_formhistory WHERE fieldname = 'more'", ["fieldname", "value"]);
do_check_eq(r9.length, 1);
do_check_eq(r9[0].fieldname, "more");
do_check_eq(r9[0].value, "updated");
_("Grabbing fewer fields than queried is fine");
- let r10 = querySpinningly("SELECT value, fieldname FROM moz_formhistory", ["fieldname"]);
+ let r10 = yield promiseQuery("SELECT value, fieldname FROM moz_formhistory", ["fieldname"]);
do_check_eq(r10.length, 3);
_("Generate an execution error");
let query = "INSERT INTO moz_formhistory (fieldname, value) VALUES ('one', NULL)";
let stmt = Svc.Form.DBConnection.createStatement(query);
let r11, except; ;
try {
- r11 = Async.querySpinningly(stmt);
+ r11 = yield Async.promiseQuery(stmt);
} catch(e) {
except = e;
}
stmt.finalize()
do_check_true(!!except);
do_check_eq(except.result, SQLITE_CONSTRAINT_VIOLATION);
_("Cleaning up");
- querySpinningly("DELETE FROM moz_formhistory");
+ yield promiseQuery("DELETE FROM moz_formhistory");
_("Make sure the timeout got to run before this function ends");
do_check_true(isAsync);
-}
+});
--- a/services/sync/modules-testing/fakeservices.js
+++ b/services/sync/modules-testing/fakeservices.js
@@ -17,29 +17,29 @@ Cu.import("resource://services-sync/reco
Cu.import("resource://services-sync/util.js");
var btoa = Cu.import("resource://gre/modules/Log.jsm").btoa;
this.FakeFilesystemService = function FakeFilesystemService(contents) {
this.fakeContents = contents;
let self = this;
- Utils.jsonSave = function jsonSave(filePath, that, obj, callback) {
+ Utils.jsonSave = function jsonSave(filePath, that, obj) {
let json = typeof obj == "function" ? obj.call(that) : obj;
self.fakeContents["weave/" + filePath + ".json"] = JSON.stringify(json);
- callback.call(that);
+ return Promise.resolve();
};
- Utils.jsonLoad = function jsonLoad(filePath, that, cb) {
+ Utils.jsonLoad = function jsonLoad(filePath, that) {
let obj;
let json = self.fakeContents["weave/" + filePath + ".json"];
if (json) {
obj = JSON.parse(json);
}
- cb.call(that, obj);
+ return Promise.resolve(obj);
};
};
this.fakeSHA256HMAC = function fakeSHA256HMAC(message) {
message = message.substr(0, 64);
while (message.length < 64) {
message += " ";
}
--- a/services/sync/modules-testing/rotaryengine.js
+++ b/services/sync/modules-testing/rotaryengine.js
@@ -34,91 +34,95 @@ Utils.deferGetSet(RotaryRecord, "clearte
this.RotaryStore = function RotaryStore(name, engine) {
Store.call(this, name, engine);
this.items = {};
}
RotaryStore.prototype = {
__proto__: Store.prototype,
- create: function create(record) {
+ create: Task.async(function* create(record) {
this.items[record.id] = record.denomination;
- },
+ }),
- remove: function remove(record) {
+ remove: Task.async(function* remove(record) {
delete this.items[record.id];
- },
+ }),
- update: function update(record) {
+ update: Task.async(function* update(record) {
this.items[record.id] = record.denomination;
- },
+ }),
- itemExists: function itemExists(id) {
+ itemExists: Task.async(function* itemExists(id) {
return (id in this.items);
- },
+ }),
- createRecord: function createRecord(id, collection) {
+ createRecord: Task.async(function* createRecord(id, collection) {
let record = new RotaryRecord(collection, id);
if (!(id in this.items)) {
record.deleted = true;
return record;
}
record.denomination = this.items[id] || "Data for new record: " + id;
return record;
- },
+ }),
- changeItemID: function changeItemID(oldID, newID) {
+ changeItemID: Task.async(function* changeItemID(oldID, newID) {
if (oldID in this.items) {
this.items[newID] = this.items[oldID];
}
delete this.items[oldID];
- },
+ }),
- getAllIDs: function getAllIDs() {
+ getAllIDs: Task.async(function* getAllIDs() {
let ids = {};
for (let id in this.items) {
ids[id] = true;
}
return ids;
- },
+ }),
- wipe: function wipe() {
+ wipe: Task.async(function* wipe() {
this.items = {};
- }
+ }),
};
this.RotaryTracker = function RotaryTracker(name, engine) {
Tracker.call(this, name, engine);
}
RotaryTracker.prototype = {
__proto__: Tracker.prototype
};
this.RotaryEngine = function RotaryEngine(service) {
SyncEngine.call(this, "Rotary", service);
- // Ensure that the engine starts with a clean slate.
- this.toFetch = [];
- this.previousFailed = [];
}
RotaryEngine.prototype = {
__proto__: SyncEngine.prototype,
_storeObj: RotaryStore,
_trackerObj: RotaryTracker,
_recordObj: RotaryRecord,
- _findDupe: function _findDupe(item) {
+ promiseInitialized: Task.async(function* () {
+    yield SyncEngine.prototype.promiseInitialized.call(this);
+ // Ensure that the engine starts with a clean slate.
+ this.toFetch = [];
+ this.previousFailed = [];
+ }),
+
+ _findDupe: Task.async(function* (item) {
// This is a semaphore used for testing proper reconciling on dupe
// detection.
if (item.id == "DUPE_INCOMING") {
return "DUPE_LOCAL";
}
for (let [id, value] in Iterator(this._store.items)) {
if (item.denomination == value) {
return id;
}
}
- }
+ }),
};
--- a/services/sync/modules/addonsreconciler.js
+++ b/services/sync/modules/addonsreconciler.js
@@ -119,21 +119,21 @@ this.AddonsReconciler = function AddonsR
Svc.Obs.add("xpcom-shutdown", this.stopListening, this);
};
AddonsReconciler.prototype = {
/** Flag indicating whether we are listening to AddonManager events. */
_listening: false,
/**
- * Whether state has been loaded from a file.
+ * A promise that is resolved when state has been loaded from a file.
*
* State is loaded on demand if an operation requires it.
*/
- _stateLoaded: false,
+ _stateLoadPromise: null,
/**
* Define this as false if the reconciler should not persist state
* to disk when handling events.
*
* This allows test code to avoid spinning to write during observer
* notifications and xpcom shutdown, which appears to cause hangs on WinXP
* (Bug 873861).
@@ -162,92 +162,78 @@ AddonsReconciler.prototype = {
/**
* Objects subscribed to changes made to this instance.
*/
_listeners: [],
/**
* Accessor for add-ons in this object.
*
- * Returns an object mapping add-on IDs to objects containing metadata.
+ * Returns a promise that resolves with an object mapping add-on IDs to
+ * objects containing metadata.
*/
get addons() {
- this._ensureStateLoaded();
- return this._addons;
+ return this._ensureStateLoaded().then(
+ () => { return this._addons; }
+ );
},
/**
* Load reconciler state from a file.
*
* The path is relative to the weave directory in the profile. If no
* path is given, the default one is used.
*
* If the file does not exist or there was an error parsing the file, the
* state will be transparently defined as empty.
*
* @param path
* Path to load. ".json" is appended automatically. If not defined,
* a default path will be consulted.
- * @param callback
- * Callback to be executed upon file load. The callback receives a
- * truthy error argument signifying whether an error occurred and a
- * boolean indicating whether data was loaded.
+ * @return Promise<bool> Indicates if data was loaded.
*/
- loadState: function loadState(path, callback) {
+ loadState: Task.async(function* (path) {
let file = path || DEFAULT_STATE_FILE;
- Utils.jsonLoad(file, this, function(json) {
- this._addons = {};
- this._changes = [];
+ let json = yield Utils.jsonLoad(file, this);
+ this._addons = {};
+ this._changes = [];
- if (!json) {
- this._log.debug("No data seen in loaded file: " + file);
- if (callback) {
- callback(null, false);
- }
-
- return;
- }
+ if (!json) {
+ this._log.debug("No data seen in loaded file: " + file);
+      return false;
+ }
- let version = json.version;
- if (!version || version != 1) {
- this._log.error("Could not load JSON file because version not " +
- "supported: " + version);
- if (callback) {
- callback(null, false);
- }
-
- return;
- }
+ let version = json.version;
+ if (!version || version != 1) {
+ this._log.error("Could not load JSON file because version not " +
+ "supported: " + version);
+ return false;
+ }
- this._addons = json.addons;
- for each (let record in this._addons) {
- record.modified = new Date(record.modified);
- }
+ this._addons = json.addons;
+ for each (let record in this._addons) {
+ record.modified = new Date(record.modified);
+ }
- for each (let [time, change, id] in json.changes) {
- this._changes.push([new Date(time), change, id]);
- }
-
- if (callback) {
- callback(null, true);
- }
- });
- },
+ for each (let [time, change, id] in json.changes) {
+ this._changes.push([new Date(time), change, id]);
+ }
+ return true;
+ }),
/**
* Saves the current state to a file in the local profile.
*
* @param path
* String path in profile to save to. If not defined, the default
* will be used.
- * @param callback
- * Function to be invoked on save completion. No parameters will be
- * passed to callback.
+ *
+ * @return Promise<>
*/
- saveState: function saveState(path, callback) {
+ saveState: function saveState(path) {
let file = path || DEFAULT_STATE_FILE;
let state = {version: 1, addons: {}, changes: []};
for (let [id, record] in Iterator(this._addons)) {
state.addons[id] = {};
for (let [k, v] in Iterator(record)) {
if (k == "modified") {
state.addons[id][k] = v.getTime();
@@ -258,17 +244,17 @@ AddonsReconciler.prototype = {
}
}
for each (let [time, change, id] in this._changes) {
state.changes.push([time.getTime(), change, id]);
}
this._log.info("Saving reconciler state to file: " + file);
- Utils.jsonSave(file, this, state, callback);
+ return Utils.jsonSave(file, this, state);
},
/**
* Registers a change listener with this instance.
*
* Change listeners are called every time a change is recorded. The listener
* is an object with the function "changeListener" that takes 3 arguments,
* the Date at which the change happened, the type of change (a CHANGE_*
@@ -335,94 +321,96 @@ AddonsReconciler.prototype = {
this._log.debug("Stopping listening and removing AddonManager listeners.");
AddonManager.removeInstallListener(this);
AddonManager.removeAddonListener(this);
this._listening = false;
},
/**
* Refreshes the global state of add-ons by querying the AddonManager.
+ * @return Promise<>
*/
- refreshGlobalState: function refreshGlobalState(callback) {
+ refreshGlobalState: Task.async(function* () {
this._log.info("Refreshing global state from AddonManager.");
- this._ensureStateLoaded();
+ yield this._ensureStateLoaded();
let installs;
+ let addons = yield new Promise(resolve => {
+ AddonManager.getAllAddons(function (addons) {
+ resolve(addons);
+ });
+ });
- AddonManager.getAllAddons(function (addons) {
- let ids = {};
+ let ids = {};
- for each (let addon in addons) {
- ids[addon.id] = true;
- this.rectifyStateFromAddon(addon);
+ for each (let addon in addons) {
+ ids[addon.id] = true;
+ yield this.rectifyStateFromAddon(addon);
+ }
+
+ // Look for locally-defined add-ons that no longer exist and update their
+ // record.
+ for (let [id, addon] in Iterator(this._addons)) {
+ if (id in ids) {
+ continue;
}
- // Look for locally-defined add-ons that no longer exist and update their
- // record.
- for (let [id, addon] in Iterator(this._addons)) {
- if (id in ids) {
- continue;
- }
-
- // If the id isn't in ids, it means that the add-on has been deleted or
- // the add-on is in the process of being installed. We detect the
- // latter by seeing if an AddonInstall is found for this add-on.
-
- if (!installs) {
- let cb = Async.makeSyncCallback();
- AddonManager.getAllInstalls(cb);
- installs = Async.waitForSyncCallback(cb);
- }
+ // If the id isn't in ids, it means that the add-on has been deleted or
+ // the add-on is in the process of being installed. We detect the
+ // latter by seeing if an AddonInstall is found for this add-on.
+ if (!installs) {
+ installs = yield new Promise(resolve => {
+ AddonManager.getAllInstalls(result => resolve(result));
+ });
+ }
- let installFound = false;
- for each (let install in installs) {
- if (install.addon && install.addon.id == id &&
- install.state == AddonManager.STATE_INSTALLED) {
-
- installFound = true;
- break;
- }
- }
+ let installFound = false;
+ for each (let install in installs) {
+ if (install.addon && install.addon.id == id &&
+ install.state == AddonManager.STATE_INSTALLED) {
- if (installFound) {
- continue;
- }
-
- if (addon.installed) {
- addon.installed = false;
- this._log.debug("Adding change because add-on not present in " +
- "Add-on Manager: " + id);
- this._addChange(new Date(), CHANGE_UNINSTALLED, addon);
+ installFound = true;
+ break;
}
}
- // See note for _shouldPersist.
- if (this._shouldPersist) {
- this.saveState(null, callback);
- } else {
- callback();
+ if (installFound) {
+ continue;
}
- }.bind(this));
- },
+
+ if (addon.installed) {
+ addon.installed = false;
+ this._log.debug("Adding change because add-on not present in " +
+ "Add-on Manager: " + id);
+ this._addChange(new Date(), CHANGE_UNINSTALLED, addon);
+ }
+ }
+
+ // See note for _shouldPersist.
+ if (this._shouldPersist) {
+ yield this.saveState(null);
+ }
+ }),
/**
* Rectifies the state of an add-on from an Addon instance.
*
* This basically says "given an Addon instance, assume it is truth and
* apply changes to the local state to reflect it."
*
* This function could result in change listeners being called if the local
* state differs from the passed add-on's state.
*
* @param addon
* Addon instance being updated.
+ * @return Promise<>
*/
- rectifyStateFromAddon: function rectifyStateFromAddon(addon) {
+ rectifyStateFromAddon: Task.async(function* (addon) {
this._log.debug("Rectifying state for addon: " + addon.id);
- this._ensureStateLoaded();
+ yield this._ensureStateLoaded();
let id = addon.id;
let enabled = !addon.userDisabled;
let guid = addon.syncGUID;
let now = new Date();
if (!(id in this._addons)) {
let record = {
@@ -462,17 +450,17 @@ AddonsReconciler.prototype = {
}
if (record.guid != guid) {
record.guid = guid;
// We don't record a change because the Sync engine rectifies this on its
// own. This is tightly coupled with Sync. If this code is ever lifted
// outside of Sync, this exception should likely be removed.
}
- },
+ }),
/**
* Record a change in add-on state.
*
* @param date
* Date at which the change occurred.
* @param change
* The type of the change. A CHANGE_* constant.
@@ -491,106 +479,92 @@ AddonsReconciler.prototype = {
Utils.exceptionStr(ex));
}
}
},
/**
* Obtain the set of changes to add-ons since the date passed.
*
- * This will return an array of arrays. Each entry in the array has the
- * elements [date, change_type, id], where
+ * This returns a promise that resolves with an array of arrays. Each entry
+ * in the array has the elements [date, change_type, id], where
*
* date - Date instance representing when the change occurred.
* change_type - One of CHANGE_* constants.
* id - ID of add-on that changed.
*/
- getChangesSinceDate: function getChangesSinceDate(date) {
- this._ensureStateLoaded();
+ getChangesSinceDate: Task.async(function* (date) {
+ yield this._ensureStateLoaded();
let length = this._changes.length;
for (let i = 0; i < length; i++) {
if (this._changes[i][0] >= date) {
return this._changes.slice(i);
}
}
return [];
- },
+ }),
/**
* Prunes all recorded changes from before the specified Date.
*
* @param date
* Entries older than this Date will be removed.
*/
- pruneChangesBeforeDate: function pruneChangesBeforeDate(date) {
- this._ensureStateLoaded();
+ pruneChangesBeforeDate: Task.async(function* (date) {
+ yield this._ensureStateLoaded();
this._changes = this._changes.filter(function test_age(change) {
return change[0] >= date;
});
- },
-
- /**
- * Obtains the set of all known Sync GUIDs for add-ons.
- *
- * @return Object with guids as keys and values of true.
- */
- getAllSyncGUIDs: function getAllSyncGUIDs() {
- let result = {};
- for (let id in this.addons) {
- result[id] = true;
- }
-
- return result;
- },
+ }),
/**
* Obtain the add-on state record for an add-on by Sync GUID.
*
* If the add-on could not be found, returns null.
*
* @param guid
* Sync GUID of add-on to retrieve.
- * @return Object on success on null on failure.
+ * @return Promise<Object> on success, Promise<null> on failure.
*/
- getAddonStateFromSyncGUID: function getAddonStateFromSyncGUID(guid) {
- for each (let addon in this.addons) {
+ getAddonStateFromSyncGUID: Task.async(function* (guid) {
+ let addons = yield this.addons;
+ for each (let addon in addons) {
if (addon.guid == guid) {
return addon;
}
}
return null;
- },
+ }),
/**
* Ensures that state is loaded before continuing.
*
* This is called internally by anything that accesses the internal data
* structures. It effectively just-in-time loads serialized state.
*/
_ensureStateLoaded: function _ensureStateLoaded() {
- if (this._stateLoaded) {
- return;
+ if (!this._stateLoadPromise) {
+ this._stateLoadPromise = this.loadState();
}
-
- let cb = Async.makeSpinningCallback();
- this.loadState(null, cb);
- cb.wait();
- this._stateLoaded = true;
+ return this._stateLoadPromise;
},
/**
* Handler that is invoked as part of the AddonManager listeners.
*/
_handleListener: function _handlerListener(action, addon, requiresRestart) {
// Since this is called as an observer, we explicitly trap errors and
// log them to ourselves so we don't see errors reported elsewhere.
+ // We also "block" on promises to ensure we are completely done before
+ // the listener returns - this will be turned into a fully async operation
+ // later.
try {
let id = addon.id;
this._log.debug("Add-on change: " + action + " to " + id);
// We assume that every event for non-restartless add-ons is
// followed by another event and that this follow-up event is the most
// appropriate to react to. Currently we ignore onEnabling, onDisabling,
// and onUninstalling for non-restartless add-ons.
@@ -602,39 +576,37 @@ AddonsReconciler.prototype = {
switch (action) {
case "onEnabling":
case "onEnabled":
case "onDisabling":
case "onDisabled":
case "onInstalled":
case "onInstallEnded":
case "onOperationCancelled":
- this.rectifyStateFromAddon(addon);
+ Async.promiseSpinningly(this.rectifyStateFromAddon(addon));
break;
case "onUninstalling":
case "onUninstalled":
let id = addon.id;
- let addons = this.addons;
+ let addons = Async.promiseSpinningly(this.addons);
if (id in addons) {
let now = new Date();
let record = addons[id];
record.installed = false;
record.modified = now;
this._log.debug("Adding change because of uninstall listener: " +
id);
this._addChange(now, CHANGE_UNINSTALLED, record);
}
}
// See note for _shouldPersist.
if (this._shouldPersist) {
- let cb = Async.makeSpinningCallback();
- this.saveState(null, cb);
- cb.wait();
+ Async.promiseSpinningly(this.saveState(null));
}
}
catch (ex) {
this._log.warn("Exception: " + Utils.exceptionStr(ex));
}
},
// AddonListeners
--- a/services/sync/modules/browserid_identity.js
+++ b/services/sync/modules/browserid_identity.js
@@ -248,16 +248,17 @@ this.BrowserIDManager.prototype = {
}).catch(err => {
let authErr = err; // note that we must reject with this error and not a
// subsequent one
// report what failed...
this._log.error("Background fetch for key bundle failed", authErr);
// check if the account still exists
this._fxaService.accountStatus().then(exists => {
if (!exists) {
+ this.account = null;
return fxAccounts.signOut(true);
}
}).catch(err => {
this._log.error("Error while trying to determine FXA existence", err);
}).then(() => {
this._shouldHaveSyncKeyBundle = true; // but we probably don't have one...
this.whenReadyToAuthenticate.reject(authErr)
});
@@ -419,16 +420,17 @@ this.BrowserIDManager.prototype = {
/**
* Pre-fetches any information that might help with migration away from this
* identity. Called after every sync and is really just an optimization that
* allows us to avoid a network request for when we actually need the
* migration info.
*/
prefetchMigrationSentinel: function(service) {
// nothing to do here until we decide to migrate away from FxA.
+ return Promise.resolve();
},
/**
* Return credentials hosts for this identity only.
*/
_getSyncCredentialsHosts: function() {
return Utils.getSyncCredentialsHostsFxA();
},
@@ -752,18 +754,28 @@ this.BrowserIDManager.prototype = {
function BrowserIDClusterManager(service) {
ClusterManager.call(this, service);
}
BrowserIDClusterManager.prototype = {
__proto__: ClusterManager.prototype,
- _findCluster: function() {
- let endPointFromIdentityToken = function() {
+ _findCluster: Task.async(function* () {
+ try {
+ yield this.identity.whenReadyToAuthenticate.promise;
+ // We need to handle node reassignment here. If we are being asked
+ // for a clusterURL while the service already has a clusterURL, then
+ // it's likely a 401 was received using the existing token - in which
+ // case we just discard the existing token and fetch a new one.
+ if (this.service.clusterURL) {
+ log.debug("_findCluster has a pre-existing clusterURL, so discarding the current token");
+ this.identity._token = null;
+ }
+ yield this.identity._ensureValidToken();
// The only reason (in theory ;) that we can end up with a null token
// is when this.identity._canFetchKeys() returned false. In turn, this
// should only happen if the master-password is locked or the credentials
// storage is screwed, and in those cases we shouldn't have started
// syncing so shouldn't get here anyway.
// But better safe than sorry! To keep things clearer, throw an explicit
// exception - the message will appear in the logs and the error will be
// treated as transient.
@@ -774,64 +786,36 @@ BrowserIDClusterManager.prototype = {
// For Sync 1.5 storage endpoints, we use the base endpoint verbatim.
// However, it should end in "/" because we will extend it with
// well known path components. So we add a "/" if it's missing.
if (!endpoint.endsWith("/")) {
endpoint += "/";
}
log.debug("_findCluster returning " + endpoint);
return endpoint;
- }.bind(this);
-
- // Spinningly ensure we are ready to authenticate and have a valid token.
- let promiseClusterURL = function() {
- return this.identity.whenReadyToAuthenticate.promise.then(
- () => {
- // We need to handle node reassignment here. If we are being asked
- // for a clusterURL while the service already has a clusterURL, then
- // it's likely a 401 was received using the existing token - in which
- // case we just discard the existing token and fetch a new one.
- if (this.service.clusterURL) {
- log.debug("_findCluster has a pre-existing clusterURL, so discarding the current token");
- this.identity._token = null;
- }
- return this.identity._ensureValidToken();
- }
- ).then(endPointFromIdentityToken
- );
- }.bind(this);
-
- let cb = Async.makeSpinningCallback();
- promiseClusterURL().then(function (clusterURL) {
- cb(null, clusterURL);
- }).then(
- null, err => {
- log.info("Failed to fetch the cluster URL", err);
+ } catch (err) {
// service.js's verifyLogin() method will attempt to fetch a cluster
// URL when it sees a 401. If it gets null, it treats it as a "real"
// auth error and sets Status.login to LOGIN_FAILED_LOGIN_REJECTED, which
// in turn causes a notification bar to appear informing the user they
// need to re-authenticate.
// On the other hand, if fetching the cluster URL fails with an exception,
// verifyLogin() assumes it is a transient error, and thus doesn't show
// the notification bar under the assumption the issue will resolve
// itself.
// Thus:
// * On a real 401, we must return null.
// * On any other problem we must let an exception bubble up.
if (err instanceof AuthenticationError) {
- // callback with no error and a null result - cb.wait() returns null.
- cb(null, null);
+ return null;
} else {
- // callback with an error - cb.wait() completes by raising an exception.
- cb(err);
+ throw err;
}
- });
- return cb.wait();
- },
+ }
+ }),
getUserBaseURL: function() {
// Legacy Sync and FxA Sync construct the userBaseURL differently. Legacy
// Sync appends path components onto an empty path, and in FxA Sync the
// token server constructs this for us in an opaque manner. Since the
// cluster manager already sets the clusterURL on Service and also has
// access to the current identity, we added this functionality here.
return this.service.clusterURL;
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -45,25 +45,33 @@ this.Tracker = function Tracker(name, en
this._log = Log.repository.getLogger("Sync.Tracker." + name);
let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
this._log.level = Log.Level[level];
this._score = 0;
this._ignored = [];
this.ignoreAll = false;
this.changedIDs = {};
- this.loadChangedIDs();
Svc.Obs.add("weave:engine:start-tracking", this);
Svc.Obs.add("weave:engine:stop-tracking", this);
Svc.Prefs.observe("engine." + this.engine.prefName, this);
+ this.whenInitialized = this.promiseInitialized();
};
Tracker.prototype = {
+ /* Returns a promise that initializes the tracker and is resolved once the
+ tracker is ready to roll. Note that typically you just want
+ this.whenInitialized - calling this will re-initialize.
+ */
+ promiseInitialized() {
+ return this.loadChangedIDs();
+ },
+
/*
* Score can be called as often as desired to decide which engines to sync
*
* Valid values for score:
* -1: Do not sync unless the user specifically requests it (almost disabled)
* 0: Nothing has changed
* 100: Please sync me ASAP!
*
@@ -82,59 +90,61 @@ Tracker.prototype = {
resetScore: function () {
this._score = 0;
},
persistChangedIDs: true,
/**
* Persist changedIDs to disk at a later date.
- * Optionally pass a callback to be invoked when the write has occurred.
+ * Optionally pass a callback to be invoked when the write has occurred, which
+ * is used for tests.
+ * Note that for consistency and future-proofing this returns a promise, but
+ * that promise is resolved immediately rather than after the save - the
+ * actual save happens some time in the future and callers don't care when
+ * it is actually done (other than tests, which use the callback)
*/
- saveChangedIDs: function (cb) {
+ saveChangedIDs: Task.async(function* (cb) {
if (!this.persistChangedIDs) {
this._log.debug("Not saving changedIDs.");
return;
}
Utils.namedTimer(function () {
this._log.debug("Saving changed IDs to " + this.file);
- Utils.jsonSave("changes/" + this.file, this, this.changedIDs, cb);
+ Utils.jsonSave("changes/" + this.file, this, this.changedIDs).then(cb);
}, 1000, this, "_lazySave");
- },
+ }),
- loadChangedIDs: function (cb) {
- Utils.jsonLoad("changes/" + this.file, this, function(json) {
- if (json && (typeof(json) == "object")) {
- this.changedIDs = json;
- } else {
- this._log.warn("Changed IDs file " + this.file + " contains non-object value.");
- json = null;
- }
- if (cb) {
- cb.call(this, json);
- }
- });
- },
+ loadChangedIDs: Task.async(function* () {
+ let json = yield Utils.jsonLoad("changes/" + this.file, this);
+ if (json && (typeof(json) == "object")) {
+ this.changedIDs = json;
+ } else {
+ this._log.warn("Changed IDs file " + this.file + " contains non-object value.");
+ json = null;
+ }
+ return json;
+ }),
// ignore/unignore specific IDs. Useful for ignoring items that are
// being processed, or that shouldn't be synced.
// But note: not persisted to disk
ignoreID: function (id) {
this.unignoreID(id);
this._ignored.push(id);
},
unignoreID: function (id) {
let index = this._ignored.indexOf(id);
if (index != -1)
this._ignored.splice(index, 1);
},
- addChangedID: function (id, when) {
+ addChangedID: Task.async(function* (id, when) {
if (!id) {
this._log.warn("Attempted to add undefined ID to tracker");
return false;
}
if (this.ignoreAll || (id in this._ignored)) {
return false;
}
@@ -143,42 +153,42 @@ Tracker.prototype = {
if (when == null) {
when = Math.floor(Date.now() / 1000);
}
// Add/update the entry if we have a newer time.
if ((this.changedIDs[id] || -Infinity) < when) {
this._log.trace("Adding changed ID: " + id + ", " + when);
this.changedIDs[id] = when;
- this.saveChangedIDs(this.onSavedChangedIDs);
+ yield this.saveChangedIDs(this.onSavedChangedIDs);
}
return true;
- },
+ }),
- removeChangedID: function (id) {
+ removeChangedID: Task.async(function* (id) {
if (!id) {
this._log.warn("Attempted to remove undefined ID to tracker");
return false;
}
if (this.ignoreAll || (id in this._ignored))
return false;
if (this.changedIDs[id] != null) {
this._log.trace("Removing changed ID " + id);
delete this.changedIDs[id];
- this.saveChangedIDs();
+ yield this.saveChangedIDs();
}
return true;
- },
+ }),
- clearChangedIDs: function () {
+ clearChangedIDs: Task.async(function* () {
this._log.trace("Clearing changed ID list");
this.changedIDs = {};
- this.saveChangedIDs();
- },
+ yield this.saveChangedIDs();
+ }),
_isTracking: false,
// Override these in your subclasses.
startTracking: function () {
},
stopTracking: function () {
@@ -187,30 +197,30 @@ Tracker.prototype = {
engineIsEnabled: function () {
if (!this.engine) {
// Can't tell -- we must be running in a test!
return true;
}
return this.engine.enabled;
},
- onEngineEnabledChanged: function (engineEnabled) {
+ onEngineEnabledChanged: Task.async(function* (engineEnabled) {
if (engineEnabled == this._isTracking) {
return;
}
if (engineEnabled) {
this.startTracking();
this._isTracking = true;
} else {
this.stopTracking();
this._isTracking = false;
- this.clearChangedIDs();
+ yield this.clearChangedIDs();
}
- },
+ }),
observe: function (subject, topic, data) {
switch (topic) {
case "weave:engine:start-tracking":
if (!this.engineIsEnabled()) {
return;
}
this._log.trace("Got start-tracking.");
@@ -223,17 +233,18 @@ Tracker.prototype = {
this._log.trace("Got stop-tracking.");
if (this._isTracking) {
this.stopTracking();
this._isTracking = false;
}
return;
case "nsPref:changed":
if (data == PREFS_BRANCH + "engine." + this.engine.prefName) {
- this.onEngineEnabledChanged(this.engine.enabled);
+ // We currently spin waiting for promises in observer notifications.
+ Async.promiseSpinningly(this.onEngineEnabledChanged(this.engine.enabled));
}
return;
}
}
};
@@ -264,27 +275,24 @@ this.Store = function Store(name, engine
name = name || "Unnamed";
this.name = name.toLowerCase();
this.engine = engine;
this._log = Log.repository.getLogger("Sync.Store." + name);
let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
this._log.level = Log.Level[level];
-
- XPCOMUtils.defineLazyGetter(this, "_timer", function() {
- return Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
- });
}
Store.prototype = {
_sleep: function _sleep(delay) {
- let cb = Async.makeSyncCallback();
- this._timer.initWithCallback(cb, delay, Ci.nsITimer.TYPE_ONE_SHOT);
- Async.waitForSyncCallback(cb);
+ return new Promise(resolve => {
+ let timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+ timer.initWithCallback(resolve, delay, Ci.nsITimer.TYPE_ONE_SHOT);
+ });
},
/**
* Apply multiple incoming records against the store.
*
* This is called with a set of incoming records to process. The function
* should look at each record, reconcile with the current local state, and
* make the local changes required to bring its state in alignment with the
@@ -292,57 +300,60 @@ Store.prototype = {
*
* The default implementation simply iterates over all records and calls
* applyIncoming(). Store implementations may overwrite this function
* if desired.
*
* @param records Array of records to apply
* @return Array of record IDs which did not apply cleanly
*/
- applyIncomingBatch: function (records) {
+ applyIncomingBatch: Task.async(function* (records) {
let failed = [];
for each (let record in records) {
try {
- this.applyIncoming(record);
+ yield this.applyIncoming(record);
} catch (ex if (ex.code == Engine.prototype.eEngineAbortApplyIncoming)) {
// This kind of exception should have a 'cause' attribute, which is an
// originating exception.
// ex.cause will carry its stack with it when rethrown.
throw ex.cause;
} catch (ex if !Async.isShutdownException(ex)) {
this._log.warn("Failed to apply incoming record " + record.id);
this._log.warn("Encountered exception: " + Utils.exceptionStr(ex));
this.engine._noteApplyFailure();
failed.push(record.id);
}
};
return failed;
- },
+ }),
/**
* Apply a single record against the store.
*
* This takes a single record and makes the local changes required so the
* local state matches what's in the record.
*
* The default implementation calls one of remove(), create(), or update()
* depending on the state obtained from the store itself. Store
* implementations may overwrite this function if desired.
*
* @param record
* Record to apply
*/
- applyIncoming: function (record) {
+ applyIncoming: Task.async(function* (record) {
if (record.deleted)
- this.remove(record);
- else if (!this.itemExists(record.id))
- this.create(record);
- else
- this.update(record);
- },
+ yield this.remove(record);
+ else {
+ let exists = yield this.itemExists(record.id);
+ if (!exists)
+ yield this.create(record);
+ else
+ yield this.update(record);
+ }
+ }),
// override these in derived objects
/**
* Create an item in the store from a record.
*
* This is called by the default implementation of applyIncoming(). If using
* applyIncomingBatch(), this won't be called unless your store calls it.
@@ -607,33 +618,43 @@ this.Engine = function Engine(name, serv
if (!service) {
throw new Error("Engine must be associated with a Service instance.");
}
this.Name = name || "Unnamed";
this.name = name.toLowerCase();
this.service = service;
- this._notify = Utils.notify("weave:engine:");
+ this._promiseNotify = Utils.promiseNotify("weave:engine:");
this._log = Log.repository.getLogger("Sync.Engine." + this.Name);
let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
this._log.level = Log.Level[level];
- this._tracker; // initialize tracker to load previously changed IDs
+ // A promise that is resolved once the engine finishes async initialization.
+ this.whenInitialized = this.promiseInitialized();
+
this._log.debug("Engine initialized");
}
Engine.prototype = {
// _storeObj, and _trackerObj should to be overridden in subclasses
_storeObj: Store,
_trackerObj: Tracker,
// Local 'constant'.
// Signal to the engine that processing further records is pointless.
eEngineAbortApplyIncoming: "error.engine.abort.applyincoming",
+ // A function to perform deferred initialization of the engine - useful when
+ // initialization functions are async/promise based. Note that typically you
+ // want this.whenInitialized - calling this function will re-initialize.
+ promiseInitialized() {
+ // We have nothing to do, but our tracker might.
+ return this._tracker.whenInitialized;
+ },
+
get prefName() {
return this.name;
},
get enabled() {
return Svc.Prefs.get("engine." + this.prefName, false);
},
@@ -661,49 +682,46 @@ Engine.prototype = {
if (!this.enabled) {
return;
}
if (!this._sync) {
throw "engine does not implement _sync method";
}
- this._notify("sync", this.name, this._sync)();
+ return this._promiseNotify("sync", this.name, this._sync)();
},
/**
* Get rid of any local meta-data.
*/
resetClient: function () {
if (!this._resetClient) {
throw "engine does not implement _resetClient method";
}
- this._notify("reset-client", this.name, this._resetClient)();
+ return this._promiseNotify("reset-client", this.name, this._resetClient)();
},
- _wipeClient: function () {
- this.resetClient();
+ _wipeClient: Task.async(function* () {
+ yield this.resetClient();
this._log.debug("Deleting all local data");
this._tracker.ignoreAll = true;
- this._store.wipe();
+ yield this._store.wipe();
this._tracker.ignoreAll = false;
- this._tracker.clearChangedIDs();
- },
+ yield this._tracker.clearChangedIDs();
+ }),
wipeClient: function () {
- this._notify("wipe-client", this.name, this._wipeClient)();
+ return this._promiseNotify("wipe-client", this.name, this._wipeClient)();
}
};
this.SyncEngine = function SyncEngine(name, service) {
Engine.call(this, name || "SyncEngine", service);
-
- this.loadToFetch();
- this.loadPreviousFailed();
}
// Enumeration to define approaches to handling bad records.
// Attached to the constructor to allow use as a kind of static enumeration.
SyncEngine.kRecoveryStrategy = {
ignore: "ignore",
retry: "retry",
error: "error"
@@ -731,16 +749,24 @@ SyncEngine.prototype = {
// How many records to pull at one time when specifying IDs. This is to avoid
// URI length limitations.
guidFetchBatchSize: DEFAULT_GUID_FETCH_BATCH_SIZE,
mobileGUIDFetchBatchSize: DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE,
// How many records to process in a single batch.
applyIncomingBatchSize: DEFAULT_STORE_BATCH_SIZE,
+ // Start async initialization.
+ promiseInitialized: Task.async(function* () {
+ yield Engine.prototype.promiseInitialized.call(this);
+ yield this.loadToFetch();
+ yield this.loadPreviousFailed();
+ }),
+
+
get storageURL() {
return this.service.storageURL;
},
get engineURL() {
return this.storageURL + this.name;
},
@@ -794,50 +820,49 @@ SyncEngine.prototype = {
return;
}
this._toFetch = val;
Utils.namedTimer(function () {
Utils.jsonSave("toFetch/" + this.name, this, val, cb);
}, 0, this, "_toFetchDelay");
},
- loadToFetch: function () {
+ loadToFetch: Task.async(function* () {
// Initialize to empty if there's no file.
this._toFetch = [];
- Utils.jsonLoad("toFetch/" + this.name, this, function(toFetch) {
- if (toFetch) {
- this._toFetch = toFetch;
- }
- });
- },
+ let toFetch = yield Utils.jsonLoad("toFetch/" + this.name, this);
+ if (toFetch) {
+ this._toFetch = toFetch;
+ }
+ }),
get previousFailed() {
return this._previousFailed;
},
set previousFailed(val) {
let cb = (error) => this._log.error(Utils.exceptionStr(error));
// Coerce the array to a string for more efficient comparison.
if (val + "" == this._previousFailed) {
return;
}
this._previousFailed = val;
+ // Note we aren't returning a promise here, so the save happens in the background
Utils.namedTimer(function () {
Utils.jsonSave("failed/" + this.name, this, val, cb);
}, 0, this, "_previousFailedDelay");
},
- loadPreviousFailed: function () {
+ loadPreviousFailed: Task.async(function* () {
// Initialize to empty if there's no file
this._previousFailed = [];
- Utils.jsonLoad("failed/" + this.name, this, function(previousFailed) {
- if (previousFailed) {
- this._previousFailed = previousFailed;
- }
- });
- },
+ let previousFailed = yield Utils.jsonLoad("failed/" + this.name, this);
+ if (previousFailed) {
+ this._previousFailed = previousFailed;
+ }
+ }),
/*
* lastSyncLocal is a timestamp in local time.
*/
get lastSyncLocal() {
return parseInt(Svc.Prefs.get(this.name + ".lastSyncLocal", "0"), 10);
},
set lastSyncLocal(value) {
@@ -846,32 +871,34 @@ SyncEngine.prototype = {
},
/*
* Returns a mapping of IDs -> changed timestamp. Engine implementations
* can override this method to bypass the tracker for certain or all
* changed items.
*/
getChangedIDs: function () {
- return this._tracker.changedIDs;
+ return Promise.resolve(this._tracker.changedIDs);
},
// Create a new record using the store and add in crypto fields.
- _createRecord: function (id) {
- let record = this._store.createRecord(id, this.name);
+ _createRecord: Task.async(function* (id) {
+ let record = yield this._store.createRecord(id, this.name);
record.id = id;
record.collection = this.name;
return record;
- },
+ }),
// Any setup that needs to happen at the beginning of each sync.
- _syncStartup: function () {
+ _syncStartup: Task.async(function* () {
+ // First wait for async initialization.
+ yield this.whenInitialized;
// Determine if we need to wipe on outdated versions
- let metaGlobal = this.service.recordManager.get(this.metaURL);
+ let metaGlobal = yield this.service.recordManager.get(this.metaURL);
let engines = metaGlobal.payload.engines || {};
let engineData = engines[this.name] || {};
let needsWipe = false;
// Assume missing versions are 0 and wipe the server
if ((engineData.version || 0) < this.version) {
this._log.debug("Old engine data: " + [engineData.version, this.version]);
@@ -894,67 +921,67 @@ SyncEngine.prototype = {
let error = new String("New data: " + [engineData.version, this.version]);
error.failureCode = VERSION_OUT_OF_DATE;
throw error;
}
// Changes to syncID mean we'll need to upload everything
else if (engineData.syncID != this.syncID) {
this._log.debug("Engine syncIDs: " + [engineData.syncID, this.syncID]);
this.syncID = engineData.syncID;
- this._resetClient();
+ yield this._resetClient();
};
// Delete any existing data and reupload on bad version or missing meta.
// No crypto component here...? We could regenerate per-collection keys...
if (needsWipe) {
- this.wipeServer();
+ yield this.wipeServer();
}
// Save objects that need to be uploaded in this._modified. We also save
// the timestamp of this fetch in this.lastSyncLocal. As we successfully
// upload objects we remove them from this._modified. If an error occurs
// or any objects fail to upload, they will remain in this._modified. At
// the end of a sync, or after an error, we add all objects remaining in
// this._modified to the tracker.
this.lastSyncLocal = Date.now();
if (this.lastSync) {
- this._modified = this.getChangedIDs();
+ this._modified = yield this.getChangedIDs();
} else {
// Mark all items to be uploaded, but treat them as changed from long ago
this._log.debug("First sync, uploading all items");
this._modified = {};
- for (let id in this._store.getAllIDs()) {
+ for (let id in yield this._store.getAllIDs()) {
this._modified[id] = 0;
}
}
// Clear the tracker now. If the sync fails we'll add the ones we failed
// to upload back.
- this._tracker.clearChangedIDs();
+ yield this._tracker.clearChangedIDs();
this._log.info(Object.keys(this._modified).length +
" outgoing items pre-reconciliation");
// Keep track of what to delete at the end of sync
this._delete = {};
- },
+ }),
/**
* A tiny abstraction to make it easier to test incoming record
* application.
*/
_itemSource: function () {
return new Collection(this.engineURL, this._recordObj, this.service);
},
/**
* Process incoming records.
* In the most awful and untestable way possible.
* This now accepts something that makes testing vaguely less impossible.
*/
- _processIncoming: function (newitems) {
+ _processIncoming: Task.async(function* (newitems) {
this._log.trace("Downloading & applying server changes");
// Figure out how many total items to fetch this sync; do less on mobile.
let batchSize = this.downloadLimit || Infinity;
let isMobile = (Svc.Prefs.get("client.type") == "mobile");
if (!newitems) {
newitems = this._itemSource();
@@ -984,52 +1011,56 @@ SyncEngine.prototype = {
// Reset previousFailed for each sync since previously failed items may not fail again.
this.previousFailed = [];
// Used (via exceptions) to allow the record handler/reconciliation/etc.
// methods to signal that they would like processing of incoming records to
// cease.
let aborting = undefined;
- function doApplyBatch() {
+ function* doApplyBatch() {
+ // With promises we may yield mid-apply, so it is critical we take and reset the batch up-front.
+ let toApply = applyBatch;
+ applyBatch = [];
this._tracker.ignoreAll = true;
try {
- failed = failed.concat(this._store.applyIncomingBatch(applyBatch));
+ let thisFailed = yield this._store.applyIncomingBatch(toApply);
+ failed = failed.concat(thisFailed);
} catch (ex if !Async.isShutdownException(ex)) {
// Catch any error that escapes from applyIncomingBatch. At present
// those will all be abort events.
this._log.warn("Got exception " + Utils.exceptionStr(ex) +
", aborting processIncoming.");
aborting = ex;
}
this._tracker.ignoreAll = false;
- applyBatch = [];
}
- function doApplyBatchAndPersistFailed() {
+ function* doApplyBatchAndPersistFailed() {
// Apply remaining batch.
if (applyBatch.length) {
- doApplyBatch.call(this);
+ yield doApplyBatch.call(this);
}
// Persist failed items so we refetch them.
if (failed.length) {
this.previousFailed = Utils.arrayUnion(failed, this.previousFailed);
count.failed += failed.length;
this._log.debug("Records that failed to apply: " + failed);
failed = [];
}
}
let key = this.service.collectionKeys.keyForCollection(this.name);
// Not binding this method to 'this' for performance reasons. It gets
// called for every incoming record.
+ // XXX - Task.async/Promises is presumably *much* worse than .bind() ;)
let self = this;
- newitems.recordHandler = function(item) {
+ newitems.recordHandler = Task.async(function* (item) {
if (aborting) {
return;
}
// Grab a later last modified if possible
if (self.lastModified == null || item.modified > self.lastModified)
self.lastModified = item.modified;
@@ -1038,27 +1069,28 @@ SyncEngine.prototype = {
// Remember which records were processed
handled.push(item.id);
try {
try {
item.decrypt(key);
} catch (ex if Utils.isHMACMismatch(ex)) {
- let strategy = self.handleHMACMismatch(item, true);
+ let strategy = yield self.handleHMACMismatch(item, true);
+
if (strategy == SyncEngine.kRecoveryStrategy.retry) {
// You only get one retry.
try {
// Try decrypting again, typically because we've got new keys.
self._log.info("Trying decrypt again...");
key = self.service.collectionKeys.keyForCollection(self.name);
item.decrypt(key);
strategy = null;
} catch (ex if Utils.isHMACMismatch(ex)) {
- strategy = self.handleHMACMismatch(item, false);
+ strategy = yield self.handleHMACMismatch(item, false);
}
}
switch (strategy) {
case null:
// Retry succeeded! No further handling.
break;
case SyncEngine.kRecoveryStrategy.retry:
@@ -1079,17 +1111,17 @@ SyncEngine.prototype = {
self._log.warn("Error decrypting record: " + Utils.exceptionStr(ex));
self._noteApplyFailure();
failed.push(item.id);
return;
}
let shouldApply;
try {
- shouldApply = self._reconcile(item);
+ shouldApply = yield self._reconcile(item);
} catch (ex if (ex.code == Engine.prototype.eEngineAbortApplyIncoming)) {
self._log.warn("Reconciliation failed: aborting incoming processing.");
self._noteApplyFailure();
failed.push(item.id);
aborting = ex.cause;
} catch (ex if !Async.isShutdownException(ex)) {
self._log.warn("Failed to reconcile incoming record " + item.id);
self._log.warn("Encountered exception: " + Utils.exceptionStr(ex));
@@ -1102,25 +1134,26 @@ SyncEngine.prototype = {
count.applied++;
applyBatch.push(item);
} else {
count.reconciled++;
self._log.trace("Skipping reconciled incoming item " + item.id);
}
if (applyBatch.length == self.applyIncomingBatchSize) {
- doApplyBatch.call(self);
+ yield doApplyBatch.call(self);
}
- self._store._sleep(0);
- };
+ yield self._store._sleep(0);
+
+ }); // end of .recordHandler()
// Only bother getting data from the server if there's new things
if (this.lastModified == null || this.lastModified > this.lastSync) {
- let resp = newitems.get();
- doApplyBatchAndPersistFailed.call(this);
+ let resp = yield newitems.get();
+ yield doApplyBatchAndPersistFailed.call(this);
if (!resp.success) {
resp.failureCode = ENGINE_DOWNLOAD_FAIL;
throw resp;
}
if (aborting) {
throw aborting;
}
@@ -1132,17 +1165,17 @@ SyncEngine.prototype = {
// Sort and limit so that on mobile we only get the last X records.
guidColl.limit = this.downloadLimit;
guidColl.newer = this.lastSync;
// index: Orders by the sortindex descending (highest weight first).
guidColl.sort = "index";
- let guids = guidColl.get();
+ let guids = yield guidColl.get();
if (!guids.success)
throw guids;
// Figure out which guids weren't just fetched then remove any guids that
// were already waiting and prepend the new ones
let extra = Utils.arraySub(guids.obj, handled);
if (extra.length > 0) {
fetchBatch = Utils.arrayUnion(extra, fetchBatch);
@@ -1165,17 +1198,17 @@ SyncEngine.prototype = {
while (fetchBatch.length && !aborting) {
// Reuse the original query, but get rid of the restricting params
// and batch remaining records.
newitems.limit = 0;
newitems.newer = 0;
newitems.ids = fetchBatch.slice(0, batchSize);
// Reuse the existing record handler set earlier
- let resp = newitems.get();
+ let resp = yield newitems.get();
if (!resp.success) {
resp.failureCode = ENGINE_DOWNLOAD_FAIL;
throw resp;
}
// This batch was successfully applied. Not using
// doApplyBatchAndPersistFailed() here to avoid writing toFetch twice.
fetchBatch = fetchBatch.slice(batchSize);
@@ -1192,84 +1225,84 @@ SyncEngine.prototype = {
}
if (this.lastSync < this.lastModified) {
this.lastSync = this.lastModified;
}
}
// Apply remaining items.
- doApplyBatchAndPersistFailed.call(this);
+ yield doApplyBatchAndPersistFailed.call(this);
count.newFailed = this.previousFailed.reduce((count, engine) => {
if (failedInPreviousSync.indexOf(engine) == -1) {
count++;
this._noteApplyNewFailure();
}
return count;
}, 0);
count.succeeded = Math.max(0, count.applied - count.failed);
this._log.info(["Records:",
count.applied, "applied,",
count.succeeded, "successfully,",
count.failed, "failed to apply,",
count.newFailed, "newly failed to apply,",
count.reconciled, "reconciled."].join(" "));
Observers.notify("weave:engine:sync:applied", count, this.name);
- },
+ }),
_noteApplyFailure: function () {
Services.telemetry.getKeyedHistogramById(
"WEAVE_ENGINE_APPLY_FAILURES").add(this.name);
},
_noteApplyNewFailure: function () {
Services.telemetry.getKeyedHistogramById(
"WEAVE_ENGINE_APPLY_NEW_FAILURES").add(this.name);
},
/**
* Find a GUID of an item that is a duplicate of the incoming item but happens
* to have a different GUID
*
- * @return GUID of the similar item; falsy otherwise
+ * @return Promise<GUID> of the similar item; falsy otherwise
*/
_findDupe: function (item) {
// By default, assume there's no dupe items for the engine
+ return Promise.resolve();
},
- _deleteId: function (id) {
- this._tracker.removeChangedID(id);
+ _deleteId: Task.async(function* (id) {
+ yield this._tracker.removeChangedID(id);
// Remember this id to delete at the end of sync
if (this._delete.ids == null)
this._delete.ids = [id];
else
this._delete.ids.push(id);
- },
+ }),
/**
* Reconcile incoming record with local state.
*
* This function essentially determines whether to apply an incoming record.
*
* @param item
* Record from server to be tested for application.
- * @return boolean
+ * @return Promise<boolean>
* Truthy if incoming record should be applied. False if not.
*/
- _reconcile: function (item) {
+ _reconcile: Task.async(function* (item) {
if (this._log.level <= Log.Level.Trace) {
this._log.trace("Incoming: " + item);
}
-
// We start reconciling by collecting a bunch of state. We do this here
// because some state may change during the course of this function and we
// need to operate on the original values.
- let existsLocally = this._store.itemExists(item.id);
+ let existsLocally = yield this._store.itemExists(item.id);
let locallyModified = item.id in this._modified;
// TODO Handle clock drift better. Tracked in bug 721181.
let remoteAge = AsyncResource.serverTime - item.modified;
let localAge = locallyModified ?
(Date.now() / 1000 - this._modified[item.id]) : null;
let remoteIsNewer = remoteAge < localAge;
@@ -1314,35 +1347,35 @@ SyncEngine.prototype = {
// data. If the incoming record does not exist locally, we check for a local
// duplicate existing under a different ID. The default implementation of
// _findDupe() is empty, so engines have to opt in to this functionality.
//
// If we find a duplicate, we change the local ID to the incoming ID and we
// refresh the metadata collected above. See bug 710448 for the history
// of this logic.
if (!existsLocally) {
- let dupeID = this._findDupe(item);
+ let dupeID = yield this._findDupe(item);
if (dupeID) {
this._log.trace("Local item " + dupeID + " is a duplicate for " +
"incoming item " + item.id);
// The local, duplicate ID is always deleted on the server.
- this._deleteId(dupeID);
+ yield this._deleteId(dupeID);
// The current API contract does not mandate that the ID returned by
// _findDupe() actually exists. Therefore, we have to perform this
// check.
- existsLocally = this._store.itemExists(dupeID);
+ existsLocally = yield this._store.itemExists(dupeID);
// We unconditionally change the item's ID in case the engine knows of
// an item but doesn't expose it through itemExists. If the API
// contract were stronger, this could be changed.
this._log.debug("Switching local ID to incoming: " + dupeID + " -> " +
item.id);
- this._store.changeItemID(dupeID, item.id);
+ yield this._store.changeItemID(dupeID, item.id);
// If the local item was modified, we carry its metadata forward so
// appropriate reconciling can be performed.
if (dupeID in this._modified) {
locallyModified = true;
localAge = Date.now() / 1000 - this._modified[dupeID];
remoteIsNewer = remoteAge < localAge;
@@ -1391,17 +1424,17 @@ SyncEngine.prototype = {
}
// If the remote and local records are the same, there is nothing to be
// done, so we don't do anything. In the ideal world, this logic wouldn't
// be here and the engine would take a record and apply it. The reason we
// want to defer this logic is because it would avoid a redundant and
// possibly expensive dip into the storage layer to query item state.
// This should get addressed in the async rewrite, so we ignore it for now.
- let localRecord = this._createRecord(item.id);
+ let localRecord = yield this._createRecord(item.id);
let recordsEqual = Utils.deepEquals(item.cleartext,
localRecord.cleartext);
// If the records are the same, we don't need to do anything. This does
// potentially throw away a local modification time. But, if the records
// are the same, does it matter?
if (recordsEqual) {
this._log.trace("Ignoring incoming item because the local item is " +
@@ -1421,36 +1454,36 @@ SyncEngine.prototype = {
// At this point, records are different and the local record is modified.
// We resolve conflicts by record age, where the newest one wins. This does
// result in data loss and should be handled by giving the engine an
// opportunity to merge the records. Bug 720592 tracks this feature.
this._log.warn("DATA LOSS: Both local and remote changes to record: " +
item.id);
return remoteIsNewer;
- },
+ }),
// Upload outgoing records.
- _uploadOutgoing: function () {
+ _uploadOutgoing: Task.async(function* () {
this._log.trace("Uploading local changes to server.");
let modifiedIDs = Object.keys(this._modified);
if (modifiedIDs.length) {
this._log.trace("Preparing " + modifiedIDs.length +
" outgoing records");
// collection we'll upload
let up = new Collection(this.engineURL, null, this.service);
let count = 0;
// Upload what we've got so far in the collection
- let doUpload = Utils.bind2(this, function(desc) {
+ let doUpload = Utils.bind2(this, Task.async(function* (desc) {
this._log.info("Uploading " + desc + " of " + modifiedIDs.length +
" records");
- let resp = up.post();
+ let resp = yield up.post();
if (!resp.success) {
this._log.debug("Uploading records failed: " + resp);
resp.failureCode = ENGINE_UPLOAD_FAIL;
throw resp;
}
// Update server timestamp from the upload.
let modified = resp.headers["x-weave-timestamp"];
@@ -1464,98 +1497,98 @@ SyncEngine.prototype = {
+ failed_ids.join(", "));
// Clear successfully uploaded objects.
for each (let id in resp.obj.success) {
delete this._modified[id];
}
up.clearRecords();
- });
+ }));
for each (let id in modifiedIDs) {
try {
- let out = this._createRecord(id);
+ let out = yield this._createRecord(id);
if (this._log.level <= Log.Level.Trace)
this._log.trace("Outgoing: " + out);
out.encrypt(this.service.collectionKeys.keyForCollection(this.name));
up.pushData(out);
} catch (ex if !Async.isShutdownException(ex)) {
this._log.warn("Error creating record: " + Utils.exceptionStr(ex));
}
// Partial upload
if ((++count % MAX_UPLOAD_RECORDS) == 0)
- doUpload((count - MAX_UPLOAD_RECORDS) + " - " + count + " out");
+ yield doUpload((count - MAX_UPLOAD_RECORDS) + " - " + count + " out");
- this._store._sleep(0);
+ yield this._store._sleep(0);
}
// Final upload
if (count % MAX_UPLOAD_RECORDS > 0)
- doUpload(count >= MAX_UPLOAD_RECORDS ? "last batch" : "all");
+ yield doUpload(count >= MAX_UPLOAD_RECORDS ? "last batch" : "all");
}
- },
+ }),
// Any cleanup necessary.
// Save the current snapshot so as to calculate changes at next sync
- _syncFinish: function () {
+ _syncFinish: Task.async(function* () {
this._log.trace("Finishing up sync");
this._tracker.resetScore();
let doDelete = Utils.bind2(this, function(key, val) {
let coll = new Collection(this.engineURL, this._recordObj, this.service);
coll[key] = val;
- coll.delete();
+ return coll.delete();
});
for (let [key, val] in Iterator(this._delete)) {
// Remove the key for future uses
delete this._delete[key];
// Send a simple delete for the property
if (key != "ids" || val.length <= 100)
- doDelete(key, val);
+ yield doDelete(key, val);
else {
// For many ids, split into chunks of at most 100
while (val.length > 0) {
- doDelete(key, val.slice(0, 100));
+ yield doDelete(key, val.slice(0, 100));
val = val.slice(100);
}
}
}
- },
+ }),
- _syncCleanup: function () {
+ _syncCleanup: Task.async(function* () {
if (!this._modified) {
return;
}
// Mark failed WBOs as changed again so they are reuploaded next time.
for (let [id, when] in Iterator(this._modified)) {
- this._tracker.addChangedID(id, when);
+ yield this._tracker.addChangedID(id, when);
}
this._modified = {};
- },
+ }),
- _sync: function () {
+ _sync: Task.async(function* () {
try {
- this._syncStartup();
+ yield this._syncStartup();
Observers.notify("weave:engine:sync:status", "process-incoming");
- this._processIncoming();
+ yield this._processIncoming();
Observers.notify("weave:engine:sync:status", "upload-outgoing");
- this._uploadOutgoing();
- this._syncFinish();
+ yield this._uploadOutgoing();
+ yield this._syncFinish();
} finally {
- this._syncCleanup();
+ yield this._syncCleanup();
}
- },
+ }),
- canDecrypt: function () {
+ canDecrypt: Task.async(function* () {
// Report failure even if there's nothing to decrypt
let canDecrypt = false;
// Fetch the most recently uploaded record and try to decrypt it
let test = new Collection(this.engineURL, this._recordObj, this.service);
test.limit = 1;
test.sort = "newest";
test.full = true;
@@ -1564,41 +1597,43 @@ SyncEngine.prototype = {
test.recordHandler = function recordHandler(record) {
record.decrypt(key);
canDecrypt = true;
}.bind(this);
// Any failure fetching/decrypting will just result in false
try {
this._log.trace("Trying to decrypt a record from the server..");
- test.get();
+ yield test.get();
} catch (ex if !Async.isShutdownException(ex)) {
this._log.debug("Failed test decrypt: " + Utils.exceptionStr(ex));
}
return canDecrypt;
- },
+ }),
_resetClient: function () {
this.resetLastSync();
this.previousFailed = [];
this.toFetch = [];
+ return Promise.resolve();
},
- wipeServer: function () {
- let response = this.service.resource(this.engineURL).delete();
+ wipeServer: Task.async(function* () {
+ let response = yield this.service.resource(this.engineURL).delete();
if (response.status != 200 && response.status != 404) {
throw response;
}
- this._resetClient();
- },
+ yield this._resetClient();
+ }),
removeClientData: function () {
// Implement this method in engines that store client specific data
// on the server.
+ return Promise.resolve();
},
/*
* Decide on (and partially effect) an error-handling strategy.
*
* Asks the Service to respond to an HMAC error, which might result in keys
* being downloaded. That call returns true if an action which might allow a
* retry to occur.
@@ -1609,13 +1644,17 @@ SyncEngine.prototype = {
*
* Subclasses of SyncEngine can override this method to allow for different
* behavior -- e.g., to delete and ignore erroneous entries.
*
* All return values will be part of the kRecoveryStrategy enumeration.
*/
handleHMACMismatch: function (item, mayRetry) {
// By default we either try again, or bail out noisily.
- return (this.service.handleHMACEvent() && mayRetry) ?
- SyncEngine.kRecoveryStrategy.retry :
- SyncEngine.kRecoveryStrategy.error;
+ return this.service.handleHMACEvent().then(
+ result => {
+ return (result && mayRetry) ?
+ SyncEngine.kRecoveryStrategy.retry :
+ SyncEngine.kRecoveryStrategy.error;
+ }
+ );
}
};
--- a/services/sync/modules/engines/addons.js
+++ b/services/sync/modules/engines/addons.js
@@ -50,16 +50,28 @@ XPCOMUtils.defineLazyModuleGetter(this,
XPCOMUtils.defineLazyModuleGetter(this, "AddonRepository",
"resource://gre/modules/addons/AddonRepository.jsm");
this.EXPORTED_SYMBOLS = ["AddonsEngine"];
// 7 days in milliseconds.
const PRUNE_ADDON_CHANGES_THRESHOLD = 60 * 60 * 24 * 7 * 1000;
+// A utility function suitable for passing to AddonUtils, which still uses
+// a "callback(error, result)" pattern.
+function makeAddonUtilsCallback(resolve, reject) {
+ return function(error, result) {
+ if (error) {
+ reject(error);
+ } else {
+ resolve(result);
+ }
+ };
+}
+
/**
* AddonRecord represents the state of an add-on in an application.
*
* Each add-on has its own record for each application ID it is installed
* on.
*
* The ID of add-on records is a randomly-generated GUID. It is random instead
* of deterministic so the URIs of the records cannot be guessed and so
@@ -121,121 +133,119 @@ AddonsEngine.prototype = {
syncPriority: 5,
_reconciler: null,
/**
* Override parent method to find add-ons by their public ID, not Sync GUID.
*/
- _findDupe: function _findDupe(item) {
+ _findDupe: Task.async(function* (item) {
let id = item.addonID;
// The reconciler should have been updated at the top of the sync, so we
// can assume it is up to date when this function is called.
- let addons = this._reconciler.addons;
+ let addons = yield this._reconciler.addons;
if (!(id in addons)) {
return null;
}
let addon = addons[id];
if (addon.guid != item.id) {
return addon.guid;
}
return null;
- },
+ }),
/**
* Override getChangedIDs to pull in tracker changes plus changes from the
* reconciler log.
*/
- getChangedIDs: function getChangedIDs() {
+ getChangedIDs: Task.async(function* () {
let changes = {};
for (let [id, modified] in Iterator(this._tracker.changedIDs)) {
changes[id] = modified;
}
let lastSyncDate = new Date(this.lastSync * 1000);
// The reconciler should have been refreshed at the beginning of a sync and
// we assume this function is only called from within a sync.
- let reconcilerChanges = this._reconciler.getChangesSinceDate(lastSyncDate);
- let addons = this._reconciler.addons;
+ let reconcilerChanges = yield this._reconciler.getChangesSinceDate(lastSyncDate);
+ let addons = yield this._reconciler.addons;
for each (let change in reconcilerChanges) {
let changeTime = change[0];
let id = change[2];
if (!(id in addons)) {
continue;
}
// Keep newest modified time.
if (id in changes && changeTime < changes[id]) {
continue;
}
- if (!this._store.isAddonSyncable(addons[id])) {
+ if (!(yield this._store.isAddonSyncable(addons[id]))) {
continue;
}
this._log.debug("Adding changed add-on from changes log: " + id);
let addon = addons[id];
changes[addon.guid] = changeTime.getTime() / 1000;
}
return changes;
- },
+ }),
/**
* Override start of sync function to refresh reconciler.
*
* Many functions in this class assume the reconciler is refreshed at the
* top of a sync. If this ever changes, those functions should be revisited.
*
* Technically speaking, we don't need to refresh the reconciler on every
* sync since it is installed as an AddonManager listener. However, add-ons
* are complicated and we force a full refresh, just in case the listeners
* missed something.
*/
- _syncStartup: function _syncStartup() {
+ _syncStartup: Task.async(function* () {
// We refresh state before calling parent because syncStartup in the parent
// looks for changed IDs, which is dependent on add-on state being up to
// date.
- this._refreshReconcilerState();
-
- SyncEngine.prototype._syncStartup.call(this);
- },
+ yield this._refreshReconcilerState();
+ yield SyncEngine.prototype._syncStartup.call(this);
+ }),
/**
* Override end of sync to perform a little housekeeping on the reconciler.
*
* We prune changes to prevent the reconciler state from growing without
* bound. Even if it grows unbounded, there would have to be many add-on
* changes (thousands) for it to slow things down significantly. This is
* highly unlikely to occur. Still, we exercise defense just in case.
*/
- _syncCleanup: function _syncCleanup() {
+ _syncCleanup: Task.async(function* () {
let ms = 1000 * this.lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
- this._reconciler.pruneChangesBeforeDate(new Date(ms));
+ yield this._reconciler.pruneChangesBeforeDate(new Date(ms));
- SyncEngine.prototype._syncCleanup.call(this);
- },
+ yield SyncEngine.prototype._syncCleanup.call(this);
+ }),
/**
* Helper function to ensure reconciler is up to date.
*
- * This will synchronously load the reconciler's state from the file
+ * This will asynchronously load the reconciler's state from the file
* system (if needed) and refresh the state of the reconciler.
+ * Returns promise.
*/
_refreshReconcilerState: function _refreshReconcilerState() {
this._log.debug("Refreshing reconciler state");
- let cb = Async.makeSpinningCallback();
- this._reconciler.refreshGlobalState(cb);
- cb.wait();
+ return this._reconciler.refreshGlobalState();
}
};
/**
* This is the primary interface between Sync and the Addons Manager.
*
* In addition to the core store APIs, we provide convenience functions to wrap
* Add-on Manager APIs with Sync-specific semantics.
@@ -253,17 +263,17 @@ AddonsStore.prototype = {
get reconciler() {
return this.engine._reconciler;
},
/**
* Override applyIncoming to filter out records we can't handle.
*/
- applyIncoming: function applyIncoming(record) {
+ applyIncoming: Task.async(function* (record) {
// The fields we look at aren't present when the record is deleted.
if (!record.deleted) {
// Ignore records not belonging to our application ID because that is the
// current policy.
if (record.applicationID != Services.appinfo.ID) {
this._log.info("Ignoring incoming record from other App ID: " +
record.id);
return;
@@ -273,127 +283,122 @@ AddonsStore.prototype = {
// is our current policy.
if (record.source != "amo") {
this._log.info("Ignoring unknown add-on source (" + record.source + ")" +
" for " + record.id);
return;
}
}
- Store.prototype.applyIncoming.call(this, record);
- },
+ yield Store.prototype.applyIncoming.call(this, record);
+ }),
/**
* Provides core Store API to create/install an add-on from a record.
*/
- create: function create(record) {
- let cb = Async.makeSpinningCallback();
- AddonUtils.installAddons([{
- id: record.addonID,
- syncGUID: record.id,
- enabled: record.enabled,
- requireSecureURI: !Svc.Prefs.get("addons.ignoreRepositoryChecking", false),
- }], cb);
-
- // This will throw if there was an error. This will get caught by the sync
+ create: Task.async(function* (record) {
+ // This will be rejected if there was an error. This will get caught by the sync
// engine and the record will try to be applied later.
- let results = cb.wait();
+ let results = yield new Promise((resolve, reject) => {
+ AddonUtils.installAddons([{
+ id: record.addonID,
+ syncGUID: record.id,
+ enabled: record.enabled,
+ requireSecureURI: !Svc.Prefs.get("addons.ignoreRepositoryChecking", false),
+ }], makeAddonUtilsCallback(resolve, reject));
+ });
let addon;
for each (let a in results.addons) {
if (a.id == record.addonID) {
addon = a;
break;
}
}
// This should never happen, but is present as a fail-safe.
if (!addon) {
throw new Error("Add-on not found after install: " + record.addonID);
}
this._log.info("Add-on installed: " + record.addonID);
- },
+ }),
/**
* Provides core Store API to remove/uninstall an add-on from a record.
*/
- remove: function remove(record) {
+ remove: Task.async(function* (record) {
// If this is called, the payload is empty, so we have to find by GUID.
- let addon = this.getAddonByGUID(record.id);
+ let addon = yield this.getAddonByGUID(record.id);
if (!addon) {
// We don't throw because if the add-on could not be found then we assume
// it has already been uninstalled and there is nothing for this function
// to do.
return;
}
-
- this._log.info("Uninstalling add-on: " + addon.id);
- let cb = Async.makeSpinningCallback();
- AddonUtils.uninstallAddon(addon, cb);
- cb.wait();
- },
+ yield new Promise((resolve, reject) => {
+ this._log.info("Uninstalling add-on: " + addon.id);
+ AddonUtils.uninstallAddon(addon, makeAddonUtilsCallback(resolve, reject));
+ });
+ }),
/**
* Provides core Store API to update an add-on from a record.
*/
- update: function update(record) {
- let addon = this.getAddonByID(record.addonID);
+ update: Task.async(function* (record) {
+ let addon = yield this.getAddonByID(record.addonID);
// update() is called if !this.itemExists. And, since itemExists consults
// the reconciler only, we need to take care of some corner cases.
//
// First, the reconciler could know about an add-on that was uninstalled
// and no longer present in the add-ons manager.
if (!addon) {
- this.create(record);
+ yield this.create(record);
return;
}
// It's also possible that the add-on is non-restartless and has pending
// install/uninstall activity.
//
// We wouldn't get here if the incoming record was for a deletion. So,
// check for pending uninstall and cancel if necessary.
if (addon.pendingOperations & AddonManager.PENDING_UNINSTALL) {
addon.cancelUninstall();
// We continue with processing because there could be state or ID change.
}
- let cb = Async.makeSpinningCallback();
- this.updateUserDisabled(addon, !record.enabled, cb);
- cb.wait();
- },
+ yield this.updateUserDisabled(addon, !record.enabled);
+ }),
/**
* Provide core Store API to determine if a record exists.
*/
- itemExists: function itemExists(guid) {
- let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
-
+ itemExists: Task.async(function* (guid) {
+ let addon = yield this.reconciler.getAddonStateFromSyncGUID(guid);
return !!addon;
- },
+ }),
/**
* Create an add-on record from its GUID.
*
* @param guid
* Add-on GUID (from extensions DB)
* @param collection
* Collection to add record to.
*
* @return AddonRecord instance
*/
- createRecord: function createRecord(guid, collection) {
+ createRecord: Task.async(function* (guid, collection) {
let record = new AddonRecord(collection, guid);
record.applicationID = Services.appinfo.ID;
- let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
+ let addon = yield this.reconciler.getAddonStateFromSyncGUID(guid);
// If we don't know about this GUID or if it has been uninstalled, we mark
// the record as deleted.
if (!addon || !addon.installed) {
record.deleted = true;
return record;
}
@@ -401,126 +406,123 @@ AddonsStore.prototype = {
record.addonID = addon.id;
record.enabled = addon.enabled;
// This needs to be dynamic when add-ons don't come from AddonRepository.
record.source = "amo";
return record;
- },
+ }),
/**
* Changes the id of an add-on.
*
* This implements a core API of the store.
*/
- changeItemID: function changeItemID(oldID, newID) {
+ changeItemID: Task.async(function* (oldID, newID) {
// We always update the GUID in the reconciler because it will be
// referenced later in the sync process.
- let state = this.reconciler.getAddonStateFromSyncGUID(oldID);
+ let state = yield this.reconciler.getAddonStateFromSyncGUID(oldID);
if (state) {
state.guid = newID;
- let cb = Async.makeSpinningCallback();
- this.reconciler.saveState(null, cb);
- cb.wait();
+ yield this.reconciler.saveState(null);
}
- let addon = this.getAddonByGUID(oldID);
+ let addon = yield this.getAddonByGUID(oldID);
if (!addon) {
this._log.debug("Cannot change item ID (" + oldID + ") in Add-on " +
"Manager because old add-on not present: " + oldID);
return;
}
addon.syncGUID = newID;
- },
+ }),
/**
* Obtain the set of all syncable add-on Sync GUIDs.
*
* This implements a core Store API.
*/
- getAllIDs: function getAllIDs() {
+ getAllIDs: Task.async(function* () {
let ids = {};
- let addons = this.reconciler.addons;
+ let addons = yield this.reconciler.addons;
for each (let addon in addons) {
- if (this.isAddonSyncable(addon)) {
+ if ((yield this.isAddonSyncable(addon))) {
ids[addon.guid] = true;
}
}
return ids;
- },
+ }),
/**
* Wipe engine data.
*
* This uninstalls all syncable addons from the application. In case of
* error, it logs the error and keeps trying with other add-ons.
*/
- wipe: function wipe() {
+ wipe: Task.async(function* () {
this._log.info("Processing wipe.");
- this.engine._refreshReconcilerState();
-
+ yield this.engine._refreshReconcilerState();
// We only wipe syncable add-ons. Wipe is a Sync feature not a security
// feature.
- for (let guid in this.getAllIDs()) {
- let addon = this.getAddonByGUID(guid);
+ for (let guid in (yield this.getAllIDs())) {
+ let addon = yield this.getAddonByGUID(guid);
if (!addon) {
this._log.debug("Ignoring add-on because it couldn't be obtained: " +
guid);
continue;
}
this._log.info("Uninstalling add-on as part of wipe: " + addon.id);
Utils.catch(addon.uninstall)();
}
- },
+ }),
/***************************************************************************
* Functions below are unique to this store and not part of the Store API *
***************************************************************************/
/**
- * Synchronously obtain an add-on from its public ID.
+ * Asynchronously obtain an add-on from its public ID.
*
* @param id
* Add-on ID
- * @return Addon or undefined if not found
+ * @return Promise<Addon or undefined if not found>
*/
getAddonByID: function getAddonByID(id) {
- let cb = Async.makeSyncCallback();
- AddonManager.getAddonByID(id, cb);
- return Async.waitForSyncCallback(cb);
+ return new Promise(resolve => {
+ AddonManager.getAddonByID(id, resolve);
+ });
},
/**
- * Synchronously obtain an add-on from its Sync GUID.
+ * Asynchronously obtain an add-on from its Sync GUID.
*
* @param guid
* Add-on Sync GUID
- * @return DBAddonInternal or null
+ * @return Promise<DBAddonInternal or null>
*/
getAddonByGUID: function getAddonByGUID(guid) {
- let cb = Async.makeSyncCallback();
- AddonManager.getAddonBySyncGUID(guid, cb);
- return Async.waitForSyncCallback(cb);
+ return new Promise(resolve => {
+ AddonManager.getAddonBySyncGUID(guid, resolve);
+ });
},
/**
* Determines whether an add-on is suitable for Sync.
*
* @param addon
* Addon instance
- * @return Boolean indicating whether it is appropriate for Sync
+ * @return Promise<Boolean> indicating whether it is appropriate for Sync
*/
- isAddonSyncable: function isAddonSyncable(addon) {
+ isAddonSyncable: Task.async(function* (addon) {
// Currently, we limit syncable add-ons to those that are:
// 1) In a well-defined set of types
// 2) Installed in the current profile
// 3) Not installed by a foreign entity (i.e. installed by the app)
// since they act like global extensions.
// 4) Is not a hotfix.
// 5) Are installed from AMO
@@ -559,28 +561,28 @@ AddonsStore.prototype = {
// We provide a back door to skip the repository checking of an add-on.
// This is utilized by the tests to make testing easier. Users could enable
// this, but it would sacrifice security.
if (Svc.Prefs.get("addons.ignoreRepositoryChecking", false)) {
return true;
}
- let cb = Async.makeSyncCallback();
- AddonRepository.getCachedAddonByID(addon.id, cb);
- let result = Async.waitForSyncCallback(cb);
+ let result = yield new Promise(resolve => {
+ AddonRepository.getCachedAddonByID(addon.id, resolve);
+ });
if (!result) {
this._log.debug(addon.id + " not syncable: add-on not found in add-on " +
"repository.");
return false;
}
return this.isSourceURITrusted(result.sourceURI);
- },
+ }),
/**
* Determine whether an add-on's sourceURI field is trusted and the add-on
* can be installed.
*
* This function should only ever be called from isAddonSyncable(). It is
* exposed as a separate function to make testing easier.
*
@@ -622,37 +624,38 @@ AddonsStore.prototype = {
* This will enable or disable an add-on and call the supplied callback when
* the action is complete. If no action is needed, the callback gets called
* immediately.
*
* @param addon
* Addon instance to manipulate.
* @param value
* Boolean to which to set userDisabled on the passed Addon.
- * @param callback
- * Function to be called when action is complete. Will receive 2
- * arguments, a truthy value that signifies error, and the Addon
- * instance passed to this function.
+ *
+ * @return promise<Addon>
+ * Will be resolved with the addon instance passed to this function,
+ * or rejected.
*/
- updateUserDisabled: function updateUserDisabled(addon, value, callback) {
+ updateUserDisabled: Task.async(function* (addon, value) {
if (addon.userDisabled == value) {
- callback(null, addon);
- return;
+ return addon;
}
// A pref allows changes to the enabled flag to be ignored.
if (Svc.Prefs.get("addons.ignoreUserEnabledChanges", false)) {
this._log.info("Ignoring enabled state change due to preference: " +
addon.id);
- callback(null, addon);
- return;
+ return addon;
}
- AddonUtils.updateUserDisabled(addon, value, callback);
- },
+ yield new Promise((resolve, reject) => {
+ AddonUtils.updateUserDisabled(addon, value, makeAddonUtilsCallback(resolve, reject));
+ });
+ return addon;
+ }),
};
/**
* The add-ons tracker keeps track of real-time changes to add-ons.
*
* It hooks up to the reconciler and receives notifications directly from it.
*/
function AddonsTracker(name, engine) {
@@ -668,30 +671,31 @@ AddonsTracker.prototype = {
get store() {
return this.engine._store;
},
/**
* This callback is executed whenever the AddonsReconciler sends out a change
* notification. See AddonsReconciler.addChangeListener().
*/
- changeListener: function changeHandler(date, change, addon) {
+ changeListener: function (date, change, addon) {
this._log.debug("changeListener invoked: " + change + " " + addon.id);
// Ignore changes that occur during sync.
if (this.ignoreAll) {
return;
}
- if (!this.store.isAddonSyncable(addon)) {
+ // XXX - at this stage we still spin for observer notifications.
+ if (!Async.promiseSpinningly(this.store.isAddonSyncable(addon))) {
this._log.debug("Ignoring change because add-on isn't syncable: " +
addon.id);
return;
}
- this.addChangedID(addon.guid, date.getTime() / 1000);
+ Async.promiseSpinningly(this.addChangedID(addon.guid, date.getTime() / 1000));
this.score += SCORE_INCREMENT_XLARGE;
},
startTracking: function() {
if (this.engine.enabled) {
this.reconciler.startListening();
}
--- a/services/sync/modules/engines/bookmarks.js
+++ b/services/sync/modules/engines/bookmarks.js
@@ -213,71 +213,56 @@ BookmarksEngine.prototype = {
_recordObj: PlacesItem,
_storeObj: BookmarksStore,
_trackerObj: BookmarksTracker,
version: 2,
_defaultSort: "index",
syncPriority: 4,
- _sync: function _sync() {
- let engine = this;
- let batchEx = null;
-
- // Try running sync in batch mode
- PlacesUtils.bookmarks.runInBatchMode({
- runBatched: function wrappedSync() {
- try {
- SyncEngine.prototype._sync.call(engine);
- }
- catch(ex) {
- batchEx = ex;
- }
- }
- }, null);
-
- // Expose the exception if something inside the batch failed
- if (batchEx != null) {
- throw batchEx;
- }
- },
+ // XXX - we used to override _sync() and call PlacesUtils.bookmarks.runInBatchMode
+ // before calling the base implementation - but runInBatchMode kinda sucks
+ // for our purposes here as it doesn't support async operations. So for
+ // now, we take the perf hit of not running in batch mode.
+ // We really just need runInBatchMode() to support promises - bug 890203.
// A diagnostic helper to get the string value for a bookmark's URL given
// its ID. Always returns a string - on error will return a string in the
// form of "<description of error>" as this is purely for, eg, logging.
// (This means hitting the DB directly and we don't bother using a cached
// statement - we should rarely hit this.)
- _getStringUrlForId(id) {
+ _getStringUrlForId: Task.async(function* (id) {
let url;
try {
let stmt = this._store._getStmt(`
SELECT h.url
FROM moz_places h
JOIN moz_bookmarks b ON h.id = b.fk
WHERE b.id = :id`);
stmt.params.id = id;
- let rows = Async.querySpinningly(stmt, ["url"]);
+ let rows = yield Async.promiseQuery(stmt, ["url"]);
url = rows.length == 0 ? "<not found>" : rows[0].url;
} catch (ex if !Async.isShutdownException(ex)) {
if (ex instanceof Ci.mozIStorageError) {
url = `<failed: Storage error: ${ex.message} (${ex.result})>`;
} else {
url = `<failed: ${ex.toString()}>`;
}
}
return url;
- },
+ }),
_guidMapFailed: false,
- _buildGUIDMap: function _buildGUIDMap() {
+ _buildGUIDMap: Task.async(function* () {
+ let allIds = yield this._store.getAllIDs();
let guidMap = {};
- for (let guid in this._store.getAllIDs()) {
+ for (let guid in allIds) {
// Figure out with which key to store the mapping.
let key;
- let id = this._store.idForGUID(guid);
+ let id = yield this._store.idForGUID(guid);
let itemType;
try {
itemType = PlacesUtils.bookmarks.getItemType(id);
} catch (ex) {
this._log.warn("Deleting invalid bookmark record with id", id);
try {
PlacesUtils.bookmarks.removeItem(id);
} catch (ex) {
@@ -299,17 +284,17 @@ BookmarksEngine.prototype = {
key = "q" + queryId;
} else {
let uri;
try {
uri = PlacesUtils.bookmarks.getBookmarkURI(id);
} catch (ex) {
// Bug 1182366 - NS_ERROR_MALFORMED_URI here stops bookmarks sync.
// Try and get the string value of the URL for diagnostic purposes.
- let url = this._getStringUrlForId(id);
+ let url = yield this._getStringUrlForId(id);
this._log.warn(`Deleting bookmark with invalid URI. url="${url}", id=${id}`);
try {
PlacesUtils.bookmarks.removeItem(id);
} catch (ex) {
this._log.warn("Failed to delete invalid bookmark", ex);
}
continue;
}
@@ -337,23 +322,24 @@ BookmarksEngine.prototype = {
// If the entry already exists, remember that there are explicit dupes.
let entry = new String(guid);
entry.hasDupe = guidMap[parentName][key] != null;
// Remember this item's GUID for its parent-name/key pair.
guidMap[parentName][key] = entry;
this._log.trace("Mapped: " + [parentName, key, entry, entry.hasDupe]);
+ // Make sure this loop doesn't hog the event-loop.
+ yield Async.promiseYield();
}
-
return guidMap;
- },
+ }),
// Helper function to get a dupe GUID for an item.
- _mapDupe: function _mapDupe(item) {
+ _mapDupe: Task.async(function* (item) {
// Figure out if we have something to key with.
let key;
let altKey;
switch (item.type) {
case "query":
// Prior to Bug 610501, records didn't carry their Smart Bookmark
// anno, so we won't be able to dupe them correctly. This altKey
// hack should get them to dupe correctly.
@@ -375,17 +361,17 @@ BookmarksEngine.prototype = {
key = "s" + item.pos;
break;
default:
return;
}
// Figure out if we have a map to use!
// This will throw in some circumstances. That's fine.
- let guidMap = this._guidMap;
+ let guidMap = yield this._guidMap;
// Give the GUID if we have the matching pair.
this._log.trace("Finding mapping: " + item.parentName + ", " + key);
let parent = guidMap[item.parentName];
if (!parent) {
this._log.trace("No parent => no dupe.");
return undefined;
@@ -403,108 +389,102 @@ BookmarksEngine.prototype = {
if (dupe) {
this._log.trace("Mapped dupe using altKey " + altKey + ": " + dupe);
return dupe;
}
}
this._log.trace("No dupe found for key " + key + "/" + altKey + ".");
return undefined;
- },
-
- _syncStartup: function _syncStart() {
- SyncEngine.prototype._syncStartup.call(this);
+ }),
- let cb = Async.makeSpinningCallback();
- Task.spawn(function() {
- // For first-syncs, make a backup for the user to restore
- if (this.lastSync == 0) {
- this._log.debug("Bookmarks backup starting.");
+ _syncStartup: Task.async(function* () {
+ yield SyncEngine.prototype._syncStartup.call(this);
+ // For first-syncs, make a backup for the user to restore
+ if (this.lastSync == 0) {
+ this._log.debug("Bookmarks backup starting.");
+ try {
yield PlacesBackups.create(null, true);
this._log.debug("Bookmarks backup done.");
- }
- }.bind(this)).then(
- cb, ex => {
+ } catch (ex) {
// Failure to create a backup is somewhat bad, but probably not bad
// enough to prevent syncing of bookmarks - so just log the error and
// continue.
this._log.warn("Got exception \"" + Utils.exceptionStr(ex) +
"\" backing up bookmarks, but continuing with sync.");
- cb();
}
- );
+ }
- cb.wait();
-
+ this._store._childrenToOrder = {};
this.__defineGetter__("_guidMap", function() {
+ delete this._guidMap;
// Create a mapping of folder titles and separator positions to GUID.
// We do this lazily so that we don't do any work unless we reconcile
// incoming items.
- let guidMap;
- try {
- guidMap = this._buildGUIDMap();
- } catch (ex if !Async.isShutdownException(ex)) {
- this._log.warn("Got exception \"" + Utils.exceptionStr(ex) +
- "\" building GUID map." +
- " Skipping all other incoming items.");
- throw {code: Engine.prototype.eEngineAbortApplyIncoming,
- cause: ex};
- }
- delete this._guidMap;
- return this._guidMap = guidMap;
+ return this._guidMap = new Promise((resolve, reject) => {
+ this._buildGUIDMap().then(resolve, ex => {
+ if (Async.isShutdownException(ex)) {
+ reject(ex);
+ return;
+ }
+ this._log.warn("Got exception \"" + Utils.exceptionStr(ex) +
+ "\" building GUID map." +
+ " Skipping all other incoming items.");
+ reject({code: Engine.prototype.eEngineAbortApplyIncoming,
+ cause: ex});
+ });
+ });
});
-
- this._store._childrenToOrder = {};
- },
+ }),
- _processIncoming: function (newitems) {
+ _processIncoming: Task.async(function* (newitems) {
try {
- SyncEngine.prototype._processIncoming.call(this, newitems);
+ yield SyncEngine.prototype._processIncoming.call(this, newitems);
} finally {
// Reorder children.
this._tracker.ignoreAll = true;
- this._store._orderChildren();
+ yield this._store._orderChildren();
this._tracker.ignoreAll = false;
delete this._store._childrenToOrder;
}
- },
+ }),
- _syncFinish: function _syncFinish() {
- SyncEngine.prototype._syncFinish.call(this);
+ _syncFinish: Task.async(function* () {
+ yield SyncEngine.prototype._syncFinish.call(this);
this._tracker._ensureMobileQuery();
- },
+ }),
- _syncCleanup: function _syncCleanup() {
- SyncEngine.prototype._syncCleanup.call(this);
+ _syncCleanup: Task.async(function* () {
+ yield SyncEngine.prototype._syncCleanup.call(this);
delete this._guidMap;
- },
+ }),
- _createRecord: function _createRecord(id) {
+ _createRecord: Task.async(function* (id) {
// Create the record as usual, but mark it as having dupes if necessary.
- let record = SyncEngine.prototype._createRecord.call(this, id);
- let entry = this._mapDupe(record);
+ let record = yield SyncEngine.prototype._createRecord.call(this, id);
+ let entry = yield this._mapDupe(record);
if (entry != null && entry.hasDupe) {
record.hasDupe = true;
}
return record;
- },
+ }),
- _findDupe: function _findDupe(item) {
+ _findDupe: Task.async(function* (item) {
this._log.trace("Finding dupe for " + item.id +
" (already duped: " + item.hasDupe + ").");
// Don't bother finding a dupe if the incoming item has duplicates.
if (item.hasDupe) {
this._log.trace(item.id + " already a dupe: not finding one.");
return;
}
- let mapped = this._mapDupe(item);
+ let mapped = yield this._mapDupe(item);
this._log.debug(item.id + " mapped to " + mapped);
return mapped;
- }
+ }),
};
function BookmarksStore(name, engine) {
Store.call(this, name, engine);
// Explicitly nullify our references to our cached services so we don't leak
Svc.Obs.add("places-shutdown", function() {
for each (let [query, stmt] in Iterator(this._stmts)) {
@@ -512,17 +492,19 @@ function BookmarksStore(name, engine) {
}
this._stmts = {};
}, this);
}
BookmarksStore.prototype = {
__proto__: Store.prototype,
itemExists: function BStore_itemExists(id) {
- return this.idForGUID(id, true) > 0;
+ return this.idForGUID(id, true).then(
+ result => result > 0
+ );
},
/*
* If the record is a tag query, rewrite it to refer to the local tag ID.
*
* Otherwise, just return.
*/
preprocessTagQuery: function preprocessTagQuery(record) {
@@ -573,28 +555,28 @@ BookmarksStore.prototype = {
}
}
}
finally {
tags.containerOpen = false;
}
},
- applyIncoming: function BStore_applyIncoming(record) {
+ applyIncoming: Task.async(function* (record) {
this._log.debug("Applying record " + record.id);
let isSpecial = record.id in kSpecialIds;
if (record.deleted) {
if (isSpecial) {
this._log.warn("Ignoring deletion for special record " + record.id);
return;
}
// Don't bother with pre and post-processing for deletions.
- Store.prototype.applyIncoming.call(this, record);
+ yield Store.prototype.applyIncoming.call(this, record);
return;
}
// For special folders we're only interested in child ordering.
if (isSpecial && record.children) {
this._log.debug("Processing special node: " + record.id);
// Reorder children later
this._childrenToOrder[record.id] = record.children;
@@ -613,92 +595,92 @@ BookmarksStore.prototype = {
// Figure out the local id of the parent GUID if available
let parentGUID = record.parentid;
if (!parentGUID) {
throw "Record " + record.id + " has invalid parentid: " + parentGUID;
}
this._log.debug("Local parent is " + parentGUID);
- let parentId = this.idForGUID(parentGUID);
+ let parentId = yield this.idForGUID(parentGUID);
if (parentId > 0) {
// Save the parent id for modifying the bookmark later
record._parent = parentId;
record._orphan = false;
this._log.debug("Record " + record.id + " is not an orphan.");
} else {
this._log.trace("Record " + record.id +
" is an orphan: could not find parent " + parentGUID);
record._orphan = true;
}
// Do the normal processing of incoming records
- Store.prototype.applyIncoming.call(this, record);
+ yield Store.prototype.applyIncoming.call(this, record);
// Do some post-processing if we have an item
- let itemId = this.idForGUID(record.id);
+ let itemId = yield this.idForGUID(record.id);
if (itemId > 0) {
// Move any children that are looking for this folder as a parent
if (record.type == "folder") {
- this._reparentOrphans(itemId);
+ yield this._reparentOrphans(itemId);
// Reorder children later
if (record.children)
this._childrenToOrder[record.id] = record.children;
}
// Create an annotation to remember that it needs reparenting.
if (record._orphan) {
PlacesUtils.annotations.setItemAnnotation(
itemId, PARENT_ANNO, parentGUID, 0,
PlacesUtils.annotations.EXPIRE_NEVER);
}
}
- },
+ }),
/**
* Find all ids of items that have a given value for an annotation
*/
_findAnnoItems: function BStore__findAnnoItems(anno, val) {
return PlacesUtils.annotations.getItemsWithAnnotation(anno, {})
.filter(function(id) {
return PlacesUtils.annotations.getItemAnnotation(id, anno) == val;
});
},
/**
* For the provided parent item, attach its children to it
*/
- _reparentOrphans: function _reparentOrphans(parentId) {
+ _reparentOrphans: Task.async(function* (parentId) {
// Find orphans and reunite with this folder parent
- let parentGUID = this.GUIDForId(parentId);
+ let parentGUID = yield this.GUIDForId(parentId);
let orphans = this._findAnnoItems(PARENT_ANNO, parentGUID);
this._log.debug("Reparenting orphans " + orphans + " to " + parentId);
- orphans.forEach(function(orphan) {
+ for (let orphan of orphans) {
// Move the orphan to the parent and drop the missing parent annotation
- if (this._reparentItem(orphan, parentId)) {
+ if (yield this._reparentItem(orphan, parentId)) {
PlacesUtils.annotations.removeItemAnnotation(orphan, PARENT_ANNO);
}
- }, this);
- },
+ }
+ }),
- _reparentItem: function _reparentItem(itemId, parentId) {
+ _reparentItem: Task.async(function* (itemId, parentId) {
this._log.trace("Attempting to move item " + itemId + " to new parent " +
parentId);
try {
if (parentId > 0) {
PlacesUtils.bookmarks.moveItem(itemId, parentId,
PlacesUtils.bookmarks.DEFAULT_INDEX);
return true;
}
} catch(ex) {
this._log.debug("Failed to reparent item. " + Utils.exceptionStr(ex));
}
return false;
- },
+ }),
// Turn a record's nsINavBookmarksService constant and other attributes into
// a granular type for comparison.
_recordType: function _recordType(itemId) {
let bms = PlacesUtils.bookmarks;
let type = bms.getItemType(itemId);
switch (type) {
@@ -719,17 +701,17 @@ BookmarksStore.prototype = {
case bms.TYPE_SEPARATOR:
return "separator";
default:
return null;
}
},
- create: function BStore_create(record) {
+ create: Task.async(function* (record) {
// Default to unfiled if we don't have the parent yet.
// Valid parent IDs are all positive integers. Other values -- undefined,
// null, -1 -- all compare false for > 0, so this catches them all. We
// don't just use <= without the !, because undefined and null compare
// false for that, too!
if (!(record._parent > 0)) {
this._log.debug("Parent is " + record._parent + "; reparenting to unfiled.");
@@ -795,39 +777,30 @@ BookmarksStore.prototype = {
.itemHasAnnotation(record._parent, PlacesUtils.LMANNO_FEEDURI)) {
this._log.debug("Invalid parent: skipping livemark record " + record.id);
return;
}
if (record.siteUri != null)
siteURI = Utils.makeURI(record.siteUri);
- // Until this engine can handle asynchronous error reporting, we need to
- // detect errors on creation synchronously.
- let spinningCb = Async.makeSpinningCallback();
-
let livemarkObj = {title: record.title,
parentId: record._parent,
index: PlacesUtils.bookmarks.DEFAULT_INDEX,
feedURI: Utils.makeURI(record.feedUri),
siteURI: siteURI,
guid: record.id};
- PlacesUtils.livemarks.addLivemark(livemarkObj).then(
- aLivemark => { spinningCb(null, [Components.results.NS_OK, aLivemark]) },
- ex => {
- this._log.error("creating livemark failed: " + ex);
- spinningCb(null, [Components.results.NS_ERROR_UNEXPECTED, null])
- }
- );
- let [status, livemark] = spinningCb.wait();
- if (!Components.isSuccessCode(status)) {
- throw status;
+ let livemark;
+ try {
+ livemark = yield PlacesUtils.livemarks.addLivemark(livemarkObj);
+ } catch (ex) {
+ this._log.error("creating livemark failed", ex);
+ throw Components.results.NS_ERROR_UNEXPECTED;
}
-
this._log.debug("Created livemark " + livemark.id + " under " +
livemark.parentId + " as " + livemark.title +
", " + livemark.siteURI.spec + ", " +
livemark.feedURI.spec + ", GUID " +
livemark.guid);
break;
case "separator":
newId = PlacesUtils.bookmarks.insertSeparator(
@@ -841,19 +814,19 @@ BookmarksStore.prototype = {
this._log.error("_create: Unknown item type: " + record.type);
return;
}
if (newId) {
// Livemarks can set the GUID through the API, so there's no need to
// do that here.
this._log.trace("Setting GUID of new item " + newId + " to " + record.id);
- this._setGUID(newId, record.id);
+ yield this._setGUID(newId, record.id);
}
- },
+ }),
// Factored out of `remove` to avoid redundant DB queries when the Places ID
// is already known.
removeById: function removeById(itemId, guid) {
let type = PlacesUtils.bookmarks.getItemType(itemId);
switch (type) {
case PlacesUtils.bookmarks.TYPE_BOOKMARK:
@@ -869,37 +842,37 @@ BookmarksStore.prototype = {
PlacesUtils.bookmarks.removeItem(itemId);
break;
default:
this._log.error("remove: Unknown item type: " + type);
break;
}
},
- remove: function BStore_remove(record) {
+ remove: Task.async(function* (record) {
if (kSpecialIds.isSpecialGUID(record.id)) {
this._log.warn("Refusing to remove special folder " + record.id);
return;
}
- let itemId = this.idForGUID(record.id);
+ let itemId = yield this.idForGUID(record.id);
if (itemId <= 0) {
this._log.debug("Item " + record.id + " already removed");
return;
}
this.removeById(itemId, record.id);
- },
+ }),
_taggableTypes: ["bookmark", "microsummary", "query"],
isTaggable: function isTaggable(recordType) {
return this._taggableTypes.indexOf(recordType) != -1;
},
- update: function BStore_update(record) {
- let itemId = this.idForGUID(record.id);
+ update: Task.async(function* (record) {
+ let itemId = yield this.idForGUID(record.id);
if (itemId <= 0) {
this._log.debug("Skipping update for unknown item: " + record.id);
return;
}
// Two items are the same type if they have the same ItemType in Places,
// and also share some key characteristics (e.g., both being livemarks).
@@ -911,26 +884,26 @@ BookmarksStore.prototype = {
let remoteRecordType = record.type;
this._log.trace("Local type: " + localItemType + ". " +
"Remote type: " + remoteRecordType + ".");
if (localItemType != remoteRecordType) {
this._log.debug("Local record and remote record differ in type. " +
"Deleting and recreating.");
this.removeById(itemId, record.id);
- this.create(record);
+ yield this.create(record);
return;
}
this._log.trace("Updating " + record.id + " (" + itemId + ")");
// Move the bookmark to a new parent or new position if necessary
if (record._parent > 0 &&
PlacesUtils.bookmarks.getFolderIdForItem(itemId) != record._parent) {
- this._reparentItem(itemId, record._parent);
+ yield this._reparentItem(itemId, record._parent);
}
for (let [key, val] in Iterator(record.cleartext)) {
switch (key) {
case "title":
PlacesUtils.bookmarks.setItemTitle(itemId, val);
break;
case "bmkUri":
@@ -968,26 +941,26 @@ BookmarksStore.prototype = {
break;
case "queryId":
PlacesUtils.annotations.setItemAnnotation(
itemId, SMART_BOOKMARKS_ANNO, val, 0,
PlacesUtils.annotations.EXPIRE_NEVER);
break;
}
}
- },
+ }),
- _orderChildren: function _orderChildren() {
+ _orderChildren: Task.async(function* () {
for (let [guid, children] in Iterator(this._childrenToOrder)) {
// Reorder children according to the GUID list. Gracefully deal
// with missing items, e.g. locally deleted.
let delta = 0;
let parent = null;
for (let idx = 0; idx < children.length; idx++) {
- let itemid = this.idForGUID(children[idx]);
+ let itemid = yield this.idForGUID(children[idx]);
if (itemid == -1) {
delta += 1;
this._log.trace("Could not locate record " + children[idx]);
continue;
}
try {
// This code path could be optimized by caching the parent earlier.
// Doing so should take in count any edge case due to reparenting
@@ -996,28 +969,28 @@ BookmarksStore.prototype = {
parent = PlacesUtils.bookmarks.getFolderIdForItem(itemid);
}
PlacesUtils.bookmarks.moveItem(itemid, parent, idx - delta);
} catch (ex) {
this._log.debug("Could not move item " + children[idx] + ": " + ex);
}
}
}
- },
+ }),
- changeItemID: function BStore_changeItemID(oldID, newID) {
+ changeItemID: Task.async(function* (oldID, newID) {
this._log.debug("Changing GUID " + oldID + " to " + newID);
// Make sure there's an item to change GUIDs
- let itemId = this.idForGUID(oldID);
+ let itemId = yield this.idForGUID(oldID);
if (itemId <= 0)
return;
- this._setGUID(itemId, newID);
- },
+ yield this._setGUID(itemId, newID);
+ }),
_getNode: function BStore__getNode(folder) {
let query = PlacesUtils.history.getNewQuery();
query.setFolders([folder], 1);
return PlacesUtils.history.executeQuery(
query, PlacesUtils.history.getNewQueryOptions()).root;
},
@@ -1047,32 +1020,32 @@ BookmarksStore.prototype = {
return this._getStmt(
"SELECT id AS item_id, guid " +
"FROM moz_bookmarks " +
"WHERE parent = :parent " +
"ORDER BY position");
},
_childGUIDsCols: ["item_id", "guid"],
- _getChildGUIDsForId: function _getChildGUIDsForId(itemid) {
+ _getChildGUIDsForId: Task.async(function* (itemid) {
let stmt = this._childGUIDsStm;
stmt.params.parent = itemid;
- let rows = Async.querySpinningly(stmt, this._childGUIDsCols);
- return rows.map(function (row) {
- if (row.guid) {
- return row.guid;
- }
- // A GUID hasn't been assigned to this item yet, do this now.
- return this.GUIDForId(row.item_id);
- }, this);
- },
+ let rows = yield Async.promiseQuery(stmt, this._childGUIDsCols);
+ let result = [];
+ for (let row of rows) {
+ // If a GUID hasn't been assigned to this item yet, do this now.
+ let guid = row.guid ? row.guid : yield this.GUIDForId(row.item_id);
+ result.push(guid);
+ }
+ return result;
+ }),
// Create a record starting from the weave id (places guid)
- createRecord: function createRecord(id, collection) {
- let placeId = this.idForGUID(id);
+ createRecord: Task.async(function* (id, collection) {
+ let placeId = yield this.idForGUID(id);
let record;
if (placeId <= 0) { // deleted item
record = new PlacesItem(collection, id);
record.deleted = true;
return record;
}
let parent = PlacesUtils.bookmarks.getFolderIdForItem(placeId);
@@ -1130,38 +1103,38 @@ BookmarksStore.prototype = {
} else {
record = new BookmarkFolder(collection, id);
}
if (parent > 0)
record.parentName = PlacesUtils.bookmarks.getItemTitle(parent);
record.title = PlacesUtils.bookmarks.getItemTitle(placeId);
record.description = this._getDescription(placeId);
- record.children = this._getChildGUIDsForId(placeId);
+ record.children = yield this._getChildGUIDsForId(placeId);
break;
case PlacesUtils.bookmarks.TYPE_SEPARATOR:
record = new BookmarkSeparator(collection, id);
if (parent > 0)
record.parentName = PlacesUtils.bookmarks.getItemTitle(parent);
// Create a positioning identifier for the separator, used by _mapDupe
record.pos = PlacesUtils.bookmarks.getItemIndex(placeId);
break;
default:
record = new PlacesItem(collection, id);
this._log.warn("Unknown item type, cannot serialize: " +
PlacesUtils.bookmarks.getItemType(placeId));
}
- record.parentid = this.GUIDForId(parent);
- record.sortindex = this._calculateIndex(record);
+ record.parentid = yield this.GUIDForId(parent);
+ record.sortindex = yield this._calculateIndex(record);
return record;
- },
+ }),
_stmts: {},
_getStmt: function(query) {
if (query in this._stmts) {
return this._stmts[query];
}
this._log.trace("Creating SQL statement: " + query);
@@ -1182,135 +1155,135 @@ BookmarksStore.prototype = {
get _setGUIDStm() {
return this._getStmt(
"UPDATE moz_bookmarks " +
"SET guid = :guid " +
"WHERE id = :item_id");
},
// Some helper functions to handle GUIDs
- _setGUID: function _setGUID(id, guid) {
+ _setGUID: Task.async(function* (id, guid) {
if (!guid)
guid = Utils.makeGUID();
let stmt = this._setGUIDStm;
stmt.params.guid = guid;
stmt.params.item_id = id;
- Async.querySpinningly(stmt);
+ yield Async.promiseQuery(stmt);
PlacesUtils.invalidateCachedGuidFor(id);
return guid;
- },
+ }),
get _guidForIdStm() {
return this._getStmt(
"SELECT guid " +
"FROM moz_bookmarks " +
"WHERE id = :item_id");
},
_guidForIdCols: ["guid"],
- GUIDForId: function GUIDForId(id) {
+ GUIDForId: Task.async(function* (id) {
let special = kSpecialIds.specialGUIDForId(id);
if (special)
return special;
let stmt = this._guidForIdStm;
stmt.params.item_id = id;
// Use the existing GUID if it exists
- let result = Async.querySpinningly(stmt, this._guidForIdCols)[0];
+ let result = (yield Async.promiseQuery(stmt, this._guidForIdCols))[0];
if (result && result.guid)
return result.guid;
// Give the uri a GUID if it doesn't have one
- return this._setGUID(id);
- },
+ return yield this._setGUID(id);
+ }),
get _idForGUIDStm() {
return this._getStmt(
"SELECT id AS item_id " +
"FROM moz_bookmarks " +
"WHERE guid = :guid");
},
_idForGUIDCols: ["item_id"],
// noCreate is provided as an optional argument to prevent the creation of
// non-existent special records, such as "mobile".
- idForGUID: function idForGUID(guid, noCreate) {
+ idForGUID: Task.async(function* (guid, noCreate) {
if (kSpecialIds.isSpecialGUID(guid))
return kSpecialIds.specialIdForGUID(guid, !noCreate);
let stmt = this._idForGUIDStm;
// guid might be a String object rather than a string.
stmt.params.guid = guid.toString();
- let results = Async.querySpinningly(stmt, this._idForGUIDCols);
+ let results = yield Async.promiseQuery(stmt, this._idForGUIDCols);
this._log.trace("Number of rows matching GUID " + guid + ": "
+ results.length);
// Here's the one we care about: the first.
let result = results[0];
if (!result)
return -1;
return result.item_id;
- },
+ }),
- _calculateIndex: function _calculateIndex(record) {
+ _calculateIndex: Task.async(function* (record) {
// Ensure folders have a very high sort index so they're not synced last.
if (record.type == "folder")
return FOLDER_SORTINDEX;
// For anything directly under the toolbar, give it a boost of more than an
// unvisited bookmark
let index = 0;
if (record.parentid == "toolbar")
index += 150;
// Add in the bookmark's frecency if we have something.
if (record.bmkUri != null) {
this._frecencyStm.params.url = record.bmkUri;
- let result = Async.querySpinningly(this._frecencyStm, this._frecencyCols);
+ let result = yield Async.promiseQuery(this._frecencyStm, this._frecencyCols);
if (result.length)
index += result[0].frecency;
}
return index;
- },
+ }),
- _getChildren: function BStore_getChildren(guid, items) {
+ _getChildren: Task.async(function* (guid, items) {
let node = guid; // the recursion case
if (typeof(node) == "string") { // callers will give us the guid as the first arg
- let nodeID = this.idForGUID(guid, true);
+ let nodeID = yield this.idForGUID(guid, true);
if (!nodeID) {
this._log.debug("No node for GUID " + guid + "; returning no children.");
return items;
}
node = this._getNode(nodeID);
}
-
+
if (node.type == node.RESULT_TYPE_FOLDER) {
node.QueryInterface(Ci.nsINavHistoryQueryResultNode);
node.containerOpen = true;
try {
// Remember all the children GUIDs and recursively get more
for (let i = 0; i < node.childCount; i++) {
let child = node.getChild(i);
- items[this.GUIDForId(child.itemId)] = true;
- this._getChildren(child, items);
+ items[(yield this.GUIDForId(child.itemId))] = true;
+ yield this._getChildren(child, items);
}
}
finally {
node.containerOpen = false;
}
}
return items;
- },
+ }),
/**
* Associates the URI of the item with the provided ID with the
* provided array of tags.
* If the provided ID does not identify an item with a URI,
* returns immediately.
*/
_tagID: function _tagID(itemID, tags) {
@@ -1345,41 +1318,36 @@ BookmarksStore.prototype = {
// Temporarily tag a dummy URI to preserve tag ids when untagging.
let dummyURI = Utils.makeURI("about:weave#BStore_tagURI");
PlacesUtils.tagging.tagURI(dummyURI, tags);
PlacesUtils.tagging.untagURI(bookmarkURI, null);
PlacesUtils.tagging.tagURI(bookmarkURI, tags);
PlacesUtils.tagging.untagURI(dummyURI, null);
},
- getAllIDs: function BStore_getAllIDs() {
+ getAllIDs: Task.async(function* () {
let items = {"menu": true,
"toolbar": true};
for each (let guid in kSpecialIds.guids) {
if (guid != "places" && guid != "tags")
- this._getChildren(guid, items);
+ yield this._getChildren(guid, items);
}
return items;
- },
+ }),
- wipe: function BStore_wipe() {
- let cb = Async.makeSpinningCallback();
- Task.spawn(function() {
- // Save a backup before clearing out all bookmarks.
- yield PlacesBackups.create(null, true);
- for each (let guid in kSpecialIds.guids)
- if (guid != "places") {
- let id = kSpecialIds.specialIdForGUID(guid);
- if (id)
- PlacesUtils.bookmarks.removeFolderChildren(id);
- }
- cb();
- });
- cb.wait();
- }
+ wipe: Task.async(function*() {
+ // Save a backup before clearing out all bookmarks.
+ yield PlacesBackups.create(null, true);
+ for each (let guid in kSpecialIds.guids)
+ if (guid != "places") {
+ let id = kSpecialIds.specialIdForGUID(guid);
+ if (id)
+ PlacesUtils.bookmarks.removeFolderChildren(id);
+ }
+ }),
};
function BookmarksTracker(name, engine) {
Tracker.call(this, name, engine);
Svc.Obs.add("places-shutdown", this);
}
BookmarksTracker.prototype = {
@@ -1407,19 +1375,22 @@ BookmarksTracker.prototype = {
this._log.debug("Ignoring changes from importing bookmarks.");
this.ignoreAll = true;
break;
case "bookmarks-restore-success":
this._log.debug("Tracking all items on successful import.");
this.ignoreAll = false;
this._log.debug("Restore succeeded: wiping server and other clients.");
- this.engine.service.resetClient([this.name]);
- this.engine.service.wipeServer([this.name]);
- this.engine.service.clientsEngine.sendCommand("wipeEngine", [this.name]);
+ // At the moment all observers still spin, so we wait for these promises.
+ Async.promiseSpinningly(Task.spawn(function* () {
+ yield this.engine.service.resetClient([this.name]);
+ yield this.engine.service.wipeServer([this.name]);
+ yield this.engine.service.clientsEngine.sendCommand("wipeEngine", [this.name]);
+ }.bind(this)));
break;
case "bookmarks-restore-failed":
this._log.debug("Tracking all items on failed import.");
this.ignoreAll = false;
break;
}
},
@@ -1430,36 +1401,36 @@ BookmarksTracker.prototype = {
]),
/**
* Add a bookmark GUID to be uploaded and bump up the sync score.
*
* @param itemGuid
* GUID of the bookmark to upload.
*/
- _add: function BMT__add(itemId, guid) {
+ _add: Task.async(function* (itemId, guid) {
guid = kSpecialIds.specialGUIDForId(itemId) || guid;
- if (this.addChangedID(guid))
+ if ((yield this.addChangedID(guid)))
this._upScore();
- },
+ }),
/* Every add/remove/change will trigger a sync for MULTI_DEVICE. */
_upScore: function BMT__upScore() {
this.score += SCORE_INCREMENT_XLARGE;
},
/**
* Determine if a change should be ignored.
*
* @param itemId
* Item under consideration to ignore
* @param folder (optional)
* Folder of the item being changed
*/
- _ignore: function BMT__ignore(itemId, folder, guid) {
+ _ignore: Task.async(function* (itemId, folder, guid) {
// Ignore unconditionally if the engine tells us to.
if (this.ignoreAll)
return true;
// Get the folder id if we weren't given one.
if (folder == null) {
try {
folder = PlacesUtils.bookmarks.getFolderIdForItem(itemId);
@@ -1479,43 +1450,49 @@ BookmarksTracker.prototype = {
return true;
// Ignore tag items (the actual instance of a tag for a bookmark).
if (PlacesUtils.bookmarks.getFolderIdForItem(folder) == tags)
return true;
// Make sure to remove items that have the exclude annotation.
if (PlacesUtils.annotations.itemHasAnnotation(itemId, EXCLUDEBACKUP_ANNO)) {
- this.removeChangedID(guid);
+ yield this.removeChangedID(guid);
return true;
}
return false;
- },
+ }),
onItemAdded: function BMT_onItemAdded(itemId, folder, index,
itemType, uri, title, dateAdded,
guid, parentGuid) {
- if (this._ignore(itemId, folder, guid))
- return;
+ // All observers still currently spin.
+ Async.promiseSpinningly(Task.spawn(function* () {
+ if (yield this._ignore(itemId, folder, guid))
+ return;
- this._log.trace("onItemAdded: " + itemId);
- this._add(itemId, guid);
- this._add(folder, parentGuid);
+ this._log.trace("onItemAdded: " + itemId);
+ yield this._add(itemId, guid);
+ yield this._add(folder, parentGuid);
+ }.bind(this)));
},
onItemRemoved: function (itemId, parentId, index, type, uri,
guid, parentGuid) {
- if (this._ignore(itemId, parentId, guid)) {
- return;
- }
+ // All observers still currently spin.
+ Async.promiseSpinningly(Task.spawn(function* () {
+ if (yield this._ignore(itemId, parentId, guid)) {
+ return;
+ }
- this._log.trace("onItemRemoved: " + itemId);
- this._add(itemId, guid);
- this._add(parentId, parentGuid);
+ this._log.trace("onItemRemoved: " + itemId);
+ yield this._add(itemId, guid);
+ yield this._add(parentId, parentGuid);
+ }.bind(this)));
},
_ensureMobileQuery: function _ensureMobileQuery() {
let find = val =>
PlacesUtils.annotations.getItemsWithAnnotation(ORGANIZERQUERY_ANNO, {}).filter(
id => PlacesUtils.annotations.getItemAnnotation(id, ORGANIZERQUERY_ANNO) == val
);
@@ -1553,50 +1530,54 @@ BookmarksTracker.prototype = {
},
// This method is oddly structured, but the idea is to return as quickly as
// possible -- this handler gets called *every time* a bookmark changes, for
// *each change*.
onItemChanged: function BMT_onItemChanged(itemId, property, isAnno, value,
lastModified, itemType, parentId,
guid, parentGuid) {
- // Quicker checks first.
- if (this.ignoreAll)
- return;
+ Async.promiseSpinningly(Task.spawn(function* () {
+ // Quicker checks first.
+ if (this.ignoreAll)
+ return;
- if (isAnno && (ANNOS_TO_TRACK.indexOf(property) == -1))
- // Ignore annotations except for the ones that we sync.
- return;
+ if (isAnno && (ANNOS_TO_TRACK.indexOf(property) == -1))
+ // Ignore annotations except for the ones that we sync.
+ return;
- // Ignore favicon changes to avoid unnecessary churn.
- if (property == "favicon")
- return;
+ // Ignore favicon changes to avoid unnecessary churn.
+ if (property == "favicon")
+ return;
- if (this._ignore(itemId, parentId, guid))
- return;
+ if (yield this._ignore(itemId, parentId, guid))
+ return;
- this._log.trace("onItemChanged: " + itemId +
- (", " + property + (isAnno? " (anno)" : "")) +
- (value ? (" = \"" + value + "\"") : ""));
- this._add(itemId, guid);
+ this._log.trace("onItemChanged: " + itemId +
+ (", " + property + (isAnno? " (anno)" : "")) +
+ (value ? (" = \"" + value + "\"") : ""));
+ yield this._add(itemId, guid);
+ }.bind(this)));
},
onItemMoved: function BMT_onItemMoved(itemId, oldParent, oldIndex,
newParent, newIndex, itemType,
guid, oldParentGuid, newParentGuid) {
- if (this._ignore(itemId, newParent, guid))
- return;
+ Async.promiseSpinningly(Task.spawn(function* () {
+ if (yield this._ignore(itemId, newParent, guid))
+ return;
- this._log.trace("onItemMoved: " + itemId);
- this._add(oldParent, oldParentGuid);
- if (oldParent != newParent) {
- this._add(itemId, guid);
- this._add(newParent, newParentGuid);
- }
+ this._log.trace("onItemMoved: " + itemId);
+ yield this._add(oldParent, oldParentGuid);
+ if (oldParent != newParent) {
+ yield this._add(itemId, guid);
+ yield this._add(newParent, newParentGuid);
+ }
- // Remove any position annotations now that the user moved the item
- PlacesUtils.annotations.removeItemAnnotation(itemId, PARENT_ANNO);
+ // Remove any position annotations now that the user moved the item
+ PlacesUtils.annotations.removeItemAnnotation(itemId, PARENT_ANNO);
+ }.bind(this)));
},
onBeginUpdateBatch: function () {},
onEndUpdateBatch: function () {},
onItemVisited: function () {}
};
--- a/services/sync/modules/engines/clients.js
+++ b/services/sync/modules/engines/clients.js
@@ -33,26 +33,29 @@ Utils.deferGetSet(ClientsRec,
"cleartext",
["name", "type", "commands",
"version", "protocols",
"formfactor", "os", "appPackage", "application", "device"]);
this.ClientEngine = function ClientEngine(service) {
SyncEngine.call(this, "Clients", service);
-
- // Reset the client on every startup so that we fetch recent clients
- this._resetClient();
}
ClientEngine.prototype = {
__proto__: SyncEngine.prototype,
_storeObj: ClientStore,
_recordObj: ClientsRec,
_trackerObj: ClientsTracker,
+ promiseInitialized: Task.async(function* () {
+ // Reset the client on every startup so that we fetch recent clients
+ yield this._resetClient();
+ yield SyncEngine.prototype.promiseInitialized.call(this);
+ }),
+
// Always sync client data as it controls other sync behavior
get enabled() {
return true;
},
get lastRecordUpload() {
return Svc.Prefs.get(this.name + ".lastRecordUpload", 0);
},
@@ -132,60 +135,60 @@ ClientEngine.prototype = {
},
isMobile: function isMobile(id) {
if (this._store._remoteClients[id])
return this._store._remoteClients[id].type == "mobile";
return false;
},
- _syncStartup: function _syncStartup() {
+ _syncStartup: Task.async(function* () {
// Reupload new client record periodically.
if (Date.now() / 1000 - this.lastRecordUpload > CLIENTS_TTL_REFRESH) {
- this._tracker.addChangedID(this.localID);
+ yield this._tracker.addChangedID(this.localID);
this.lastRecordUpload = Date.now() / 1000;
}
- SyncEngine.prototype._syncStartup.call(this);
- },
+ yield SyncEngine.prototype._syncStartup.call(this);
+ }),
// Always process incoming items because they might have commands
_reconcile: function _reconcile() {
- return true;
+ return Promise.resolve(true);
},
// Treat reset the same as wiping for locally cached clients
- _resetClient() {
- this._wipeClient();
- },
+ _resetClient: Task.async(function* () {
+ yield this._wipeClient();
+ }),
- _wipeClient: function _wipeClient() {
- SyncEngine.prototype._resetClient.call(this);
- this._store.wipe();
- },
+ _wipeClient: Task.async(function* () {
+ yield SyncEngine.prototype._resetClient.call(this);
+ yield this._store.wipe();
+ }),
- removeClientData: function removeClientData() {
+ removeClientData: Task.async(function* () {
let res = this.service.resource(this.engineURL + "/" + this.localID);
- res.delete();
- },
+ yield res.delete();
+ }),
// Override the default behavior to delete bad records from the server.
- handleHMACMismatch: function handleHMACMismatch(item, mayRetry) {
+ handleHMACMismatch: Task.async(function* (item, mayRetry) {
this._log.debug("Handling HMAC mismatch for " + item.id);
- let base = SyncEngine.prototype.handleHMACMismatch.call(this, item, mayRetry);
+ let base = yield SyncEngine.prototype.handleHMACMismatch.call(this, item, mayRetry);
if (base != SyncEngine.kRecoveryStrategy.error)
return base;
// It's a bad client record. Save it to be deleted at the end of the sync.
this._log.debug("Bad client record detected. Scheduling for deletion.");
- this._deleteId(item.id);
+ yield this._deleteId(item.id);
// Neither try again nor error; we're going to delete it.
return SyncEngine.kRecoveryStrategy.ignore;
- },
+ }),
/**
* A hash of valid commands that the client knows about. The key is a command
* and the value is a hash containing information about the command such as
* number of arguments and description.
*/
_commands: {
resetAll: { args: 0, desc: "Clear temporary local data for all engines" },
@@ -194,29 +197,29 @@ ClientEngine.prototype = {
wipeEngine: { args: 1, desc: "Delete all client data for engine" },
logout: { args: 0, desc: "Log out client" },
displayURI: { args: 3, desc: "Instruct a client to display a URI" },
},
/**
* Remove any commands for the local client and mark it for upload.
*/
- clearCommands: function clearCommands() {
+ clearCommands: Task.async(function* clearCommands() {
delete this.localCommands;
- this._tracker.addChangedID(this.localID);
- },
+ yield this._tracker.addChangedID(this.localID);
+ }),
/**
* Sends a command+args pair to a specific client.
*
* @param command Command string
* @param args Array of arguments/data for command
* @param clientId Client to send command to
*/
- _sendCommandToClient: function sendCommandToClient(command, args, clientId) {
+ _sendCommandToClient: Task.async(function* sendCommandToClient(command, args, clientId) {
this._log.trace("Sending " + command + " to " + clientId);
let client = this._store._remoteClients[clientId];
if (!client) {
throw new Error("Unknown remote client ID: '" + clientId + "'.");
}
// notDupe compares two commands and returns if they are not equal.
@@ -237,48 +240,48 @@ ClientEngine.prototype = {
client.commands.push(action);
}
// It must be a dupe. Skip.
else {
return;
}
this._log.trace("Client " + clientId + " got a new action: " + [command, args]);
- this._tracker.addChangedID(clientId);
- },
+ yield this._tracker.addChangedID(clientId);
+ }),
/**
* Check if the local client has any remote commands and perform them.
*
* @return false to abort sync
*/
processIncomingCommands: function processIncomingCommands() {
- return this._notify("clients:process-commands", "", function() {
+ return this._promiseNotify("clients:process-commands", "", function* () {
let commands = this.localCommands;
// Immediately clear out the commands as we've got them locally.
- this.clearCommands();
+ yield this.clearCommands();
// Process each command in order.
for each (let {command, args} in commands) {
this._log.debug("Processing command: " + command + "(" + args + ")");
let engines = [args[0]];
switch (command) {
case "resetAll":
engines = null;
// Fallthrough
case "resetEngine":
- this.service.resetClient(engines);
+ yield this.service.resetClient(engines);
break;
case "wipeAll":
engines = null;
// Fallthrough
case "wipeEngine":
- this.service.wipeClient(engines);
+ yield this.service.wipeClient(engines);
break;
case "logout":
this.service.logout();
return false;
case "displayURI":
this._handleDisplayURI.apply(this, args);
break;
default:
@@ -301,38 +304,38 @@ ClientEngine.prototype = {
* @param command
* Command to invoke on remote clients
* @param args
* Array of arguments to give to the command
* @param clientId
* Client ID to send command to. If undefined, send to all remote
* clients.
*/
- sendCommand: function sendCommand(command, args, clientId) {
+ sendCommand: Task.async(function* sendCommand(command, args, clientId) {
let commandData = this._commands[command];
// Don't send commands that we don't know about.
if (!commandData) {
this._log.error("Unknown command to send: " + command);
return;
}
// Don't send a command with the wrong number of arguments.
else if (!args || args.length != commandData.args) {
this._log.error("Expected " + commandData.args + " args for '" +
command + "', but got " + args);
return;
}
if (clientId) {
- this._sendCommandToClient(command, args, clientId);
+ yield this._sendCommandToClient(command, args, clientId);
} else {
for (let id in this._store._remoteClients) {
- this._sendCommandToClient(command, args, id);
+ yield this._sendCommandToClient(command, args, id);
}
}
- },
+ }),
/**
* Send a URI to another client for display.
*
* A side effect is the score is increased dramatically to incur an
* immediate sync.
*
* If an unknown client ID is specified, sendCommand() will throw an
@@ -341,23 +344,23 @@ ClientEngine.prototype = {
* @param uri
* URI (as a string) to send and display on the remote client
* @param clientId
* ID of client to send the command to. If not defined, will be sent
* to all remote clients.
* @param title
* Title of the page being sent.
*/
- sendURIToClientForDisplay: function sendURIToClientForDisplay(uri, clientId, title) {
+ sendURIToClientForDisplay: Task.async(function* sendURIToClientForDisplay(uri, clientId, title) {
this._log.info("Sending URI to client: " + uri + " -> " +
clientId + " (" + title + ")");
- this.sendCommand("displayURI", [uri, this.localID, title], clientId);
+ yield this.sendCommand("displayURI", [uri, this.localID, title], clientId);
this._tracker.score += SCORE_INCREMENT_XLARGE;
- },
+ }),
/**
* Handle a single received 'displayURI' command.
*
* Interested parties should observe the "weave:engine:clients:display-uri"
* topic. The callback will receive an object as the subject parameter with
* the following keys:
*
@@ -395,16 +398,17 @@ ClientStore.prototype = {
},
update: function update(record) {
// Only grab commands from the server; local name/type always wins
if (record.id == this.engine.localID)
this.engine.localCommands = record.commands;
else
this._remoteClients[record.id] = record.cleartext;
+ return Promise.resolve();
},
createRecord: function createRecord(id, collection) {
let record = new ClientsRec(collection, id);
// Package the individual components into a record for the local client
if (id == this.engine.localID) {
record.name = this.engine.localName;
@@ -420,33 +424,36 @@ ClientStore.prototype = {
// We can't compute these yet.
// record.device = ""; // Bug 1100723
// record.formfactor = ""; // Bug 1100722
} else {
record.cleartext = this._remoteClients[id];
}
- return record;
+ return Promise.resolve(record);
},
itemExists(id) {
- return id in this.getAllIDs();
+ return this.getAllIDs().then(
+ ids => id in ids
+ )
},
getAllIDs: function getAllIDs() {
let ids = {};
ids[this.engine.localID] = true;
for (let id in this._remoteClients)
ids[id] = true;
- return ids;
+ return Promise.resolve(ids);
},
wipe: function wipe() {
this._remoteClients = {};
+ return Promise.resolve();
},
};
function ClientsTracker(name, engine) {
Tracker.call(this, name, engine);
Svc.Obs.add("weave:engine:start-tracking", this);
Svc.Obs.add("weave:engine:stop-tracking", this);
}
@@ -466,14 +473,15 @@ ClientsTracker.prototype = {
case "weave:engine:stop-tracking":
if (this._enabled) {
Svc.Prefs.ignore("clients.name", this);
this._enabled = false;
}
break;
case "nsPref:changed":
this._log.debug("client.name preference changed");
- this.addChangedID(Svc.Prefs.get("client.GUID"));
+ // We currently still spin in observer notifications.
+ Async.promiseSpinningly(this.addChangedID(Svc.Prefs.get("client.GUID")));
this.score += SCORE_INCREMENT_XLARGE;
break;
}
}
};
--- a/services/sync/modules/engines/forms.js
+++ b/services/sync/modules/engines/forms.js
@@ -31,73 +31,74 @@ Utils.deferGetSet(FormRec, "cleartext",
var FormWrapper = {
_log: Log.repository.getLogger("Sync.Engine.Forms"),
_getEntryCols: ["fieldname", "value"],
_guidCols: ["guid"],
- // Do a "sync" search by spinning the event loop until it completes.
- _searchSpinningly: function(terms, searchData) {
- let results = [];
- let cb = Async.makeSpinningCallback();
- let callbacks = {
- handleResult: function(result) {
- results.push(result);
- },
- handleCompletion: function(reason) {
- cb(null, results);
- }
- };
- Svc.FormHistory.search(terms, searchData, callbacks);
- return cb.wait();
+ // Do a promise-based search.
+ _promiseSearch: function(terms, searchData) {
+ return new Promise(resolve => {
+ let results = [];
+ let callbacks = {
+ handleResult: function(result) {
+ results.push(result);
+ },
+ handleCompletion: function(reason) {
+ resolve(results);
+ }
+ };
+ Svc.FormHistory.search(terms, searchData, callbacks);
+ });
},
- _updateSpinningly: function(changes) {
+ _promiseUpdate: function(changes) {
if (!Svc.FormHistory.enabled) {
- return; // update isn't going to do anything.
+ return Promise.resolve(); // update isn't going to do anything.
}
- let cb = Async.makeSpinningCallback();
- let callbacks = {
- handleCompletion: function(reason) {
- cb();
- }
- };
- Svc.FormHistory.update(changes, callbacks);
- return cb.wait();
+ return new Promise(resolve => {
+ let callbacks = {
+ handleCompletion: function(reason) {
+ resolve();
+ }
+ };
+ Svc.FormHistory.update(changes, callbacks);
+ });
},
- getEntry: function (guid) {
- let results = this._searchSpinningly(this._getEntryCols, {guid: guid});
+ getEntry: Task.async(function* (guid) {
+ let results = yield this._promiseSearch(this._getEntryCols, {guid: guid});
if (!results.length) {
return null;
}
return {name: results[0].fieldname, value: results[0].value};
- },
+ }),
- getGUID: function (name, value) {
+ getGUID: Task.async(function* (name, value) {
// Query for the provided entry.
let query = { fieldname: name, value: value };
- let results = this._searchSpinningly(this._guidCols, query);
+ let results = yield this._promiseSearch(this._guidCols, query);
return results.length ? results[0].guid : null;
- },
+ }),
- hasGUID: function (guid) {
- // We could probably use a count function here, but searchSpinningly exists...
- return this._searchSpinningly(this._guidCols, {guid: guid}).length != 0;
- },
+ hasGUID: Task.async(function* (guid) {
+ // We could probably use a count function here, but promiseSearch exists...
+ let results = yield this._promiseSearch(this._guidCols, {guid: guid});
+ return results.length != 0;
+ }),
replaceGUID: function (oldGUID, newGUID) {
let changes = {
op: "update",
guid: oldGUID,
newGuid: newGUID,
}
- this._updateSpinningly(changes);
+ return this._promiseUpdate(changes);
}
};
this.FormEngine = function FormEngine(service) {
SyncEngine.call(this, "Forms", service);
}
FormEngine.prototype = {
@@ -124,91 +125,92 @@ function FormStore(name, engine) {
FormStore.prototype = {
__proto__: Store.prototype,
_processChange: function (change) {
// If this._changes is defined, then we are applying a batch, so we
// can defer it.
if (this._changes) {
this._changes.push(change);
- return;
+ return Promise.resolve();
}
- // Otherwise we must handle the change synchronously, right now.
- FormWrapper._updateSpinningly(change);
+ // Otherwise we must handle the change right now, before the promise resolves.
+ return FormWrapper._promiseUpdate(change);
},
- applyIncomingBatch: function (records) {
+ applyIncomingBatch: Task.async(function* (records) {
// We collect all the changes to be made then apply them all at once.
this._changes = [];
- let failures = Store.prototype.applyIncomingBatch.call(this, records);
+ let failures = yield Store.prototype.applyIncomingBatch.call(this, records);
if (this._changes.length) {
- FormWrapper._updateSpinningly(this._changes);
+ yield FormWrapper._promiseUpdate(this._changes);
}
delete this._changes;
return failures;
- },
+ }),
- getAllIDs: function () {
- let results = FormWrapper._searchSpinningly(["guid"], [])
+ getAllIDs: Task.async(function* () {
+ let results = yield FormWrapper._promiseSearch(["guid"], []);
let guids = {};
for (let result of results) {
guids[result.guid] = true;
}
return guids;
- },
+ }),
changeItemID: function (oldID, newID) {
- FormWrapper.replaceGUID(oldID, newID);
+ return FormWrapper.replaceGUID(oldID, newID);
},
itemExists: function (id) {
- return FormWrapper.hasGUID(id);
+ return Promise.resolve(FormWrapper.hasGUID(id));
},
- createRecord: function (id, collection) {
+ createRecord: Task.async(function* (id, collection) {
let record = new FormRec(collection, id);
- let entry = FormWrapper.getEntry(id);
+ let entry = yield FormWrapper.getEntry(id);
if (entry != null) {
record.name = entry.name;
record.value = entry.value;
} else {
record.deleted = true;
}
return record;
- },
+ }),
create: function (record) {
this._log.trace("Adding form record for " + record.name);
let change = {
op: "add",
fieldname: record.name,
value: record.value
};
- this._processChange(change);
+ return this._processChange(change);
},
remove: function (record) {
this._log.trace("Removing form record: " + record.id);
let change = {
op: "remove",
guid: record.id
};
- this._processChange(change);
+ return this._processChange(change);
},
update: function (record) {
this._log.trace("Ignoring form record update request!");
+ return Promise.resolve();
},
wipe: function () {
let change = {
op: "remove"
};
- FormWrapper._updateSpinningly(change);
+ return FormWrapper._promiseUpdate(change);
}
};
function FormTracker(name, engine) {
Tracker.call(this, name, engine);
}
FormTracker.prototype = {
__proto__: Tracker.prototype,
@@ -227,19 +229,19 @@ FormTracker.prototype = {
observe: function (subject, topic, data) {
Tracker.prototype.observe.call(this, subject, topic, data);
switch (topic) {
case "satchel-storage-changed":
if (data == "formhistory-add" || data == "formhistory-remove") {
let guid = subject.QueryInterface(Ci.nsISupportsString).toString();
- this.trackEntry(guid);
+ Async.promiseSpinningly(this.trackEntry(guid));
}
break;
}
},
trackEntry: function (guid) {
- this.addChangedID(guid);
+ Async.promiseSpinningly(this.addChangedID(guid));
this.score += SCORE_INCREMENT_MEDIUM;
},
};
--- a/services/sync/modules/engines/history.js
+++ b/services/sync/modules/engines/history.js
@@ -113,41 +113,43 @@ HistoryStore.prototype = {
if (!guid) {
guid = Utils.makeGUID();
}
let stmt = this._setGUIDStm;
stmt.params.guid = guid;
stmt.params.page_url = uri;
- Async.querySpinningly(stmt);
- return guid;
+ return Async.promiseQuery(stmt).then(
+ () => guid
+ );
},
get _guidStm() {
return this._getStmt(
"SELECT guid " +
"FROM moz_places " +
"WHERE url = :page_url");
},
_guidCols: ["guid"],
- GUIDForUri: function GUIDForUri(uri, create) {
+ GUIDForUri: Task.async(function* (uri, create) {
let stm = this._guidStm;
stm.params.page_url = uri.spec ? uri.spec : uri;
// Use the existing GUID if it exists
- let result = Async.querySpinningly(stm, this._guidCols)[0];
+ let results = yield Async.promiseQuery(stm, this._guidCols);
+ let result = results[0];
if (result && result.guid)
return result.guid;
// Give the uri a GUID if it doesn't have one
if (create)
- return this.setGUID(uri);
- },
+ return (yield this.setGUID(uri));
+ }),
get _visitStm() {
return this._getStmt(
"/* do not warn (bug 599936) */ " +
"SELECT visit_type type, visit_date date " +
"FROM moz_historyvisits " +
"WHERE place_id = (SELECT id FROM moz_places WHERE url = :url) " +
"ORDER BY date DESC LIMIT 20");
@@ -170,99 +172,104 @@ HistoryStore.prototype = {
"ORDER BY frecency DESC " +
"LIMIT :max_results");
},
_allUrlCols: ["url"],
// See bug 320831 for why we use SQL here
_getVisits: function HistStore__getVisits(uri) {
this._visitStm.params.url = uri;
- return Async.querySpinningly(this._visitStm, this._visitCols);
+ return Async.promiseQuery(this._visitStm, this._visitCols);
},
// See bug 468732 for why we use SQL here
_findURLByGUID: function HistStore__findURLByGUID(guid) {
this._urlStm.params.guid = guid;
- return Async.querySpinningly(this._urlStm, this._urlCols)[0];
+ return Async.promiseQuery(this._urlStm, this._urlCols).then(
+ results => results[0]
+ );
},
- changeItemID: function HStore_changeItemID(oldID, newID) {
- this.setGUID(this._findURLByGUID(oldID).url, newID);
- },
+ changeItemID: Task.async(function* (oldID, newID) {
+ let record = yield this._findURLByGUID(oldID);
+ return (yield this.setGUID(record.url, newID));
+ }),
- getAllIDs: function HistStore_getAllIDs() {
+ getAllIDs: Task.async(function* () {
// Only get places visited within the last 30 days (30*24*60*60*1000ms)
this._allUrlStm.params.cutoff_date = (Date.now() - 2592000000) * 1000;
this._allUrlStm.params.max_results = MAX_HISTORY_UPLOAD;
+ let urls = yield Async.promiseQuery(this._allUrlStm, this._allUrlCols);
+ let ids = {};
+ for (let item of urls) {
+ let guid = yield this.GUIDForUri(item.url, true)
+ ids[guid] = item.url;
+ }
+ return ids;
+ }),
- let urls = Async.querySpinningly(this._allUrlStm, this._allUrlCols);
- let self = this;
- return urls.reduce(function(ids, item) {
- ids[self.GUIDForUri(item.url, true)] = item.url;
- return ids;
- }, {});
- },
-
- applyIncomingBatch: function applyIncomingBatch(records) {
+ applyIncomingBatch: Task.async(function* (records) {
let failed = [];
// Convert incoming records to mozIPlaceInfo objects. Some records can be
// ignored or handled directly, so we're rewriting the array in-place.
let i, k;
for (i = 0, k = 0; i < records.length; i++) {
let record = records[k] = records[i];
let shouldApply;
// This is still synchronous I/O for now.
try {
if (record.deleted) {
// Consider using nsIBrowserHistory::removePages() here.
- this.remove(record);
+ yield this.remove(record);
// No further processing needed. Remove it from the list.
shouldApply = false;
} else {
- shouldApply = this._recordToPlaceInfo(record);
+ shouldApply = yield this._recordToPlaceInfo(record);
}
} catch (ex if !Async.isShutdownException(ex)) {
+ this._log.warn("Failed to apply incoming record", ex);
failed.push(record.id);
shouldApply = false;
}
if (shouldApply) {
k += 1;
}
}
records.length = k; // truncate array
// Nothing to do.
if (!records.length) {
return failed;
}
- let updatePlacesCallback = {
- handleResult: function handleResult() {},
- handleError: function handleError(resultCode, placeInfo) {
- failed.push(placeInfo.guid);
- },
- handleCompletion: Async.makeSyncCallback()
- };
- this._asyncHistory.updatePlaces(records, updatePlacesCallback);
- Async.waitForSyncCallback(updatePlacesCallback.handleCompletion);
+ yield new Promise(resolve => {
+ let updatePlacesCallback = {
+ handleResult: function handleResult() {},
+ handleError: function handleError(resultCode, placeInfo) {
+ failed.push(placeInfo.guid);
+ },
+ handleCompletion: resolve
+ };
+ this._asyncHistory.updatePlaces(records, updatePlacesCallback);
+ });
return failed;
- },
+ }),
/**
* Converts a Sync history record to a mozIPlaceInfo.
*
* Throws if an invalid record is encountered (invalid URI, etc.),
* returns true if the record is to be applied, false otherwise
* (no visits to add, etc.),
*/
- _recordToPlaceInfo: function _recordToPlaceInfo(record) {
+ _recordToPlaceInfo: Task.async(function* (record) {
// Sort out invalid URIs and ones Places just simply doesn't want.
record.uri = Utils.makeURI(record.histUri);
if (!record.uri) {
this._log.warn("Attempted to process invalid URI, skipping.");
throw "Invalid URI in record";
}
if (!Utils.checkGUID(record.id)) {
@@ -276,17 +283,17 @@ HistoryStore.prototype = {
+ record.uri.spec + ": can't add this URI.");
return false;
}
// We dupe visits by date and type. So an incoming visit that has
// the same timestamp and type as a local one won't get applied.
// To avoid creating new objects, we rewrite the query result so we
// can simply check for containment below.
- let curVisits = this._getVisits(record.histUri);
+ let curVisits = yield this._getVisits(record.histUri);
let i, k;
for (i = 0; i < curVisits.length; i++) {
curVisits[i] = curVisits[i].date + "," + curVisits[i].type;
}
// Walk through the visits, make sure we have sound data, and eliminate
// dupes. The latter is done by rewriting the array in-place.
for (i = 0, k = 0; i < record.visits.length; i++) {
@@ -326,51 +333,54 @@ HistoryStore.prototype = {
// and that shouldn't change without a visit.
if (!record.visits.length) {
this._log.trace("Ignoring record " + record.id + " with URI "
+ record.uri.spec + ": no visits to add.");
return false;
}
return true;
- },
+ }),
- remove: function HistStore_remove(record) {
- let page = this._findURLByGUID(record.id);
+ remove: Task.async(function* (record) {
+ let page = yield this._findURLByGUID(record.id);
if (page == null) {
this._log.debug("Page already removed: " + record.id);
return;
}
let uri = Utils.makeURI(page.url);
PlacesUtils.history.removePage(uri);
this._log.trace("Removed page: " + [record.id, page.url, page.title]);
- },
+ }),
itemExists: function HistStore_itemExists(id) {
- return !!this._findURLByGUID(id);
+ return this._findURLByGUID(id).then(
+ result => !!result
+ );
},
- createRecord: function createRecord(id, collection) {
- let foo = this._findURLByGUID(id);
+ createRecord: Task.async(function* (id, collection) {
+ let foo = yield this._findURLByGUID(id);
let record = new HistoryRec(collection, id);
if (foo) {
record.histUri = foo.url;
record.title = foo.title;
record.sortindex = foo.frecency;
- record.visits = this._getVisits(record.histUri);
+ record.visits = yield this._getVisits(record.histUri);
} else {
record.deleted = true;
}
return record;
- },
+ }),
wipe: function HistStore_wipe() {
PlacesUtils.history.removeAllPages();
+ return Promise.resolve();
}
};
function HistoryTracker(name, engine) {
Tracker.call(this, name, engine);
}
HistoryTracker.prototype = {
__proto__: Tracker.prototype,
@@ -390,17 +400,18 @@ HistoryTracker.prototype = {
Ci.nsISupportsWeakReference
]),
onDeleteAffectsGUID: function (uri, guid, reason, source, increment) {
if (this.ignoreAll || reason == Ci.nsINavHistoryObserver.REASON_EXPIRED) {
return;
}
this._log.trace(source + ": " + uri.spec + ", reason " + reason);
- if (this.addChangedID(guid)) {
+ let success = Async.promiseSpinningly(this.addChangedID(guid));
+ if (success) {
this.score += increment;
}
},
onDeleteVisits: function (uri, visitTime, guid, reason) {
this.onDeleteAffectsGUID(uri, guid, reason, "onDeleteVisits", SCORE_INCREMENT_SMALL);
},
@@ -410,17 +421,17 @@ HistoryTracker.prototype = {
onVisit: function (uri, vid, time, session, referrer, trans, guid) {
if (this.ignoreAll) {
this._log.trace("ignoreAll: ignoring visit for " + guid);
return;
}
this._log.trace("onVisit: " + uri.spec);
- if (this.addChangedID(guid)) {
+ if (Async.promiseSpinningly(this.addChangedID(guid))) {
this.score += SCORE_INCREMENT_SMALL;
}
},
onClearHistory: function () {
this._log.trace("onClearHistory");
// Note that we're going to trigger a sync, but none of the cleared
// pages are tracked, so the deletions will not be propagated.
--- a/services/sync/modules/engines/passwords.js
+++ b/services/sync/modules/engines/passwords.js
@@ -35,67 +35,66 @@ PasswordEngine.prototype = {
_storeObj: PasswordStore,
_trackerObj: PasswordTracker,
_recordObj: LoginRec,
applyIncomingBatchSize: PASSWORDS_STORE_BATCH_SIZE,
syncPriority: 2,
- _syncFinish: function () {
- SyncEngine.prototype._syncFinish.call(this);
+ _syncFinish: Task.async(function* () {
+ yield SyncEngine.prototype._syncFinish.call(this);
// Delete the Weave credentials from the server once.
if (!Svc.Prefs.get("deletePwdFxA", false)) {
try {
let ids = [];
for (let host of Utils.getSyncCredentialsHosts()) {
for (let info of Services.logins.findLogins({}, host, "", "")) {
ids.push(info.QueryInterface(Components.interfaces.nsILoginMetaInfo).guid);
}
}
if (ids.length) {
let coll = new Collection(this.engineURL, null, this.service);
coll.ids = ids;
- let ret = coll.delete();
+ let ret = yield coll.delete();
this._log.debug("Delete result: " + ret);
if (!ret.success && ret.status != 400) {
// A non-400 failure means try again next time.
return;
}
} else {
this._log.debug("Didn't find any passwords to delete");
}
// If there were no ids to delete, or we succeeded, or got a 400,
// record success.
Svc.Prefs.set("deletePwdFxA", true);
Svc.Prefs.reset("deletePwd"); // The old prefname we previously used.
} catch (ex if !Async.isShutdownException(ex)) {
this._log.debug("Password deletes failed: " + Utils.exceptionStr(ex));
}
}
- },
+ }),
- _findDupe: function (item) {
+ _findDupe: Task.async(function* (item) {
let login = this._store._nsLoginInfoFromRecord(item);
if (!login) {
return;
}
let logins = Services.logins.findLogins({}, login.hostname, login.formSubmitURL, login.httpRealm);
-
- this._store._sleep(0); // Yield back to main thread after synchronous operation.
+ yield this._store._sleep(0); // Yield back to main thread after synchronous operation.
// Look for existing logins that match the hostname, but ignore the password.
for each (let local in logins) {
if (login.matches(local, true) && local instanceof Ci.nsILoginMetaInfo) {
return local.guid;
}
}
- },
+ }),
};
function PasswordStore(name, engine) {
Store.call(this, name, engine);
this._nsLoginInfo = new Components.Constructor("@mozilla.org/login-manager/loginInfo;1", Ci.nsILoginInfo, "init");
}
PasswordStore.prototype = {
__proto__: Store.prototype,
@@ -165,17 +164,17 @@ PasswordStore.prototype = {
let metaInfo = logins[i].QueryInterface(Ci.nsILoginMetaInfo);
if (Utils.getSyncCredentialsHosts().has(metaInfo.hostname)) {
continue;
}
items[metaInfo.guid] = metaInfo;
}
- return items;
+ return Promise.resolve(items);
},
changeItemID: function (oldID, newID) {
this._log.trace("Changing item ID: " + oldID + " to " + newID);
let oldLogin = this._getLoginFromGUID(oldID);
if (!oldLogin) {
this._log.trace("Can't change item ID: item doesn't exist");
@@ -185,45 +184,46 @@ PasswordStore.prototype = {
this._log.trace("Can't change item ID: new ID already in use");
return;
}
let prop = this._newPropertyBag();
prop.setPropertyAsAUTF8String("guid", newID);
Services.logins.modifyLogin(oldLogin, prop);
+ return Promise.resolve();
},
itemExists: function (id) {
- return !!this._getLoginFromGUID(id);
+ return Promise.resolve(!!this._getLoginFromGUID(id));
},
createRecord: function (id, collection) {
let record = new LoginRec(collection, id);
let login = this._getLoginFromGUID(id);
if (!login) {
record.deleted = true;
- return record;
+ return Promise.resolve(record);
}
record.hostname = login.hostname;
record.formSubmitURL = login.formSubmitURL;
record.httpRealm = login.httpRealm;
record.username = login.username;
record.password = login.password;
record.usernameField = login.usernameField;
record.passwordField = login.passwordField;
// Optional fields.
login.QueryInterface(Ci.nsILoginMetaInfo);
record.timeCreated = login.timeCreated;
record.timePasswordChanged = login.timePasswordChanged;
- return record;
+ return Promise.resolve(record);
},
create: function (record) {
let login = this._nsLoginInfoFromRecord(record);
if (!login) {
return;
}
@@ -245,17 +245,17 @@ PasswordStore.prototype = {
if (!loginItem) {
this._log.trace("Asked to remove record that doesn't exist, ignoring");
return;
}
Services.logins.removeLogin(loginItem);
},
- update: function (record) {
+ update: Task.async(function* (record) {
let loginItem = this._getLoginFromGUID(record.id);
if (!loginItem) {
this._log.debug("Skipping update for unknown item: " + record.hostname);
return;
}
this._log.debug("Updating " + record.hostname);
let newinfo = this._nsLoginInfoFromRecord(record);
@@ -265,20 +265,21 @@ PasswordStore.prototype = {
try {
Services.logins.modifyLogin(loginItem, newinfo);
} catch(ex) {
this._log.debug("Modifying record " + record.id +
" resulted in exception " + Utils.exceptionStr(ex) +
". Not modifying.");
}
- },
+ }),
wipe: function () {
Services.logins.removeAllLogins();
+ return Promise.resolve();
},
};
function PasswordTracker(name, engine) {
Tracker.call(this, name, engine);
Svc.Obs.add("weave:engine:start-tracking", this);
Svc.Obs.add("weave:engine:stop-tracking", this);
}
@@ -311,17 +312,17 @@ PasswordTracker.prototype = {
// Skip over Weave password/passphrase changes.
subject.QueryInterface(Ci.nsILoginMetaInfo).QueryInterface(Ci.nsILoginInfo);
if (Utils.getSyncCredentialsHosts().has(subject.hostname)) {
break;
}
this.score += SCORE_INCREMENT_XLARGE;
this._log.trace(data + ": " + subject.guid);
- this.addChangedID(subject.guid);
+ Async.promiseSpinningly(this.addChangedID(subject.guid));
break;
case "removeAllLogins":
this._log.trace(data);
this.score += SCORE_INCREMENT_XLARGE;
break;
}
},
};
--- a/services/sync/modules/engines/prefs.js
+++ b/services/sync/modules/engines/prefs.js
@@ -43,22 +43,23 @@ PrefsEngine.prototype = {
syncPriority: 1,
getChangedIDs: function () {
// No need for a proper timestamp (no conflict resolution needed).
let changedIDs = {};
if (this._tracker.modified)
changedIDs[PREFS_GUID] = 0;
- return changedIDs;
+ return Promise.resolve(changedIDs);
},
_wipeClient: function () {
- SyncEngine.prototype._wipeClient.call(this);
- this.justWiped = true;
+ return SyncEngine.prototype._wipeClient.call(this).then(
+ () => this.justWiped = true
+ );
},
_reconcile: function (item) {
// Apply the incoming item if we don't care about the local data
if (this.justWiped) {
this.justWiped = false;
return true;
}
@@ -147,58 +148,63 @@ PrefStore.prototype = {
LightweightThemeManager.currentTheme = currentTheme;
}
},
getAllIDs: function () {
/* We store all prefs in just one WBO, with just one GUID */
let allprefs = {};
allprefs[PREFS_GUID] = true;
- return allprefs;
+ return Promise.resolve(allprefs);
},
changeItemID: function (oldID, newID) {
this._log.trace("PrefStore GUID is constant!");
+ return Promise.resolve();
},
itemExists: function (id) {
- return (id === PREFS_GUID);
+ return Promise.resolve(id === PREFS_GUID);
},
createRecord: function (id, collection) {
let record = new PrefRec(collection, id);
if (id == PREFS_GUID) {
record.value = this._getAllPrefs();
} else {
record.deleted = true;
}
- return record;
+ return Promise.resolve(record);
},
create: function (record) {
this._log.trace("Ignoring create request");
+ return Promise.resolve();
},
remove: function (record) {
this._log.trace("Ignoring remove request");
+ return Promise.resolve();
},
update: function (record) {
// Silently ignore pref updates that are for other apps.
if (record.id != PREFS_GUID)
- return;
+ return Promise.resolve();
this._log.trace("Received pref updates, applying...");
this._setAllPrefs(record.value);
+ return Promise.resolve();
},
wipe: function () {
this._log.trace("Ignoring wipe request");
+ return Promise.resolve();
}
};
function PrefTracker(name, engine) {
Tracker.call(this, name, engine);
Svc.Obs.add("profile-before-change", this);
Svc.Obs.add("weave:engine:start-tracking", this);
Svc.Obs.add("weave:engine:stop-tracking", this);
@@ -210,20 +216,22 @@ PrefTracker.prototype = {
return Svc.Prefs.get("engine.prefs.modified", false);
},
set modified(value) {
Svc.Prefs.set("engine.prefs.modified", value);
},
loadChangedIDs: function loadChangedIDs() {
// Don't read changed IDs from disk at start up.
+ return Promise.resolve();
},
clearChangedIDs: function clearChangedIDs() {
this.modified = false;
+ return Promise.resolve();
},
__prefs: null,
get _prefs() {
if (!this.__prefs) {
this.__prefs = new Preferences();
}
return this.__prefs;
--- a/services/sync/modules/engines/tabs.js
+++ b/services/sync/modules/engines/tabs.js
@@ -29,88 +29,91 @@ TabSetRecord.prototype = {
ttl: TABS_TTL,
};
Utils.deferGetSet(TabSetRecord, "cleartext", ["clientName", "tabs"]);
this.TabEngine = function TabEngine(service) {
SyncEngine.call(this, "Tabs", service);
-
- // Reset the client on every startup so that we fetch recent tabs.
- this._resetClient();
}
TabEngine.prototype = {
__proto__: SyncEngine.prototype,
_storeObj: TabStore,
_trackerObj: TabTracker,
_recordObj: TabSetRecord,
syncPriority: 3,
+ promiseInitialized: Task.async(function* () {
+ // Reset the client on every startup so that we fetch recent tabs
+ yield SyncEngine.prototype.promiseInitialized.call(this)
+ yield this._resetClient();
+ }),
+
getChangedIDs: function () {
// No need for a proper timestamp (no conflict resolution needed).
let changedIDs = {};
if (this._tracker.modified)
changedIDs[this.service.clientsEngine.localID] = 0;
- return changedIDs;
+ return Promise.resolve(changedIDs);
},
// API for use by Sync UI code to give user choices of tabs to open.
getAllClients: function () {
return this._store._remoteClients;
},
getClientById: function (id) {
return this._store._remoteClients[id];
},
- _resetClient: function () {
- SyncEngine.prototype._resetClient.call(this);
- this._store.wipe();
+ _resetClient: Task.async(function* () {
+ yield SyncEngine.prototype._resetClient.call(this);
+ yield this._store.wipe();
this._tracker.modified = true;
- },
+ }),
- removeClientData: function () {
+ removeClientData: Task.async(function* () {
let url = this.engineURL + "/" + this.service.clientsEngine.localID;
- this.service.resource(url).delete();
- },
+ yield this.service.resource(url).delete();
+ }),
/**
* Return a Set of open URLs.
*/
getOpenURLs: function () {
let urls = new Set();
for (let entry of this._store.getAllTabs()) {
urls.add(entry.urlHistory[0]);
}
return urls;
},
- _reconcile: function (item) {
+ _reconcile: Task.async(function* (item) {
// Skip our own record.
// TabStore.itemExists tests only against our local client ID.
- if (this._store.itemExists(item.id)) {
+ let exists = yield this._store.itemExists(item.id);
+ if (exists) {
this._log.trace("Ignoring incoming tab item because of its id: " + item.id);
return false;
}
-
- return SyncEngine.prototype._reconcile.call(this, item);
- }
+ return (yield SyncEngine.prototype._reconcile.call(this, item));
+ }),
};
function TabStore(name, engine) {
Store.call(this, name, engine);
}
TabStore.prototype = {
__proto__: Store.prototype,
itemExists: function (id) {
- return id == this.engine.service.clientsEngine.localID;
+ return Promise.resolve(id == this.engine.service.clientsEngine.localID);
},
getWindowEnumerator: function () {
return Services.wm.getEnumerator("navigator:browser");
},
shouldSkipWindow: function (win) {
return win.closed ||
@@ -177,17 +180,17 @@ TabStore.prototype = {
lastUsed: Math.floor((tabState.lastAccessed || 0) / 1000),
});
}
}
return allTabs;
},
- createRecord: function (id, collection) {
+ createRecord: Task.async(function *(id, collection) {
let record = new TabSetRecord(collection, id);
record.clientName = this.engine.service.clientsEngine.localName;
// Sort tabs in descending-used order to grab the most recently used
let tabs = this.getAllTabs(true).sort(function (a, b) {
return b.lastUsed - a.lastUsed;
});
@@ -208,19 +211,19 @@ TabStore.prototype = {
this._log.trace("Created tabs " + tabs.length + " of " + origLength);
tabs.forEach(function (tab) {
this._log.trace("Wrapping tab: " + JSON.stringify(tab));
}, this);
record.tabs = tabs;
return record;
- },
+ }),
- getAllIDs: function () {
+ getAllIDs: Task.async(function* () {
// Don't report any tabs if all windows are in private browsing for
// first syncs.
let ids = {};
let allWindowsArePrivate = false;
let wins = Services.wm.getEnumerator("navigator:browser");
while (wins.hasMoreElements()) {
if (PrivateBrowsingUtils.isWindowPrivate(wins.getNext())) {
// Ensure that at least there is a private window.
@@ -234,23 +237,24 @@ TabStore.prototype = {
if (allWindowsArePrivate &&
!PrivateBrowsingUtils.permanentPrivateBrowsing) {
return ids;
}
ids[this.engine.service.clientsEngine.localID] = true;
return ids;
- },
+ }),
wipe: function () {
this._remoteClients = {};
+ return Promise.resolve();
},
- create: function (record) {
+ create: Task.async(function* (record) {
this._log.debug("Adding remote tabs from " + record.clientName);
this._remoteClients[record.id] = record.cleartext;
// Lose some precision, but that's good enough (seconds).
let roundModify = Math.floor(record.modified / 1000);
let notifyState = Svc.Prefs.get("notifyTabState");
// If there's no existing pref, save this first modified time.
@@ -263,21 +267,21 @@ TabStore.prototype = {
if (notifyState == 0) {
return;
}
// We must have gotten a new tab that isn't the same as last time.
if (notifyState != roundModify) {
Svc.Prefs.set("notifyTabState", 0);
}
- },
+ }),
- update: function (record) {
+ update: Task.async(function* (record) {
this._log.trace("Ignoring tab updates as local ones win");
- },
+ }),
};
function TabTracker(name, engine) {
Tracker.call(this, name, engine);
Svc.Obs.add("weave:engine:start-tracking", this);
Svc.Obs.add("weave:engine:stop-tracking", this);
@@ -287,20 +291,22 @@ function TabTracker(name, engine) {
}
TabTracker.prototype = {
__proto__: Tracker.prototype,
QueryInterface: XPCOMUtils.generateQI([Ci.nsIObserver]),
loadChangedIDs: function () {
// Don't read changed IDs from disk at start up.
+ return Promise.resolve();
},
clearChangedIDs: function () {
this.modified = false;
+ return Promise.resolve();
},
_topics: ["pageshow", "TabOpen", "TabClose", "TabSelect"],
_registerListenersForWindow: function (window) {
this._log.trace("Registering tab listeners in window");
for each (let topic in this._topics) {
window.addEventListener(topic, this.onTab, false);
--- a/services/sync/modules/identity.js
+++ b/services/sync/modules/identity.js
@@ -436,25 +436,25 @@ IdentityManager.prototype = {
},
/**
* Pre-fetches any information that might help with migration away from this
* identity. Called after every sync and is really just an optimization that
* allows us to avoid a network request for when we actually need the
* migration info.
*/
- prefetchMigrationSentinel: function(service) {
+ prefetchMigrationSentinel: Task.async(function* (service) {
// Try and fetch the migration sentinel - it will end up in the recordManager
// cache.
try {
- service.recordManager.get(service.storageURL + "meta/fxa_credentials");
+ yield service.recordManager.get(service.storageURL + "meta/fxa_credentials");
} catch (ex if !Async.isShutdownException(ex)) {
this._log.warn("Failed to pre-fetch the migration sentinel", ex);
}
- },
+ }),
/**
* Obtains the array of basic logins from nsiPasswordManager.
*/
_getLogins: function _getLogins(realm) {
return Services.logins.findLogins({}, PWDMGR_HOST, null, realm);
},
--- a/services/sync/modules/policies.js
+++ b/services/sync/modules/policies.js
@@ -695,17 +695,17 @@ ErrorHandler.prototype = {
/**
* Trigger a sync and don't muffle any errors, particularly network errors.
*/
syncAndReportErrors: function syncAndReportErrors() {
this._log.debug("Beginning user-triggered sync.");
this.dontIgnoreErrors = true;
- Utils.nextTick(this.service.sync, this.service);
+ return this.service.sync();
},
/**
* Generate a log file for the sync that just completed
* and refresh the input & output streams.
*/
resetFileLog: function resetFileLog() {
let onComplete = logType => {
--- a/services/sync/modules/record.js
+++ b/services/sync/modules/record.js
@@ -37,36 +37,36 @@ WBORecord.prototype = {
get sortindex() {
if (this.data.sortindex)
return this.data.sortindex;
return 0;
},
// Get thyself from your URI, then deserialize.
// Set thine 'response' field.
- fetch: function fetch(resource) {
+ fetch: Task.async(function* (resource) {
if (!resource instanceof Resource) {
throw new Error("First argument must be a Resource instance.");
}
- let r = resource.get();
+ let r = yield resource.get();
if (r.success) {
this.deserialize(r); // Warning! Muffles exceptions!
}
this.response = r;
return this;
- },
+ }),
- upload: function upload(resource) {
+ upload: Task.async(function* (resource) {
if (!resource instanceof Resource) {
throw new Error("First argument must be a Resource instance.");
}
- return resource.put(this);
- },
+ return (yield resource.put(this));
+ }),
// Take a base URI string, with trailing slash, and return the URI of this
// WBO based on collection and ID.
uri: function(base) {
if (this.collection && this.id) {
let url = Utils.makeURI(base + this.collection + "/" + this.id);
url.QueryInterface(Ci.nsIURL);
return url;
@@ -218,57 +218,57 @@ this.RecordManager = function RecordMana
this._log = Log.repository.getLogger(this._logName);
this._records = {};
}
RecordManager.prototype = {
_recordType: CryptoWrapper,
_logName: "Sync.RecordManager",
- import: function RecordMgr_import(url) {
+ import: Task.async(function* (url) {
this._log.trace("Importing record: " + (url.spec ? url.spec : url));
try {
// Clear out the last response with empty object if GET fails
this.response = {};
- this.response = this.service.resource(url).get();
+ this.response = yield this.service.resource(url).get();
// Don't parse and save the record on failure
if (!this.response.success)
return null;
let record = new this._recordType(url);
record.deserialize(this.response);
return this.set(url, record);
} catch (ex if !Async.isShutdownException(ex)) {
this._log.debug("Failed to import record: " + Utils.exceptionStr(ex));
return null;
}
- },
+ }),
- get: function RecordMgr_get(url) {
+ get: Task.async(function* (url) {
// Use a url string as the key to the hash
let spec = url.spec ? url.spec : url;
if (spec in this._records)
return this._records[spec];
- return this.import(url);
- },
+ return (yield this.import(url));
+ }),
- set: function RecordMgr_set(url, record) {
+ set: function (url, record) {
let spec = url.spec ? url.spec : url;
return this._records[spec] = record;
},
- contains: function RecordMgr_contains(url) {
+ contains: function (url) {
if ((url.spec || url) in this._records)
return true;
return false;
},
- clearCache: function recordMgr_clearCache() {
+ clearCache: function () {
this._records = {};
},
del: function RecordMgr_del(url) {
delete this._records[url];
}
};
@@ -610,23 +610,32 @@ Collection.prototype = {
set recordHandler(onRecord) {
// Save this because onProgress is called with this as the ChannelListener
let coll = this;
// Switch to newline separated records for incremental parsing
coll.setHeader("Accept", "application/newlines");
- this._onProgress = function() {
+ this._onProgress = Task.async(function* () {
+ // Note it would theoretically be possible to let all the promises from
+ // this batch run concurrently then Promise.all() at the end - but when
+ // the records are small enough we end up with well over 1000 promises
+ // per read, and that janks. So we use a task, yield for each individual
+ // promise and every so often we yield the event loop.
+ let count = 0;
let newline;
while ((newline = this._data.indexOf("\n")) > 0) {
// Split the json record from the rest of the data
let json = this._data.slice(0, newline);
this._data = this._data.slice(newline + 1);
- // Deserialize a record from json and give it to the callback
+ // Deserialize a record from json and give it to the callback.
let record = new coll._recordObj();
record.deserialize(json);
- onRecord(record);
+ yield onRecord(record);
+ if (++count % 50 == 0) {
+ yield Async.promiseYield();
+ }
}
- };
+ });
},
};
--- a/services/sync/modules/resource.js
+++ b/services/sync/modules/resource.js
@@ -189,17 +189,17 @@ AsyncResource.prototype = {
this._log.trace("HTTP Header " + key + ": ***** (suppressed)");
else
this._log.trace("HTTP Header " + key + ": " + headers[key]);
channel.setRequestHeader(key, headers[key], false);
}
return channel;
},
- _onProgress: function Res__onProgress(channel) {},
+ _onProgress: function Res__onProgress(channel) { return Promise.resolve(); },
_doRequest: function _doRequest(action, data, callback) {
this._log.trace("In _doRequest.");
this._callback = callback;
let channel = this._createRequest(action);
if ("undefined" != typeof(data))
this._data = data;
@@ -369,70 +369,65 @@ AsyncResource.prototype = {
delete: function delete_(callback) {
this._doRequest("DELETE", undefined, callback);
}
};
/*
* Represent a remote network resource, identified by a URI, with a
- * synchronous API.
+ * promise-based API.
*
- * 'Resource' is not recommended for new code. Use the asynchronous API of
- * 'AsyncResource' instead.
+ * 'Resource' is recommended for new code.
*/
this.Resource = function Resource(uri) {
AsyncResource.call(this, uri);
}
Resource.prototype = {
__proto__: AsyncResource.prototype,
_logName: "Sync.Resource",
// ** {{{ Resource._request }}} **
//
// Perform a particular HTTP request on the resource. This method
// is never called directly, but is used by the high-level
// {{{get}}}, {{{put}}}, {{{post}}} and {{delete}} methods.
_request: function Res__request(action, data) {
- let cb = Async.makeSyncCallback();
- function callback(error, ret) {
- if (error)
- cb.throw(error);
- else
- cb(ret);
- }
+ return new Promise((resolve, reject) => {
+ function callback(ex, ret) {
+ if (ex) {
+ // Combine the channel stack with this request stack. Need to create
+ // a new error object for that.
+ let error = Error(ex.message);
+ error.result = ex.result;
+ let chanStack = [];
+ if (ex.stack)
+ chanStack = ex.stack.trim().split(/\n/).slice(1);
+ let requestStack = error.stack.split(/\n/).slice(1);
- // The channel listener might get a failure code
- try {
+ // Strip out the args for the last 2 frames because they're usually HUGE!
+ for (let i = 0; i <= 1; i++)
+ requestStack[i] = requestStack[i].replace(/\(".*"\)@/, "(...)@");
+
+ error.stack = chanStack.concat(requestStack).join("\n");
+ reject(error);
+ } else {
+ resolve(ret);
+ }
+ }
+
this._doRequest(action, data, callback);
- return Async.waitForSyncCallback(cb);
- } catch (ex if !Async.isShutdownException(ex)) {
- // Combine the channel stack with this request stack. Need to create
- // a new error object for that.
- let error = Error(ex.message);
- error.result = ex.result;
- let chanStack = [];
- if (ex.stack)
- chanStack = ex.stack.trim().split(/\n/).slice(1);
- let requestStack = error.stack.split(/\n/).slice(1);
-
- // Strip out the args for the last 2 frames because they're usually HUGE!
- for (let i = 0; i <= 1; i++)
- requestStack[i] = requestStack[i].replace(/\(".*"\)@/, "(...)@");
-
- error.stack = chanStack.concat(requestStack).join("\n");
- throw error;
- }
+ });
},
// ** {{{ Resource.get }}} **
//
- // Perform an asynchronous HTTP GET for this resource.
+ // Perform an HTTP GET for this resource.
get: function Res_get() {
return this._request("GET");
},
// ** {{{ Resource.put }}} **
//
// Perform a HTTP PUT for this resource.
put: function Res_put(data) {
@@ -553,17 +548,23 @@ ChannelListener.prototype = {
try {
this._data += siStream.read(count);
} catch (ex) {
this._log.warn("Exception thrown reading " + count + " bytes from " + siStream + ".");
throw ex;
}
try {
- this._onProgress();
+ // We don't want to allow too many in-flight promises and don't want to
+ // buffer the entire response in memory, so for now we block here waiting
+ // for the promise to resolve. Ideally we need to work out the best way
+ // to do things truly async (ie, to read the stream as fast as we can
+ // consume it, as opposed to this "listener" model that insists we
+ // consume it as fast as it can be produced.)
+ Async.promiseSpinningly(this._onProgress());
} catch (ex if !Async.isShutdownException(ex)) {
this._log.warn("Got exception calling onProgress handler during fetch of "
+ req.URI.spec);
this._log.debug(CommonUtils.exceptionStr(ex));
this._log.trace("Rethrowing; expect a failure code from the HTTP channel.");
throw ex;
}
--- a/services/sync/modules/service.js
+++ b/services/sync/modules/service.js
@@ -61,21 +61,21 @@ const TELEMETRY_CUSTOM_SERVER_PREFS = {
"identity.sync.tokenserver.uri",
// The old deprecated prefname we previously used for the tokenserver URI.
"services.sync.tokenServerURI",
],
};
function Sync11Service() {
- this._notify = Utils.notify("weave:service:");
+ this._notify = Utils.promiseNotify("weave:service:");
}
Sync11Service.prototype = {
- _lock: Utils.lock,
+ _lock: Utils.promiseLock,
_locked: false,
_loggedIn: false,
infoURL: null,
storageURL: null,
metaURL: null,
cryptoKeyURL: null,
@@ -153,25 +153,25 @@ Sync11Service.prototype = {
},
unlock: function unlock() {
this._locked = false;
},
// A specialized variant of Utils.catch.
// This provides a more informative error message when we're already syncing:
// see Bug 616568.
- _catch: function _catch(func) {
+ _promiseCatch: function (func) {
function lockExceptions(ex) {
if (Utils.isLockException(ex)) {
// This only happens if we're syncing already.
this._log.info("Cannot start sync: already syncing?");
}
}
- return Utils.catch.call(this, func, lockExceptions);
+ return Utils.promiseCatch.call(this, func, lockExceptions);
},
get userBaseURL() {
if (!this._clusterManager) {
return null;
}
return this._clusterManager.getUserBaseURL();
},
@@ -226,98 +226,99 @@ Sync11Service.prototype = {
* thrashing doesn't solve anything. We keep a reasonable interval between
* these remedial actions.
*/
lastHMACEvent: 0,
/*
* Returns whether to try again.
*/
- handleHMACEvent: function handleHMACEvent() {
+ handleHMACEvent: Task.async(function* () {
let now = Date.now();
// Leave a sizable delay between HMAC recovery attempts. This gives us
// time for another client to fix themselves if we touch the record.
if ((now - this.lastHMACEvent) < HMAC_EVENT_INTERVAL)
return false;
this._log.info("Bad HMAC event detected. Attempting recovery " +
"or signaling to other clients.");
// Set the last handled time so that we don't act again.
this.lastHMACEvent = now;
// Fetch keys.
let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
try {
- let cryptoResp = cryptoKeys.fetch(this.resource(this.cryptoKeysURL)).response;
+ let fetchResult = yield cryptoKeys.fetch(this.resource(this.cryptoKeysURL));
+ let cryptoResp = fetchResult.response;
// Save out the ciphertext for when we reupload. If there's a bug in
// CollectionKeyManager, this will prevent us from uploading junk.
let cipherText = cryptoKeys.ciphertext;
if (!cryptoResp.success) {
this._log.warn("Failed to download keys.");
return false;
}
- let keysChanged = this.handleFetchedKeys(this.identity.syncKeyBundle,
- cryptoKeys, true);
+ let keysChanged = yield this.handleFetchedKeys(this.identity.syncKeyBundle,
+ cryptoKeys, true);
if (keysChanged) {
// Did they change? If so, carry on.
this._log.info("Suggesting retry.");
return true; // Try again.
}
// If not, reupload them and continue the current sync.
cryptoKeys.ciphertext = cipherText;
cryptoKeys.cleartext = null;
- let uploadResp = cryptoKeys.upload(this.resource(this.cryptoKeysURL));
+ let uploadResp = yield cryptoKeys.upload(this.resource(this.cryptoKeysURL));
if (uploadResp.success)
this._log.info("Successfully re-uploaded keys. Continuing sync.");
else
this._log.warn("Got error response re-uploading keys. " +
"Continuing sync; let's try again later.");
return false; // Don't try again: same keys.
} catch (ex) {
this._log.warn("Got exception \"" + ex + "\" fetching and handling " +
"crypto keys. Will try again later.");
return false;
}
- },
+ }),
- handleFetchedKeys: function handleFetchedKeys(syncKey, cryptoKeys, skipReset) {
+ handleFetchedKeys: Task.async(function* (syncKey, cryptoKeys, skipReset) {
// Don't want to wipe if we're just starting up!
let wasBlank = this.collectionKeys.isClear;
let keysChanged = this.collectionKeys.updateContents(syncKey, cryptoKeys);
if (keysChanged && !wasBlank) {
this._log.debug("Keys changed: " + JSON.stringify(keysChanged));
if (!skipReset) {
this._log.info("Resetting client to reflect key change.");
if (keysChanged.length) {
// Collection keys only. Reset individual engines.
- this.resetClient(keysChanged);
+ yield this.resetClient(keysChanged);
}
else {
// Default key changed: wipe it all.
- this.resetClient();
+ yield this.resetClient();
}
this._log.info("Downloaded new keys, client reset. Proceeding.");
}
return true;
}
return false;
- },
+ }),
// The global "enabled" state comes from prefs, and will be set to false
// whenever the UI that exposes what to sync finds all Sync engines disabled.
get enabled() {
return Svc.Prefs.get("enabled");
},
set enabled(val) {
// There's no real reason to impose this other than to catch someone doing
@@ -549,37 +550,37 @@ Sync11Service.prototype = {
return request;
},
/**
* Perform the info fetch as part of a login or key fetch, or
* inside engine sync.
*/
- _fetchInfo: function (url) {
+ _fetchInfo: Task.async(function* (url) {
let infoURL = url || this.infoURL;
this._log.trace("In _fetchInfo: " + infoURL);
let info;
try {
- info = this.resource(infoURL).get();
+ info = yield this.resource(infoURL).get();
} catch (ex) {
this.errorHandler.checkServerError(ex, "info/collections");
throw ex;
}
// Always check for errors; this is also where we look for X-Weave-Alert.
this.errorHandler.checkServerError(info, "info/collections");
if (!info.success) {
throw "Aborting sync: failed to get collections.";
}
return info;
- },
+ }),
- verifyAndFetchSymmetricKeys: function verifyAndFetchSymmetricKeys(infoResponse) {
+ verifyAndFetchSymmetricKeys: Task.async(function* (infoResponse) {
this._log.debug("Fetching and verifying -- or generating -- symmetric keys.");
// Don't allow empty/missing passphrase.
// Furthermore, we assume that our sync key is already upgraded,
// and fail if that assumption is invalidated.
if (!this.identity.syncKey) {
@@ -594,17 +595,17 @@ Sync11Service.prototype = {
this.status.login = LOGIN_FAILED_INVALID_PASSPHRASE;
this.status.sync = CREDENTIALS_CHANGED;
return false;
}
try {
if (!infoResponse)
- infoResponse = this._fetchInfo(); // Will throw an exception on failure.
+ infoResponse = yield this._fetchInfo(); // Will throw an exception on failure.
// This only applies when the server is already at version 4.
if (infoResponse.status != 200) {
this._log.warn("info/collections returned non-200 response. Failing key fetch.");
this.status.login = LOGIN_FAILED_SERVER_ERROR;
this.errorHandler.checkServerError(infoResponse, "info/collections");
return false;
}
@@ -619,20 +620,21 @@ Sync11Service.prototype = {
// Don't always set to CREDENTIALS_CHANGED -- we will probably take care of this.
// Fetch storage/crypto/keys.
let cryptoKeys;
if (infoCollections && (CRYPTO_COLLECTION in infoCollections)) {
try {
cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
- let cryptoResp = cryptoKeys.fetch(this.resource(this.cryptoKeysURL)).response;
+ let fetchResult = yield cryptoKeys.fetch(this.resource(this.cryptoKeysURL));
+ let cryptoResp = fetchResult.response;
if (cryptoResp.success) {
- let keysChanged = this.handleFetchedKeys(syncKeyBundle, cryptoKeys);
+ let keysChanged = yield this.handleFetchedKeys(syncKeyBundle, cryptoKeys);
return true;
}
else if (cryptoResp.status == 404) {
// On failure, ask to generate new keys and upload them.
// Fall through to the behavior below.
this._log.warn("Got 404 for crypto/keys, but 'crypto' in info/collections. Regenerating.");
cryptoKeys = null;
}
@@ -669,17 +671,17 @@ Sync11Service.prototype = {
if (!cryptoKeys) {
this._log.info("No keys! Generating new ones.");
// Better make some and upload them, and wipe the server to ensure
// consistency. This is all achieved via _freshStart.
// If _freshStart fails to clear the server or upload keys, it will
// throw.
- this._freshStart();
+ yield this._freshStart();
return true;
}
// Last-ditch case.
return false;
}
else {
// No update needed: we're good!
@@ -688,71 +690,66 @@ Sync11Service.prototype = {
} catch (ex) {
// This means no keys are present, or there's a network error.
this._log.debug("Failed to fetch and verify keys: "
+ Utils.exceptionStr(ex));
this.errorHandler.checkServerError(ex, "crypto/keys");
return false;
}
- },
+ }),
- verifyLogin: function verifyLogin(allow40XRecovery = true) {
+ verifyLogin: Task.async(function* (allow40XRecovery = true) {
if (!this.identity.username) {
this._log.warn("No username in verifyLogin.");
this.status.login = LOGIN_FAILED_NO_USERNAME;
return false;
}
// Attaching auth credentials to a request requires access to
// passwords, which means that Resource.get can throw MP-related
// exceptions!
// So we ask the identity to verify the login state after unlocking the
// master password (ie, this call is expected to prompt for MP unlock
// if necessary) while we still have control.
- let cb = Async.makeSpinningCallback();
- this.identity.unlockAndVerifyAuthState().then(
- result => cb(null, result),
- cb
- );
- let unlockedState = cb.wait();
+ let unlockedState = yield this.identity.unlockAndVerifyAuthState();
this._log.debug("Fetching unlocked auth state returned " + unlockedState);
if (unlockedState != STATUS_OK) {
this.status.login = unlockedState;
return false;
}
try {
// Make sure we have a cluster to verify against.
// This is a little weird, if we don't get a node we pretend
// to succeed, since that probably means we just don't have storage.
- if (this.clusterURL == "" && !this._clusterManager.setCluster()) {
+ if (this.clusterURL == "" && !(yield this._clusterManager.setCluster())) {
this.status.sync = NO_SYNC_NODE_FOUND;
return true;
}
// Fetch collection info on every startup.
- let test = this.resource(this.infoURL).get();
+ let test = yield this.resource(this.infoURL).get();
switch (test.status) {
case 200:
// The user is authenticated.
// We have no way of verifying the passphrase right now,
// so wait until remoteSetup to do so.
// Just make the most trivial checks.
if (!this.identity.syncKey) {
this._log.warn("No passphrase in verifyLogin.");
this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
return false;
}
// Go ahead and do remote setup, so that we can determine
// conclusively that our passphrase is correct.
- if (this._remoteSetup(test)) {
+ if (yield this._remoteSetup(test)) {
// Username/password verified.
this.status.login = LOGIN_SUCCEEDED;
return true;
}
this._log.warn("Remote setup failed.");
// Remote setup must have failed.
return false;
@@ -760,18 +757,18 @@ Sync11Service.prototype = {
case 401:
this._log.warn("401: login failed.");
Services.telemetry.getKeyedHistogramById(
"WEAVE_STORAGE_AUTH_ERRORS").add("info/collections");
// Fall through to the 404 case.
case 404:
// Check that we're verifying with the correct cluster
- if (allow40XRecovery && this._clusterManager.setCluster()) {
- return this.verifyLogin(false);
+ if (allow40XRecovery && (yield this._clusterManager.setCluster())) {
+ return yield this.verifyLogin(false);
}
// We must have the right cluster, but the server doesn't expect us.
// The implications of this depend on the identity being used - for
// the legacy identity, it's an authoritatively "incorrect password",
// (ie, LOGIN_FAILED_LOGIN_REJECTED) but for FxA it probably means
// "transient error fetching auth token".
this.status.login = this.identity.loginStatusFromVerification404();
@@ -785,38 +782,38 @@ Sync11Service.prototype = {
}
} catch (ex) {
// Must have failed on some network issue
this._log.debug("verifyLogin failed: " + Utils.exceptionStr(ex));
this.status.login = LOGIN_FAILED_NETWORK_ERROR;
this.errorHandler.checkServerError(ex, "info/collections");
return false;
}
- },
+ }),
- generateNewSymmetricKeys: function generateNewSymmetricKeys() {
+ generateNewSymmetricKeys: Task.async(function* () {
this._log.info("Generating new keys WBO...");
let wbo = this.collectionKeys.generateNewKeysWBO();
this._log.info("Encrypting new key bundle.");
wbo.encrypt(this.identity.syncKeyBundle);
this._log.info("Uploading...");
- let uploadRes = wbo.upload(this.resource(this.cryptoKeysURL));
+ let uploadRes = yield wbo.upload(this.resource(this.cryptoKeysURL));
if (uploadRes.status != 200) {
this._log.warn("Got status " + uploadRes.status + " uploading new keys. What to do? Throw!");
this.errorHandler.checkServerError(uploadRes, "crypto/keys");
throw new Error("Unable to upload symmetric keys.");
}
this._log.info("Got status " + uploadRes.status + " uploading keys.");
let serverModified = uploadRes.obj; // Modified timestamp according to server.
this._log.debug("Server reports crypto modified: " + serverModified);
// Now verify that info/collections shows them!
this._log.debug("Verifying server collection records.");
- let info = this._fetchInfo();
+ let info = yield this._fetchInfo();
this._log.debug("info/collections is: " + info);
if (info.status != 200) {
this._log.warn("Non-200 info/collections response. Aborting.");
throw new Error("Unable to upload symmetric keys.");
}
info = info.obj;
@@ -832,27 +829,28 @@ Sync11Service.prototype = {
"is stale after successful upload.");
throw new Error("Symmetric key upload failed.");
}
// Doesn't matter if the timestamp is ahead.
// Download and install them.
let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
- let cryptoResp = cryptoKeys.fetch(this.resource(this.cryptoKeysURL)).response;
+ let fetchResult = yield cryptoKeys.fetch(this.resource(this.cryptoKeysURL));
+ let cryptoResp = fetchResult.response;
if (cryptoResp.status != 200) {
this._log.warn("Failed to download keys.");
throw new Error("Symmetric key download failed.");
}
- let keysChanged = this.handleFetchedKeys(this.identity.syncKeyBundle,
- cryptoKeys, true);
+ let keysChanged = yield this.handleFetchedKeys(this.identity.syncKeyBundle,
+ cryptoKeys, true);
if (keysChanged) {
this._log.info("Downloaded keys differed, as expected.");
}
- },
+ }),
changePassword: function changePassword(newPassword) {
let client = new UserAPI10Client(this.userAPIURI);
let cb = Async.makeSpinningCallback();
client.changePassword(this.identity.username,
this.identity.basicPassword, newPassword, cb);
try {
@@ -865,50 +863,50 @@ Sync11Service.prototype = {
// Save the new password for requests and login manager.
this.identity.basicPassword = newPassword;
this.persistLogin();
return true;
},
changePassphrase: function changePassphrase(newphrase) {
- return this._catch(function doChangePasphrase() {
+ return this._promiseCatch(function* doChangePasphrase() {
/* Wipe. */
- this.wipeServer();
+ yield this.wipeServer();
this.logout();
/* Set this so UI is updated on next run. */
this.identity.syncKey = newphrase;
this.persistLogin();
/* We need to re-encrypt everything, so reset. */
- this.resetClient();
+ yield this.resetClient();
this.collectionKeys.clear();
/* Login and sync. This also generates new keys. */
- this.sync();
+ yield this.sync();
Svc.Obs.notify("weave:service:change-passphrase", true);
return true;
})();
},
- startOver: function startOver() {
+ startOver: Task.async(function* () {
this._log.trace("Invoking Service.startOver.");
Svc.Obs.notify("weave:engine:stop-tracking");
- this.status.resetSync();
+ yield this.status.resetSync();
// Deletion doesn't make sense if we aren't set up yet!
if (this.clusterURL != "") {
// Clear client-specific data from the server, including disabled engines.
for each (let engine in [this.clientsEngine].concat(this.engineManager.getAll())) {
try {
- engine.removeClientData();
+ yield engine.removeClientData();
} catch(ex) {
this._log.warn("Deleting client data for " + engine.name + " failed:"
+ Utils.exceptionStr(ex));
}
}
this._log.debug("Finished deleting client data.");
} else {
this._log.debug("Skipping client data removal: no cluster URL.");
@@ -919,17 +917,17 @@ Sync11Service.prototype = {
// by emptying the passphrase (we still need the password).
this._log.info("Service.startOver dropping sync key and logging out.");
this.identity.resetSyncKey();
this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
this.logout();
Svc.Obs.notify("weave:service:start-over");
// Reset all engines and clear keys.
- this.resetClient();
+ yield this.resetClient();
this.collectionKeys.clear();
this.status.resetBackoff();
// Reset Weave prefs.
this._ignorePrefObserver = true;
Svc.Prefs.resetBranch("");
this._ignorePrefObserver = false;
@@ -944,43 +942,43 @@ Sync11Service.prototype = {
keepIdentity = Services.prefs.getBoolPref("services.sync-testing.startOverKeepIdentity");
} catch (_) { /* no such pref */ }
if (keepIdentity) {
Svc.Obs.notify("weave:service:start-over:finish");
return;
}
try {
- this.identity.finalize();
+ yield this.identity.finalize();
// an observer so the FxA migration code can take some action before
// the new identity is created.
Svc.Obs.notify("weave:service:start-over:init-identity");
this.identity.username = "";
this.status.__authManager = null;
this.identity = Status._authManager;
this._clusterManager = this.identity.createClusterManager(this);
Svc.Obs.notify("weave:service:start-over:finish");
} catch (err) {
- this._log.error("startOver failed to re-initialize the identity manager: " + err);
+ this._log.error("startOver failed to re-initialize the identity manager", err);
// Still send the observer notification so the current state is
// reflected in the UI.
Svc.Obs.notify("weave:service:start-over:finish");
}
- },
+ }),
persistLogin: function persistLogin() {
try {
this.identity.persistCredentials(true);
} catch (ex) {
this._log.info("Unable to persist credentials: " + ex);
}
},
login: function login(username, password, passphrase) {
- function onNotify() {
+ function* onNotify() {
this._loggedIn = false;
if (Services.io.offline) {
this.status.login = LOGIN_FAILED_NETWORK_ERROR;
throw "Application is offline, login should not be called";
}
let initialStatus = this._checkSetup();
if (username) {
@@ -993,47 +991,42 @@ Sync11Service.prototype = {
this.identity.syncKey = passphrase;
}
if (this._checkSetup() == CLIENT_NOT_CONFIGURED) {
throw "Aborting login, client not configured.";
}
// Ask the identity manager to explicitly login now.
+ // Just let any errors bubble up - they've more context than we do!
this._log.info("Logging in the user.");
- let cb = Async.makeSpinningCallback();
- this.identity.ensureLoggedIn().then(
- () => cb(null),
- err => cb(err || "ensureLoggedIn failed")
- );
-
- // Just let any errors bubble up - they've more context than we do!
- cb.wait();
+ yield this.identity.ensureLoggedIn();
// Calling login() with parameters when the client was
// previously not configured means setup was completed.
if (initialStatus == CLIENT_NOT_CONFIGURED
&& (username || password || passphrase)) {
Svc.Obs.notify("weave:service:setup-complete");
}
this._updateCachedURLs();
this._log.info("User logged in successfully - verifying login.");
- if (!this.verifyLogin()) {
+ let verified = yield this.verifyLogin();
+ if (!verified) {
// verifyLogin sets the failure states here.
throw "Login failed: " + this.status.login;
}
this._loggedIn = true;
return true;
}
let notifier = this._notify("login", "", onNotify.bind(this));
- return this._catch(this._lock("service.js: login", notifier))();
+ return this._promiseCatch(this._lock("service.js: login", notifier))();
},
logout: function logout() {
// If we failed during login, we aren't going to have this._loggedIn set,
// but we still want to ask the identity to logout, so it doesn't try and
// reuse any old credentials next time we sync.
this._log.info("Logging out");
this.identity.logout();
@@ -1078,36 +1071,36 @@ Sync11Service.prototype = {
return null;
} catch (ex) {
return this.errorHandler.errorStr(ex.body);
}
},
// Stuff we need to do after login, before we can really do
// anything (e.g. key setup).
- _remoteSetup: function _remoteSetup(infoResponse) {
+ _remoteSetup: Task.async(function* (infoResponse) {
let reset = false;
this._log.debug("Fetching global metadata record");
- let meta = this.recordManager.get(this.metaURL);
+ let meta = yield this.recordManager.get(this.metaURL);
// Checking modified time of the meta record.
if (infoResponse &&
(infoResponse.obj.meta != this.metaModified) &&
(!meta || !meta.isNew)) {
// Delete the cached meta record...
this._log.debug("Clearing cached meta record. metaModified is " +
JSON.stringify(this.metaModified) + ", setting to " +
JSON.stringify(infoResponse.obj.meta));
this.recordManager.del(this.metaURL);
// ... fetch the current record from the server, and COPY THE FLAGS.
- let newMeta = this.recordManager.get(this.metaURL);
+ let newMeta = yield this.recordManager.get(this.metaURL);
// If we got a 401, we do not want to create a new meta/global - we
// should be able to get the existing meta after we get a new node.
if (this.recordManager.response.status == 401) {
this._log.debug("Fetching meta/global record on the server returned 401.");
this.errorHandler.checkServerError(this.recordManager.response, "meta/global");
return false;
}
@@ -1116,18 +1109,18 @@ Sync11Service.prototype = {
this._log.debug("No meta/global record on the server. Creating one.");
newMeta = new WBORecord("meta", "global");
newMeta.payload.syncID = this.syncID;
newMeta.payload.storageVersion = STORAGE_VERSION;
newMeta.payload.declined = this.engineManager.getDeclined();
newMeta.isNew = true;
- this.recordManager.set(this.metaURL, newMeta);
- let uploadRes = newMeta.upload(this.resource(this.metaURL));
+ yield this.recordManager.set(this.metaURL, newMeta);
+ let uploadRes = yield newMeta.upload(this.resource(this.metaURL));
if (!uploadRes.success) {
this._log.warn("Unable to upload new meta/global. Failing remote setup.");
this.errorHandler.checkServerError(uploadRes, "meta/global");
return false;
}
} else {
// If newMeta, then it stands to reason that meta != null.
newMeta.isNew = meta.isNew;
@@ -1165,17 +1158,17 @@ Sync11Service.prototype = {
if (!meta)
this._log.info("No metadata record, server wipe needed");
if (meta && !meta.payload.syncID)
this._log.warn("No sync id, server wipe needed");
reset = true;
this._log.info("Wiping server data");
- this._freshStart();
+ yield this._freshStart();
if (status == 404)
this._log.info("Metadata record not found, server was wiped to ensure " +
"consistency.");
else // 200
this._log.info("Wiped server; incompatible metadata: " + remoteVersion);
return true;
@@ -1183,54 +1176,59 @@ Sync11Service.prototype = {
else if (remoteVersion > STORAGE_VERSION) {
this.status.sync = VERSION_OUT_OF_DATE;
this._log.warn("Upgrade required to access newer storage version.");
return false;
}
else if (meta.payload.syncID != this.syncID) {
this._log.info("Sync IDs differ. Local is " + this.syncID + ", remote is " + meta.payload.syncID);
- this.resetClient();
+ yield this.resetClient();
this.collectionKeys.clear();
this.syncID = meta.payload.syncID;
this._log.debug("Clear cached values and take syncId: " + this.syncID);
- if (!this.upgradeSyncKey(meta.payload.syncID)) {
+ let success = yield this.upgradeSyncKey(meta.payload.syncID);
+ if (!success) {
this._log.warn("Failed to upgrade sync key. Failing remote setup.");
return false;
}
- if (!this.verifyAndFetchSymmetricKeys(infoResponse)) {
+ success = yield this.verifyAndFetchSymmetricKeys(infoResponse);
+ if (!success) {
this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
return false;
}
// bug 545725 - re-verify creds and fail sanely
- if (!this.verifyLogin()) {
+ success = yield this.verifyLogin();
+ if (!success) {
this.status.sync = CREDENTIALS_CHANGED;
this._log.info("Credentials have changed, aborting sync and forcing re-login.");
return false;
}
return true;
}
else {
- if (!this.upgradeSyncKey(meta.payload.syncID)) {
+ let success = yield this.upgradeSyncKey(meta.payload.syncID);
+ if (!success) {
this._log.warn("Failed to upgrade sync key. Failing remote setup.");
return false;
}
- if (!this.verifyAndFetchSymmetricKeys(infoResponse)) {
+ success = yield this.verifyAndFetchSymmetricKeys(infoResponse);
+ if (!success) {
this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
return false;
}
return true;
}
- },
+ }),
/**
* Return whether we should attempt login at the start of a sync.
*
* Note that this function has strong ties to _checkSync: callers
* of this function should typically use _checkSync to verify that
* any necessary login took place.
*/
@@ -1263,192 +1261,189 @@ Sync11Service.prototype = {
reason = kFirstSyncChoiceNotMade;
if (ignore && ignore.indexOf(reason) != -1)
return "";
return reason;
},
- sync: function sync() {
+ sync: Task.async(function* () {
if (!this.enabled) {
this._log.debug("Not syncing as Sync is disabled.");
return;
}
let dateStr = new Date().toLocaleFormat(LOG_DATE_FORMAT);
this._log.debug("User-Agent: " + SyncStorageRequest.prototype.userAgent);
this._log.info("Starting sync at " + dateStr);
- this._catch(function () {
+ yield this._promiseCatch(function* () {
// Make sure we're logged in.
if (this._shouldLogin()) {
this._log.debug("In sync: should login.");
- if (!this.login()) {
+ let success = yield this.login();
+ if (!success) {
this._log.debug("Not syncing: login returned false.");
return;
}
}
else {
this._log.trace("In sync: no need to login.");
}
- return this._lockedSync.apply(this, arguments);
+ return (yield this._lockedSync.apply(this, arguments));
})();
- },
+ }),
/**
* Sync up engines with the server.
*/
_lockedSync: function _lockedSync() {
return this._lock("service.js: sync",
- this._notify("sync", "", function onNotify() {
+ this._notify("sync", "", function* () {
let histogram = Services.telemetry.getHistogramById("WEAVE_START_COUNT");
histogram.add(1);
let synchronizer = new EngineSynchronizer(this);
- let cb = Async.makeSpinningCallback();
- synchronizer.onComplete = cb;
-
- synchronizer.sync();
- // wait() throws if the first argument is truthy, which is exactly what
- // we want.
- let result = cb.wait();
+ let result = yield synchronizer.sync();
histogram = Services.telemetry.getHistogramById("WEAVE_COMPLETE_SUCCESS_COUNT");
histogram.add(1);
// We successfully synchronized.
// Check if the identity wants to pre-fetch a migration sentinel from
// the server.
// If we have no clusterURL, we are probably doing a node reassignment
// so don't attempt to get it in that case.
if (this.clusterURL) {
- this.identity.prefetchMigrationSentinel(this);
+ yield this.identity.prefetchMigrationSentinel(this);
}
// Now let's update our declined engines.
- let meta = this.recordManager.get(this.metaURL);
+ let meta = yield this.recordManager.get(this.metaURL);
if (!meta) {
this._log.warn("No meta/global; can't update declined state.");
return;
}
let declinedEngines = new DeclinedEngines(this);
let didChange = declinedEngines.updateDeclined(meta, this.engineManager);
if (!didChange) {
this._log.info("No change to declined engines. Not reuploading meta/global.");
return;
}
- this.uploadMetaGlobal(meta);
+ yield this.uploadMetaGlobal(meta);
}))();
},
/**
* Upload meta/global, throwing the response on failure.
*/
- uploadMetaGlobal: function (meta) {
+ uploadMetaGlobal: Task.async(function* (meta) {
this._log.debug("Uploading meta/global: " + JSON.stringify(meta));
// It would be good to set the X-If-Unmodified-Since header to `timestamp`
// for this PUT to ensure at least some level of transactionality.
// Unfortunately, the servers don't support it after a wipe right now
// (bug 693893), so we're going to defer this until bug 692700.
let res = this.resource(this.metaURL);
- let response = res.put(meta);
+ let response = yield res.put(meta);
if (!response.success) {
throw response;
}
- this.recordManager.set(this.metaURL, meta);
- },
+ yield this.recordManager.set(this.metaURL, meta);
+ }),
/**
* Get a migration sentinel for the Firefox Accounts migration.
* Returns a JSON blob - it is up to callers of this to make sense of the
* data.
*
* Returns a promise that resolves with the sentinel, or null.
*/
- getFxAMigrationSentinel: function() {
+ getFxAMigrationSentinel: Task.async(function* () {
if (this._shouldLogin()) {
this._log.debug("In getFxAMigrationSentinel: should login.");
- if (!this.login()) {
+ let loggedIn = yield this.login();
+ if (!loggedIn) {
this._log.debug("Can't get migration sentinel: login returned false.");
- return Promise.resolve(null);
+ return null;
}
}
if (!this.identity.syncKeyBundle) {
this._log.error("Can't get migration sentinel: no syncKeyBundle.");
- return Promise.resolve(null);
+ return null;
}
try {
let collectionURL = this.storageURL + "meta/fxa_credentials";
- let cryptoWrapper = this.recordManager.get(collectionURL);
+ let cryptoWrapper = yield this.recordManager.get(collectionURL);
if (!cryptoWrapper || !cryptoWrapper.payload) {
// nothing to decrypt - .decrypt is noisy in that case, so just bail
// now.
- return Promise.resolve(null);
+ return null;
}
// If the payload has a sentinel it means we must have put back the
// decrypted version last time we were called.
if (cryptoWrapper.payload.sentinel) {
- return Promise.resolve(cryptoWrapper.payload.sentinel);
+ return cryptoWrapper.payload.sentinel;
}
// If decryption fails it almost certainly means the key is wrong - but
// it's not clear if we need to take special action for that case?
let payload = cryptoWrapper.decrypt(this.identity.syncKeyBundle);
// After decrypting the ciphertext is lost, so we just stash the
// decrypted payload back into the wrapper.
cryptoWrapper.payload = payload;
- return Promise.resolve(payload.sentinel);
+ return payload.sentinel;
} catch (ex) {
this._log.error("Failed to fetch the migration sentinel: ${}", ex);
- return Promise.resolve(null);
+ return null;
}
- },
+ }),
/**
* Set a migration sentinel for the Firefox Accounts migration.
* Accepts a JSON blob - it is up to callers of this to make sense of the
* data.
*
* Returns a promise that resolves with a boolean which indicates if the
* sentinel was successfully written.
*/
- setFxAMigrationSentinel: function(sentinel) {
+ setFxAMigrationSentinel: Task.async(function* (sentinel) {
if (this._shouldLogin()) {
this._log.debug("In setFxAMigrationSentinel: should login.");
- if (!this.login()) {
+ let loggedIn = yield this.login();
+ if (!loggedIn) {
this._log.debug("Can't set migration sentinel: login returned false.");
- return Promise.resolve(false);
+ return false;
}
}
if (!this.identity.syncKeyBundle) {
this._log.error("Can't set migration sentinel: no syncKeyBundle.");
- return Promise.resolve(false);
+ return false;
}
try {
let collectionURL = this.storageURL + "meta/fxa_credentials";
let cryptoWrapper = new CryptoWrapper("meta", "fxa_credentials");
cryptoWrapper.cleartext.sentinel = sentinel;
cryptoWrapper.encrypt(this.identity.syncKeyBundle);
let res = this.resource(collectionURL);
- let response = res.put(cryptoWrapper.toJSON());
+ let response = yield res.put(cryptoWrapper.toJSON());
if (!response.success) {
throw response;
}
this.recordManager.set(collectionURL, cryptoWrapper);
} catch (ex) {
this._log.error("Failed to set the migration sentinel: ${}", ex);
- return Promise.resolve(false);
+ return false;
}
- return Promise.resolve(true);
- },
+ return true;
+ }),
/**
* If we have a passphrase, rather than a 25-alphadigit sync key,
* use the provided sync ID to bootstrap it using PBKDF2.
*
* Store the new 'passphrase' back into the identity manager.
*
* We can check this as often as we want, because once it's done the
@@ -1482,82 +1477,82 @@ Sync11Service.prototype = {
this._log.info("Upgrading sync key...");
this.identity.syncKey = k;
this._log.info("Saving upgraded sync key...");
this.persistLogin();
this._log.info("Done saving.");
return true;
},
- _freshStart: function _freshStart() {
+ _freshStart: Task.async(function* () {
this._log.info("Fresh start. Resetting client and considering key upgrade.");
- this.resetClient();
+ yield this.resetClient();
this.collectionKeys.clear();
- this.upgradeSyncKey(this.syncID);
+ yield this.upgradeSyncKey(this.syncID);
// Wipe the server.
- let wipeTimestamp = this.wipeServer();
+ let wipeTimestamp = yield this.wipeServer();
// Upload a new meta/global record.
let meta = new WBORecord("meta", "global");
meta.payload.syncID = this.syncID;
meta.payload.storageVersion = STORAGE_VERSION;
meta.payload.declined = this.engineManager.getDeclined();
meta.isNew = true;
// uploadMetaGlobal throws on failure -- including race conditions.
// If we got into a race condition, we'll abort the sync this way, too.
// That's fine. We'll just wait till the next sync. The client that we're
// racing is probably busy uploading stuff right now anyway.
- this.uploadMetaGlobal(meta);
+ yield this.uploadMetaGlobal(meta);
// Wipe everything we know about except meta because we just uploaded it
let engines = [this.clientsEngine].concat(this.engineManager.getAll());
let collections = [engine.name for each (engine in engines)];
// TODO: there's a bug here. We should be calling resetClient, no?
// Generate, upload, and download new keys. Do this last so we don't wipe
// them...
- this.generateNewSymmetricKeys();
- },
+ yield this.generateNewSymmetricKeys();
+ }),
/**
* Wipe user data from the server.
*
* @param collections [optional]
* Array of collections to wipe. If not given, all collections are
* wiped by issuing a DELETE request for `storageURL`.
*
* @return the server's timestamp of the (last) DELETE.
*/
- wipeServer: function wipeServer(collections) {
+ wipeServer: Task.async(function* (collections) {
let response;
if (!collections) {
// Strip the trailing slash.
let res = this.resource(this.storageURL.slice(0, -1));
res.setHeader("X-Confirm-Delete", "1");
try {
- response = res.delete();
+ response = yield res.delete();
} catch (ex) {
this._log.debug("Failed to wipe server: " + CommonUtils.exceptionStr(ex));
throw ex;
}
if (response.status != 200 && response.status != 404) {
this._log.debug("Aborting wipeServer. Server responded with " +
response.status + " response for " + this.storageURL);
throw response;
}
return response.headers["x-weave-timestamp"];
}
let timestamp;
for (let name of collections) {
let url = this.storageURL + name;
try {
- response = this.resource(url).delete();
+ response = yield this.resource(url).delete();
} catch (ex) {
this._log.debug("Failed to wipe '" + name + "' collection: " +
Utils.exceptionStr(ex));
throw ex;
}
if (response.status != 200 && response.status != 404) {
this._log.debug("Aborting wipeServer. Server responded with " +
@@ -1566,118 +1561,117 @@ Sync11Service.prototype = {
}
if ("x-weave-timestamp" in response.headers) {
timestamp = response.headers["x-weave-timestamp"];
}
}
return timestamp;
- },
+ }),
/**
* Wipe all local user data.
*
* @param engines [optional]
* Array of engine names to wipe. If not given, all engines are used.
*/
- wipeClient: function wipeClient(engines) {
+ wipeClient: Task.async(function* (engines) {
// If we don't have any engines, reset the service and wipe all engines
if (!engines) {
// Clear out any service data
- this.resetService();
+ yield this.resetService();
engines = [this.clientsEngine].concat(this.engineManager.getAll());
}
// Convert the array of names into engines
else {
engines = this.engineManager.get(engines);
}
// Fully wipe each engine if it's able to decrypt data
for each (let engine in engines) {
- if (engine.canDecrypt()) {
- engine.wipeClient();
+ if (yield engine.canDecrypt()) {
+ yield engine.wipeClient();
}
}
// Save the password/passphrase just in-case they aren't restored by sync
this.persistLogin();
- },
+ }),
/**
* Wipe all remote user data by wiping the server then telling each remote
* client to wipe itself.
*
* @param engines [optional]
* Array of engine names to wipe. If not given, all engines are used.
*/
- wipeRemote: function wipeRemote(engines) {
+ wipeRemote: Task.async(function* (engines) {
try {
// Make sure stuff gets uploaded.
- this.resetClient(engines);
+ yield this.resetClient(engines);
// Clear out any server data.
- this.wipeServer(engines);
+ yield this.wipeServer(engines);
// Only wipe the engines provided.
if (engines) {
- engines.forEach(function(e) {
- this.clientsEngine.sendCommand("wipeEngine", [e]);
- }, this);
- }
- // Tell the remote machines to wipe themselves.
- else {
- this.clientsEngine.sendCommand("wipeAll", []);
+ for (let e of engines) {
+ yield this.clientsEngine.sendCommand("wipeEngine", [e]);
+ }
+ } else {
+ // Tell the remote machines to wipe themselves.
+ yield this.clientsEngine.sendCommand("wipeAll", []);
}
// Make sure the changed clients get updated.
- this.clientsEngine.sync();
+ yield this.clientsEngine.sync();
} catch (ex) {
this.errorHandler.checkServerError(ex, "clients");
throw ex;
}
- },
+ }),
/**
* Reset local service information like logs, sync times, caches.
*/
resetService: function resetService() {
- this._catch(function reset() {
+ return this._promiseCatch(function* reset() {
this._log.info("Service reset.");
// Pretend we've never synced to the server and drop cached data
this.syncID = "";
this.recordManager.clearCache();
})();
},
/**
* Reset the client by getting rid of any local server data and client data.
*
* @param engines [optional]
* Array of engine names to reset. If not given, all engines are used.
*/
resetClient: function resetClient(engines) {
- this._catch(function doResetClient() {
+ return this._promiseCatch(function* () {
// If we don't have any engines, reset everything including the service
if (!engines) {
// Clear out any service data
- this.resetService();
+ yield this.resetService();
engines = [this.clientsEngine].concat(this.engineManager.getAll());
}
// Convert the array of names into engines
else {
engines = this.engineManager.get(engines);
}
// Have each engine drop any temporary meta data
for each (let engine in engines) {
- engine.resetClient();
+ yield engine.resetClient();
}
})();
},
/**
* Fetch storage info from the server.
*
* @param type
--- a/services/sync/modules/stages/cluster.js
+++ b/services/sync/modules/stages/cluster.js
@@ -25,26 +25,26 @@ ClusterManager.prototype = {
return this.service.identity;
},
/**
* Obtain the cluster for the current user.
*
* Returns the string URL of the cluster or null on error.
*/
- _findCluster: function _findCluster() {
+ _findCluster: Task.async(function* () {
this._log.debug("Finding cluster for user " + this.identity.username);
// This should ideally use UserAPI10Client but the legacy hackiness is
// strong with this code.
let fail;
let url = this.service.userAPIURI + this.identity.username + "/node/weave";
let res = this.service.resource(url);
try {
- let node = res.get();
+ let node = yield res.get();
switch (node.status) {
case 400:
this.service.status.login = LOGIN_FAILED_LOGIN_REJECTED;
fail = "Find cluster denied: " + this.service.errorHandler.errorStr(node);
break;
case 404:
this._log.debug("Using serverURL as data cluster (multi-cluster support disabled)");
return this.service.serverURL;
@@ -62,40 +62,40 @@ ClusterManager.prototype = {
}
} catch (e) {
this._log.debug("Network error on findCluster");
this.service.status.login = LOGIN_FAILED_NETWORK_ERROR;
this.service.errorHandler.checkServerError(e, "node/weave");
fail = e;
}
throw fail;
- },
+ }),
/**
* Determine the cluster for the current user and update state.
*/
- setCluster: function setCluster() {
+ setCluster: Task.async(function* () {
// Make sure we didn't get some unexpected response for the cluster.
- let cluster = this._findCluster();
+ let cluster = yield this._findCluster();
this._log.debug("Cluster value = " + cluster);
if (cluster == null) {
return false;
}
// Don't update stuff if we already have the right cluster
if (cluster == this.service.clusterURL) {
return false;
}
this._log.debug("Setting cluster to " + cluster);
this.service.clusterURL = cluster;
Svc.Prefs.set("lastClusterUpdate", Date.now().toString());
return true;
- },
+ }),
getUserBaseURL: function getUserBaseURL() {
// Legacy Sync and FxA Sync construct the userBaseURL differently. Legacy
// Sync appends path components onto an empty path, and in FxA Sync, the
// token server constructs this for us in an opaque manner. Since the
// cluster manager already sets the clusterURL on Service and also has
// access to the current identity, we added this functionality here.
--- a/services/sync/modules/stages/enginesync.js
+++ b/services/sync/modules/stages/enginesync.js
@@ -21,158 +21,151 @@ Cu.import("resource://services-sync/util
*
* This was originally split out of service.js. The API needs lots of love.
*/
this.EngineSynchronizer = function EngineSynchronizer(service) {
this._log = Log.repository.getLogger("Sync.Synchronizer");
this._log.level = Log.Level[Svc.Prefs.get("log.logger.synchronizer")];
this.service = service;
-
- this.onComplete = null;
}
EngineSynchronizer.prototype = {
- sync: function sync() {
- if (!this.onComplete) {
- throw new Error("onComplete handler not installed.");
- }
-
+ sync: Task.async(function*() {
let startTime = Date.now();
- this.service.status.resetSync();
+ yield this.service.status.resetSync();
// Make sure we should sync or record why we shouldn't.
let reason = this.service._checkSync();
if (reason) {
if (reason == kSyncNetworkOffline) {
this.service.status.sync = LOGIN_FAILED_NETWORK_ERROR;
}
// this is a purposeful abort rather than a failure, so don't set
// any status bits
reason = "Can't sync: " + reason;
- this.onComplete(new Error("Can't sync: " + reason));
- return;
+ throw new Error(reason);
}
// If we don't have a node, get one. If that fails, retry in 10 minutes.
- if (!this.service.clusterURL && !this.service._clusterManager.setCluster()) {
- this.service.status.sync = NO_SYNC_NODE_FOUND;
- this._log.info("No cluster URL found. Cannot sync.");
- this.onComplete(null);
- return;
+ if (!this.service.clusterURL) {
+ let success = yield this.service._clusterManager.setCluster();
+ if (!success) {
+ this.service.status.sync = NO_SYNC_NODE_FOUND;
+ this._log.info("No cluster URL found. Cannot sync.");
+ return;
+ }
}
// Ping the server with a special info request once a day.
let infoURL = this.service.infoURL;
let now = Math.floor(Date.now() / 1000);
let lastPing = Svc.Prefs.get("lastPing", 0);
if (now - lastPing > 86400) { // 60 * 60 * 24
infoURL += "?v=" + WEAVE_VERSION;
Svc.Prefs.set("lastPing", now);
}
let engineManager = this.service.engineManager;
// Figure out what the last modified time is for each collection
- let info = this.service._fetchInfo(infoURL);
+ let info = yield this.service._fetchInfo(infoURL);
// Convert the response to an object and read out the modified times
for (let engine of [this.service.clientsEngine].concat(engineManager.getAll())) {
engine.lastModified = info.obj[engine.name] || 0;
}
- if (!(this.service._remoteSetup(info))) {
- this.onComplete(new Error("Aborting sync, remote setup failed"));
- return;
+ let success = yield this.service._remoteSetup(info);
+ if (!success) {
+ throw new Error("Aborting sync, remote setup failed");
}
// Make sure we have an up-to-date list of clients before sending commands
this._log.debug("Refreshing client list.");
- if (!this._syncEngine(this.service.clientsEngine)) {
+ success = yield this._syncEngine(this.service.clientsEngine);
+ if (!success) {
// Clients is an engine like any other; it can fail with a 401,
// and we can elect to abort the sync.
this._log.warn("Client engine sync failed. Aborting.");
- this.onComplete(null);
return;
}
// Wipe data in the desired direction if necessary
switch (Svc.Prefs.get("firstSync")) {
case "resetClient":
- this.service.resetClient(engineManager.enabledEngineNames);
+ yield this.service.resetClient(engineManager.enabledEngineNames);
break;
case "wipeClient":
- this.service.wipeClient(engineManager.enabledEngineNames);
+ yield this.service.wipeClient(engineManager.enabledEngineNames);
break;
case "wipeRemote":
- this.service.wipeRemote(engineManager.enabledEngineNames);
+ yield this.service.wipeRemote(engineManager.enabledEngineNames);
break;
}
if (this.service.clientsEngine.localCommands) {
try {
- if (!(this.service.clientsEngine.processIncomingCommands())) {
+ success = yield this.service.clientsEngine.processIncomingCommands();
+ if (!success) {
this.service.status.sync = ABORT_SYNC_COMMAND;
- this.onComplete(new Error("Processed command aborted sync."));
- return;
+ throw new Error("Processed command aborted sync.");
}
// Repeat remoteSetup in-case the commands forced us to reset
- if (!(this.service._remoteSetup(info))) {
- this.onComplete(new Error("Remote setup failed after processing commands."));
- return;
+ success = yield this.service._remoteSetup(info);
+ if (!success) {
+ throw new Error("Remote setup failed after processing commands.");
}
}
finally {
// Always immediately attempt to push back the local client (now
// without commands).
// Note that we don't abort here; if there's a 401 because we've
// been reassigned, we'll handle it around another engine.
- this._syncEngine(this.service.clientsEngine);
+ yield this._syncEngine(this.service.clientsEngine);
}
}
// Update engines because it might change what we sync.
try {
- this._updateEnabledEngines();
+ yield this._updateEnabledEngines();
} catch (ex) {
- this._log.debug("Updating enabled engines failed: " +
- Utils.exceptionStr(ex));
+ this._log.debug("Updating enabled engines failed", ex);
this.service.errorHandler.checkServerError(ex, "meta/global");
- this.onComplete(ex);
- return;
+ throw ex;
}
try {
for (let engine of engineManager.getEnabled()) {
// If there's any problems with syncing the engine, report the failure
- if (!(this._syncEngine(engine)) || this.service.status.enforceBackoff) {
+ success = yield this._syncEngine(engine);
+ if (!success || this.service.status.enforceBackoff) {
this._log.info("Aborting sync for failure in " + engine.name);
break;
}
}
// If _syncEngine fails for a 401, we might not have a cluster URL here.
// If that's the case, break out of this immediately, rather than
// throwing an exception when trying to fetch metaURL.
if (!this.service.clusterURL) {
this._log.debug("Aborting sync, no cluster URL: " +
"not uploading new meta/global.");
- this.onComplete(null);
return;
}
// Upload meta/global if any engines changed anything.
- let meta = this.service.recordManager.get(this.service.metaURL);
+ let meta = yield this.service.recordManager.get(this.service.metaURL);
if (meta.isNew || meta.changed) {
this._log.info("meta/global changed locally: reuploading.");
try {
- this.service.uploadMetaGlobal(meta);
+ yield this.service.uploadMetaGlobal(meta);
delete meta.isNew;
delete meta.changed;
} catch (error) {
this._log.error("Unable to upload meta/global. Leaving marked as new.");
}
}
// If there were no sync engine failures
@@ -183,41 +176,37 @@ EngineSynchronizer.prototype = {
} finally {
Svc.Prefs.reset("firstSync");
let syncTime = ((Date.now() - startTime) / 1000).toFixed(2);
let dateStr = new Date().toLocaleFormat(LOG_DATE_FORMAT);
this._log.info("Sync completed at " + dateStr
+ " after " + syncTime + " secs.");
}
+ }),
- this.onComplete(null);
- },
-
- // Returns true if sync should proceed.
+ // Returns a promise that resolves with true if sync should proceed.
// false / no return value means sync should be aborted.
- _syncEngine: function _syncEngine(engine) {
+ _syncEngine: Task.async(function* (engine) {
try {
- engine.sync();
- }
- catch(e) {
+ yield engine.sync();
+ } catch (e) {
if (e.status == 401) {
// Maybe a 401, cluster update perhaps needed?
// We rely on ErrorHandler observing the sync failure notification to
// schedule another sync and clear node assignment values.
// Here we simply want to muffle the exception and return an
// appropriate value.
return false;
}
}
+ return true;
+ }),
- return true;
- },
-
- _updateEnabledFromMeta: function (meta, numClients, engineManager=this.service.engineManager) {
+ _updateEnabledFromMeta: Task.async(function* (meta, numClients, engineManager=this.service.engineManager) {
this._log.info("Updating enabled engines: " +
numClients + " clients.");
if (meta.isNew || !meta.payload.engines) {
this._log.debug("meta/global isn't new, or is missing engines. Not updating enabled state.");
return;
}
@@ -269,17 +258,17 @@ EngineSynchronizer.prototype = {
attemptedEnable = true;
}
// If either the engine was disabled locally or enabling the engine
// failed (see above re master-password) then wipe server data and
// disable it everywhere.
if (!engine.enabled) {
this._log.trace("Wiping data for " + engineName + " engine.");
- engine.wipeServer();
+ yield engine.wipeServer();
delete meta.payload.engines[engineName];
meta.changed = true; // the new enabled state must propagate
// We also here mark the engine as declined, because the pref
// was explicitly changed to false - unless we tried, and failed,
// to enable it - in which case we leave the declined state alone.
if (!attemptedEnable) {
// This will be reflected in meta/global in the next stage.
this._log.trace("Engine " + engineName + " was disabled locally. Marking as declined.");
@@ -302,19 +291,19 @@ EngineSynchronizer.prototype = {
}
}
engineManager.decline(toDecline);
engineManager.undecline(toUndecline);
Svc.Prefs.resetBranch("engineStatusChanged.");
this.service._ignorePrefObserver = false;
- },
+ }),
- _updateEnabledEngines: function () {
- let meta = this.service.recordManager.get(this.service.metaURL);
+ _updateEnabledEngines: Task.async(function* () {
+ let meta = yield this.service.recordManager.get(this.service.metaURL);
let numClients = this.service.scheduler.numClients;
let engineManager = this.service.engineManager;
- this._updateEnabledFromMeta(meta, numClients, engineManager);
- },
+ yield this._updateEnabledFromMeta(meta, numClients, engineManager);
+ }),
};
Object.freeze(EngineSynchronizer.prototype);
--- a/services/sync/modules/util.js
+++ b/services/sync/modules/util.js
@@ -1,13 +1,14 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-this.EXPORTED_SYMBOLS = ["XPCOMUtils", "Services", "Utils", "Async", "Svc", "Str"];
+this.EXPORTED_SYMBOLS = ["XPCOMUtils", "Services", "Utils", "Async", "Svc",
+ "Str", "Task"];
const {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
Cu.import("resource://gre/modules/Log.jsm");
Cu.import("resource://services-common/observers.js");
Cu.import("resource://services-common/stringbundle.js");
Cu.import("resource://services-common/utils.js");
Cu.import("resource://services-common/async.js", this);
@@ -81,36 +82,57 @@ this.Utils = {
if (exceptionCallback) {
return exceptionCallback.call(thisArg, ex);
}
return null;
}
};
},
+ promiseCatch: function(func, exceptionCallback) {
+ let thisArg = this;
+ return function WrappedCatch() {
+ let exceptionHandler = function(ex) {
+ thisArg._log.debug("Exception: " + Utils.exceptionStr(ex));
+ if (exceptionCallback) {
+ return exceptionCallback.call(thisArg, ex);
+ }
+ return null;
+ };
+ try {
+ return Task.spawn(func.call(thisArg)).then(
+ null, ex => exceptionHandler(ex)
+ );
+ } catch (ex) {
+ return exceptionHandler(ex);
+ }
+ };
+ },
+
/**
- * Wrap a function to call lock before calling the function then unlock.
+ * Wrap a promise-returning function to call lock before calling the function
+ * then unlock when the promise resolves or rejects.
*
* @usage MyObj._lock = Utils.lock;
* MyObj.foo = function() { this._lock(func)(); }
*/
- lock: function lock(label, func) {
+ promiseLock: function promiseLock(label, func) {
let thisArg = this;
- return function WrappedLock() {
+ return Task.async(function* WrappedLock() {
if (!thisArg.lock()) {
throw "Could not acquire lock. Label: \"" + label + "\".";
}
try {
- return func.call(thisArg);
+ return (yield func.call(thisArg));
}
finally {
thisArg.unlock();
}
- };
+ });
},
isLockException: function isLockException(ex) {
return ex && ex.indexOf && ex.indexOf("Could not acquire lock.") == 0;
},
/**
* Wrap functions to notify when it starts and finishes executing or if it
@@ -152,16 +174,41 @@ this.Utils = {
catch(ex) {
notify("error", ex);
throw ex;
}
};
};
},
+ promiseNotify: function Utils_promiseNotify(prefix) {
+ return function NotifyMaker(name, data, func) {
+ let thisArg = this;
+ let notify = function(state, subject) {
+ let mesg = prefix + name + ":" + state;
+ thisArg._log.trace("Event: " + mesg);
+ Observers.notify(mesg, subject, data);
+ };
+
+ return function WrappedNotify() {
+ notify("start", null);
+ return Task.spawn(func.call(thisArg)).then(
+ ret => {
+ notify("finish", ret);
+ return ret;
+ },
+ ex => {
+ notify("error", ex);
+ throw ex;
+ }
+ );
+ };
+ };
+ },
+
/**
* GUIDs are 9 random bytes encoded with base64url (RFC 4648).
* That makes them 12 characters long with 72 bits of entropy.
*/
makeGUID: function makeGUID() {
return CommonUtils.encodeBase64URL(Utils.generateRandomBytes(9));
},
@@ -318,60 +365,56 @@ this.Utils = {
* Load a JSON file from disk in the profile directory.
*
* @param filePath
* JSON file path load from profile. Loaded file will be
* <profile>/<filePath>.json. i.e. Do not specify the ".json"
* extension.
* @param that
* Object to use for logging and "this" for callback.
- * @param callback
- * Function to process json object as its first argument. If the file
- * could not be loaded, the first argument will be undefined.
+ * @return Promise<Object>
+ * If the file could not be loaded, the value will be undefined.
*/
- jsonLoad: Task.async(function*(filePath, that, callback) {
+ jsonLoad: Task.async(function*(filePath, that) {
let path = OS.Path.join(OS.Constants.Path.profileDir, "weave", filePath + ".json");
- if (that._log) {
+ if (that._log && that._log.trace) {
that._log.trace("Loading json from disk: " + filePath);
}
- let json;
-
try {
- json = yield CommonUtils.readJSON(path);
+ return yield CommonUtils.readJSON(path);
} catch (e if e instanceof OS.File.Error && e.becauseNoSuchFile) {
// Ignore non-existent files.
} catch (e) {
if (that._log) {
that._log.debug("Failed to load json: " +
CommonUtils.exceptionStr(e));
}
}
-
- callback.call(that, json);
}),
/**
* Save a json-able object to disk in the profile directory.
*
* @param filePath
* JSON file path save to <filePath>.json
* @param that
* Object to use for logging and "this" for callback
* @param obj
* Function to provide json-able object to save. If this isn't a
* function, it'll be used as the object to make a json string.
- * @param callback
- * Function called when the write has been performed. Optional.
+ *
+ * @return Promise<>
+ * Promise resolved when the write has been performed.
* The first argument will be a Components.results error
* constant on error or null if no error was encountered (and
* the file saved successfully).
*/
- jsonSave: Task.async(function*(filePath, that, obj, callback) {
+ jsonSave: Task.async(function*(filePath, that, obj) {
let path = OS.Path.join(OS.Constants.Path.profileDir, "weave",
...(filePath + ".json").split("/"));
let dir = OS.Path.dirname(path);
let error = null;
try {
yield OS.File.makeDir(dir, { from: OS.Constants.Path.profileDir });
@@ -380,20 +423,16 @@ this.Utils = {
}
let json = typeof obj == "function" ? obj.call(that) : obj;
yield CommonUtils.writeJSON(json, path);
} catch (e) {
error = e
}
-
- if (typeof callback == "function") {
- callback.call(that, error);
- }
}),
getErrorString: function Utils_getErrorString(error, args) {
try {
return Str.errors.get(error, args || null);
} catch (e) {}
// basically returns "Unknown Error"
--- a/services/sync/tests/unit/head_helpers.js
+++ b/services/sync/tests/unit/head_helpers.js
@@ -201,8 +201,19 @@ function mockGetWindowEnumerator(url, nu
// Helper that allows checking array equality.
function do_check_array_eq(a1, a2) {
do_check_eq(a1.length, a2.length);
for (let i = 0; i < a1.length; ++i) {
do_check_eq(a1[i], a2[i]);
}
}
+
+// A helper that waits for an observer notification. Returns a promise that
+// is resolved with the subject and data of the notification.
+function promiseOneObserver(topic) {
+ return new Promise(resolve => {
+ Svc.Obs.add(topic, function obs(subject, data) {
+ Svc.Obs.remove(topic, obs);
+ resolve({ subject, data });
+ });
+ });
+}
--- a/services/sync/tests/unit/head_http_server.js
+++ b/services/sync/tests/unit/head_http_server.js
@@ -589,16 +589,22 @@ SyncServer.prototype = {
this._log.warn("SyncServer: Warning: server not running. Can't stop me now!");
return;
}
this.server.stop(cb);
this.started = false;
},
+ promiseStop() {
+ return new Promise(resolve => {
+ this.stop(resolve);
+ });
+ },
+
/**
* Return a server timestamp for a record.
* The server returns timestamps with 1/100 sec granularity. Note that this is
* subject to change: see Bug 650435.
*/
timestamp: function timestamp() {
return new_timestamp();
},
--- a/services/sync/tests/unit/test_addons_engine.js
+++ b/services/sync/tests/unit/test_addons_engine.js
@@ -22,107 +22,107 @@ startupManager();
var engineManager = Service.engineManager;
engineManager.register(AddonsEngine);
var engine = engineManager.get("addons");
var reconciler = engine._reconciler;
var tracker = engine._tracker;
-function advance_test() {
+function* advance_test() {
reconciler._addons = {};
reconciler._changes = [];
- let cb = Async.makeSpinningCallback();
- reconciler.saveState(null, cb);
- cb.wait();
+ yield reconciler.saveState(null);
+ Svc.Prefs.reset("addons.ignoreRepositoryChecking");
+}
- Svc.Prefs.reset("addons.ignoreRepositoryChecking");
-
- run_next_test();
-}
+// an initial call to advance_test to bootstrap the world.
+add_task(function* () {
+ yield advance_test();
+});
// This is a basic sanity test for the unit test itself. If this breaks, the
// add-ons API likely changed upstream.
-add_test(function test_addon_install() {
+add_task(function* test_addon_install() {
_("Ensure basic add-on APIs work as expected.");
let install = getAddonInstall("test_bootstrap1_1");
do_check_neq(install, null);
do_check_eq(install.type, "extension");
do_check_eq(install.name, "Test Bootstrap 1");
- advance_test();
+ yield advance_test();
});
-add_test(function test_find_dupe() {
+add_task(function* test_find_dupe() {
_("Ensure the _findDupe() implementation is sane.");
// This gets invoked at the top of sync, which is bypassed by this
// test, so we do it manually.
- engine._refreshReconcilerState();
+ yield engine._refreshReconcilerState();
let addon = installAddon("test_bootstrap1_1");
let record = {
id: Utils.makeGUID(),
addonID: addon.id,
enabled: true,
applicationID: Services.appinfo.ID,
source: "amo"
};
- let dupe = engine._findDupe(record);
+ let dupe = yield engine._findDupe(record);
do_check_eq(addon.syncGUID, dupe);
record.id = addon.syncGUID;
- dupe = engine._findDupe(record);
+ dupe = yield engine._findDupe(record);
do_check_eq(null, dupe);
uninstallAddon(addon);
- advance_test();
+ yield advance_test();
});
-add_test(function test_get_changed_ids() {
+add_task(function* test_get_changed_ids() {
_("Ensure getChangedIDs() has the appropriate behavior.");
_("Ensure getChangedIDs() returns an empty object by default.");
- let changes = engine.getChangedIDs();
+ let changes = yield engine.getChangedIDs();
do_check_eq("object", typeof(changes));
do_check_eq(0, Object.keys(changes).length);
_("Ensure tracker changes are populated.");
let now = new Date();
let changeTime = now.getTime() / 1000;
let guid1 = Utils.makeGUID();
- tracker.addChangedID(guid1, changeTime);
+ yield tracker.addChangedID(guid1, changeTime);
- changes = engine.getChangedIDs();
+ changes = yield engine.getChangedIDs();
do_check_eq("object", typeof(changes));
do_check_eq(1, Object.keys(changes).length);
do_check_true(guid1 in changes);
do_check_eq(changeTime, changes[guid1]);
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
_("Ensure reconciler changes are populated.");
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
let addon = installAddon("test_bootstrap1_1");
- tracker.clearChangedIDs(); // Just in case.
- changes = engine.getChangedIDs();
+ yield tracker.clearChangedIDs(); // Just in case.
+ changes = yield engine.getChangedIDs();
do_check_eq("object", typeof(changes));
do_check_eq(1, Object.keys(changes).length);
do_check_true(addon.syncGUID in changes);
_("Change time: " + changeTime + ", addon change: " + changes[addon.syncGUID]);
do_check_true(changes[addon.syncGUID] >= changeTime);
let oldTime = changes[addon.syncGUID];
let guid2 = addon.syncGUID;
uninstallAddon(addon);
- changes = engine.getChangedIDs();
+ changes = yield engine.getChangedIDs();
do_check_eq(1, Object.keys(changes).length);
do_check_true(guid2 in changes);
do_check_true(changes[guid2] > oldTime);
_("Ensure non-syncable add-ons aren't picked up by reconciler changes.");
reconciler._addons = {};
reconciler._changes = [];
let record = {
@@ -130,27 +130,28 @@ add_test(function test_get_changed_ids()
guid: Utils.makeGUID(),
enabled: true,
installed: true,
modified: new Date(),
type: "UNSUPPORTED",
scope: 0,
foreignInstall: false
};
- reconciler.addons["DUMMY"] = record;
- reconciler._addChange(record.modified, CHANGE_INSTALLED, record);
+ let addons = yield reconciler.addons;
+ addons["DUMMY"] = record;
+ yield reconciler._addChange(record.modified, CHANGE_INSTALLED, record);
- changes = engine.getChangedIDs();
+ changes = yield engine.getChangedIDs();
_(JSON.stringify(changes));
do_check_eq(0, Object.keys(changes).length);
- advance_test();
+ yield advance_test();
});
-add_test(function test_disabled_install_semantics() {
+add_task(function* test_disabled_install_semantics() {
_("Ensure that syncing a disabled add-on preserves proper state.");
// This is essentially a test for bug 712542, which snuck into the original
// add-on sync drop. It ensures that when an add-on is installed that the
// disabled state and incoming syncGUID is preserved, even on the next sync.
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
@@ -194,47 +195,46 @@ add_test(function test_disabled_install_
enabled: false,
deleted: false,
source: "amo",
});
let wbo = new ServerWBO(id, record, now - 2);
server.insertWBO(USER, "addons", wbo);
_("Performing sync of add-ons engine.");
- engine._sync();
+ yield engine._sync();
// At this point the non-restartless extension should be staged for install.
// Don't need this server any more.
- let cb = Async.makeSpinningCallback();
- amoServer.stop(cb);
- cb.wait();
+ yield promiseStopServer(amoServer);
// We ensure the reconciler has recorded the proper ID and enabled state.
- let addon = reconciler.getAddonStateFromSyncGUID(id);
+ let addon = yield reconciler.getAddonStateFromSyncGUID(id);
do_check_neq(null, addon);
do_check_eq(false, addon.enabled);
// We fake an app restart and perform another sync, just to make sure things
// are sane.
restartManager();
- engine._sync();
+ yield engine._sync();
// The client should not upload a new record. The old record should be
// retained and unmodified.
let collection = server.getCollection(USER, "addons");
do_check_eq(1, collection.count());
let payload = collection.payloads()[0];
do_check_neq(null, collection.wbo(id));
do_check_eq(ADDON_ID, payload.addonID);
do_check_false(payload.enabled);
- server.stop(advance_test);
+ yield server.promiseStop();
+ yield advance_test();
});
add_test(function cleanup() {
// There's an xpcom-shutdown hook for this, but let's give this a shot.
reconciler.stopListening();
run_next_test();
});
@@ -249,10 +249,10 @@ function run_test() {
Log.Level.Trace;
reconciler.startListening();
// Don't flush to disk in the middle of an event listener!
// This causes test hangs on WinXP.
reconciler._shouldPersist = false;
- advance_test();
+ run_next_test();
}
--- a/services/sync/tests/unit/test_addons_reconciler.js
+++ b/services/sync/tests/unit/test_addons_reconciler.js
@@ -33,46 +33,43 @@ add_test(function test_defaults() {
do_check_eq("object", typeof(reconciler.addons));
do_check_eq(0, Object.keys(reconciler.addons).length);
do_check_eq(0, reconciler._changes.length);
do_check_eq(0, reconciler._listeners.length);
run_next_test();
});
-add_test(function test_load_state_empty_file() {
+add_task(function* test_load_state_empty_file() {
_("Ensure loading from a missing file results in defaults being set.");
let reconciler = new AddonsReconciler();
- reconciler.loadState(null, function(error, loaded) {
- do_check_eq(null, error);
- do_check_false(loaded);
+ let loaded = yield reconciler.loadState(null);
+ do_check_false(loaded);
- do_check_eq("object", typeof(reconciler.addons));
- do_check_eq(0, Object.keys(reconciler.addons).length);
- do_check_eq(0, reconciler._changes.length);
-
- run_next_test();
- });
+ do_check_eq("object", typeof(reconciler.addons));
+ do_check_eq(0, Object.keys(reconciler.addons).length);
+ do_check_eq(0, reconciler._changes.length);
});
-add_test(function test_install_detection() {
+add_task(function* test_install_detection() {
_("Ensure that add-on installation results in appropriate side-effects.");
let reconciler = new AddonsReconciler();
reconciler.startListening();
let before = new Date();
let addon = installAddon("test_bootstrap1_1");
let after = new Date();
- do_check_eq(1, Object.keys(reconciler.addons).length);
- do_check_true(addon.id in reconciler.addons);
- let record = reconciler.addons[addon.id];
+ let addons = yield reconciler.addons;
+ do_check_eq(1, Object.keys(addons).length);
+ do_check_true(addon.id in addons);
+ let record = addons[addon.id];
const KEYS = ["id", "guid", "enabled", "installed", "modified", "type",
"scope", "foreignInstall"];
for each (let key in KEYS) {
do_check_true(key in record);
do_check_neq(null, record[key]);
}
@@ -86,110 +83,100 @@ add_test(function test_install_detection
do_check_eq(1, reconciler._changes.length);
let change = reconciler._changes[0];
do_check_true(change[0] >= before && change[1] <= after);
do_check_eq(CHANGE_INSTALLED, change[1]);
do_check_eq(addon.id, change[2]);
uninstallAddon(addon);
-
- run_next_test();
});
-add_test(function test_uninstall_detection() {
+add_task(function* test_uninstall_detection() {
_("Ensure that add-on uninstallation results in appropriate side-effects.");
let reconciler = new AddonsReconciler();
reconciler.startListening();
reconciler._addons = {};
reconciler._changes = [];
let addon = installAddon("test_bootstrap1_1");
let id = addon.id;
let guid = addon.syncGUID;
reconciler._changes = [];
uninstallAddon(addon);
- do_check_eq(1, Object.keys(reconciler.addons).length);
- do_check_true(id in reconciler.addons);
+ let addons = yield reconciler.addons;
+ do_check_eq(1, Object.keys(addons).length);
+ do_check_true(id in addons);
- let record = reconciler.addons[id];
+ let record = addons[id];
do_check_false(record.installed);
do_check_eq(1, reconciler._changes.length);
let change = reconciler._changes[0];
do_check_eq(CHANGE_UNINSTALLED, change[1]);
do_check_eq(id, change[2]);
-
- run_next_test();
});
-add_test(function test_load_state_future_version() {
+add_task(function* test_load_state_future_version() {
_("Ensure loading a file from a future version results in no data loaded.");
const FILENAME = "TEST_LOAD_STATE_FUTURE_VERSION";
let reconciler = new AddonsReconciler();
// First we populate our new file.
let state = {version: 100, addons: {foo: {}}, changes: [[1, 1, "foo"]]};
- let cb = Async.makeSyncCallback();
// jsonSave() expects an object with ._log, so we give it a reconciler
// instance.
- Utils.jsonSave(FILENAME, reconciler, state, cb);
- Async.waitForSyncCallback(cb);
+ yield Utils.jsonSave(FILENAME, reconciler, state);
- reconciler.loadState(FILENAME, function(error, loaded) {
- do_check_eq(null, error);
- do_check_false(loaded);
+ let loaded = yield reconciler.loadState(FILENAME);
+ do_check_false(loaded);
- do_check_eq("object", typeof(reconciler.addons));
- do_check_eq(1, Object.keys(reconciler.addons).length);
- do_check_eq(1, reconciler._changes.length);
-
- run_next_test();
- });
+ let addons = yield reconciler.addons;
+ do_check_eq("object", typeof(addons));
+ do_check_eq(1, Object.keys(addons).length);
+ do_check_eq(1, reconciler._changes.length);
});
-add_test(function test_prune_changes_before_date() {
+add_task(function* test_prune_changes_before_date() {
_("Ensure that old changes are pruned properly.");
let reconciler = new AddonsReconciler();
- reconciler._ensureStateLoaded();
+ yield reconciler._ensureStateLoaded();
reconciler._changes = [];
let now = new Date();
const HOUR_MS = 1000 * 60 * 60;
_("Ensure pruning an empty changes array works.");
- reconciler.pruneChangesBeforeDate(now);
+ yield reconciler.pruneChangesBeforeDate(now);
do_check_eq(0, reconciler._changes.length);
let old = new Date(now.getTime() - HOUR_MS);
let young = new Date(now.getTime() - 1000);
reconciler._changes.push([old, CHANGE_INSTALLED, "foo"]);
reconciler._changes.push([young, CHANGE_INSTALLED, "bar"]);
do_check_eq(2, reconciler._changes.length);
_("Ensure pruning with an old time won't delete anything.");
let threshold = new Date(old.getTime() - 1);
- reconciler.pruneChangesBeforeDate(threshold);
+ yield reconciler.pruneChangesBeforeDate(threshold);
do_check_eq(2, reconciler._changes.length);
_("Ensure pruning a single item works.");
threshold = new Date(young.getTime() - 1000);
- reconciler.pruneChangesBeforeDate(threshold);
+ yield reconciler.pruneChangesBeforeDate(threshold);
do_check_eq(1, reconciler._changes.length);
do_check_neq(undefined, reconciler._changes[0]);
do_check_eq(young, reconciler._changes[0][0]);
do_check_eq("bar", reconciler._changes[0][2]);
_("Ensure pruning all changes works.");
reconciler._changes.push([old, CHANGE_INSTALLED, "foo"]);
- reconciler.pruneChangesBeforeDate(now);
+ yield reconciler.pruneChangesBeforeDate(now);
do_check_eq(0, reconciler._changes.length);
-
- run_next_test();
});
--- a/services/sync/tests/unit/test_addons_store.js
+++ b/services/sync/tests/unit/test_addons_store.js
@@ -76,155 +76,146 @@ function run_test() {
// Don't flush to disk in the middle of an event listener!
// This causes test hangs on WinXP.
reconciler._shouldPersist = false;
run_next_test();
}
-add_test(function test_remove() {
+add_task(function* test_remove() {
_("Ensure removing add-ons from deleted records works.");
let addon = installAddon("test_bootstrap1_1");
let record = createRecordForThisApp(addon.syncGUID, addon.id, true, true);
- let failed = store.applyIncomingBatch([record]);
+ let failed = yield store.applyIncomingBatch([record]);
do_check_eq(0, failed.length);
let newAddon = getAddonFromAddonManagerByID(addon.id);
do_check_eq(null, newAddon);
-
- run_next_test();
});
-add_test(function test_apply_enabled() {
+add_task(function* test_apply_enabled() {
_("Ensures that changes to the userEnabled flag apply.");
let addon = installAddon("test_bootstrap1_1");
do_check_true(addon.isActive);
do_check_false(addon.userDisabled);
_("Ensure application of a disable record works as expected.");
let records = [];
records.push(createRecordForThisApp(addon.syncGUID, addon.id, false, false));
- let failed = store.applyIncomingBatch(records);
+ let failed = yield store.applyIncomingBatch(records);
do_check_eq(0, failed.length);
addon = getAddonFromAddonManagerByID(addon.id);
do_check_true(addon.userDisabled);
records = [];
_("Ensure enable record works as expected.");
records.push(createRecordForThisApp(addon.syncGUID, addon.id, true, false));
- failed = store.applyIncomingBatch(records);
+ failed = yield store.applyIncomingBatch(records);
do_check_eq(0, failed.length);
addon = getAddonFromAddonManagerByID(addon.id);
do_check_false(addon.userDisabled);
records = [];
_("Ensure enabled state updates don't apply if the ignore pref is set.");
records.push(createRecordForThisApp(addon.syncGUID, addon.id, false, false));
Svc.Prefs.set("addons.ignoreUserEnabledChanges", true);
- failed = store.applyIncomingBatch(records);
+ failed = yield store.applyIncomingBatch(records);
do_check_eq(0, failed.length);
addon = getAddonFromAddonManagerByID(addon.id);
do_check_false(addon.userDisabled);
records = [];
uninstallAddon(addon);
Svc.Prefs.reset("addons.ignoreUserEnabledChanges");
- run_next_test();
});
-add_test(function test_ignore_different_appid() {
+add_task(function* test_ignore_different_appid() {
_("Ensure that incoming records with a different application ID are ignored.");
// We test by creating a record that should result in an update.
let addon = installAddon("test_bootstrap1_1");
do_check_false(addon.userDisabled);
let record = createRecordForThisApp(addon.syncGUID, addon.id, false, false);
record.applicationID = "FAKE_ID";
- let failed = store.applyIncomingBatch([record]);
+ let failed = yield store.applyIncomingBatch([record]);
do_check_eq(0, failed.length);
let newAddon = getAddonFromAddonManagerByID(addon.id);
do_check_false(addon.userDisabled);
uninstallAddon(addon);
-
- run_next_test();
});
-add_test(function test_ignore_unknown_source() {
+add_task(function* test_ignore_unknown_source() {
_("Ensure incoming records with unknown source are ignored.");
let addon = installAddon("test_bootstrap1_1");
let record = createRecordForThisApp(addon.syncGUID, addon.id, false, false);
record.source = "DUMMY_SOURCE";
- let failed = store.applyIncomingBatch([record]);
+ let failed = yield store.applyIncomingBatch([record]);
do_check_eq(0, failed.length);
let newAddon = getAddonFromAddonManagerByID(addon.id);
do_check_false(addon.userDisabled);
uninstallAddon(addon);
-
- run_next_test();
});
-add_test(function test_apply_uninstall() {
+add_task(function* test_apply_uninstall() {
_("Ensures that uninstalling an add-on from a record works.");
let addon = installAddon("test_bootstrap1_1");
let records = [];
records.push(createRecordForThisApp(addon.syncGUID, addon.id, true, true));
- let failed = store.applyIncomingBatch(records);
+ let failed = yield store.applyIncomingBatch(records);
do_check_eq(0, failed.length);
addon = getAddonFromAddonManagerByID(addon.id);
do_check_eq(null, addon);
-
- run_next_test();
});
-add_test(function test_addon_syncability() {
+add_task(function* test_addon_syncability() {
_("Ensure isAddonSyncable functions properly.");
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
Svc.Prefs.set("addons.trustedSourceHostnames",
"addons.mozilla.org,other.example.com");
- do_check_false(store.isAddonSyncable(null));
+ do_check_false((yield store.isAddonSyncable(null)));
let addon = installAddon("test_bootstrap1_1");
- do_check_true(store.isAddonSyncable(addon));
+ do_check_true((yield store.isAddonSyncable(addon)));
let dummy = {};
const KEYS = ["id", "syncGUID", "type", "scope", "foreignInstall"];
for each (let k in KEYS) {
dummy[k] = addon[k];
}
- do_check_true(store.isAddonSyncable(dummy));
+ do_check_true((yield store.isAddonSyncable(dummy)));
dummy.type = "UNSUPPORTED";
- do_check_false(store.isAddonSyncable(dummy));
+ do_check_false((yield store.isAddonSyncable(dummy)));
dummy.type = addon.type;
dummy.scope = 0;
- do_check_false(store.isAddonSyncable(dummy));
+ do_check_false((yield store.isAddonSyncable(dummy)));
dummy.scope = addon.scope;
dummy.foreignInstall = true;
- do_check_false(store.isAddonSyncable(dummy));
+ do_check_false((yield store.isAddonSyncable(dummy)));
dummy.foreignInstall = false;
uninstallAddon(addon);
do_check_false(store.isSourceURITrusted(null));
function createURI(s) {
let service = Components.classes["@mozilla.org/network/io-service;1"]
@@ -255,223 +246,213 @@ add_test(function test_addon_syncability
for each (let uri in trusted) {
do_check_false(store.isSourceURITrusted(createURI(uri)));
}
Svc.Prefs.set("addons.trustedSourceHostnames", "addons.mozilla.org");
do_check_true(store.isSourceURITrusted(createURI("https://addons.mozilla.org/foo")));
Svc.Prefs.reset("addons.trustedSourceHostnames");
-
- run_next_test();
});
-add_test(function test_ignore_hotfixes() {
+add_task(function* test_ignore_hotfixes() {
_("Ensure that hotfix extensions are ignored.");
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
// A hotfix extension is one that has the id the same as the
// extensions.hotfix.id pref.
let prefs = new Preferences("extensions.");
let addon = installAddon("test_bootstrap1_1");
- do_check_true(store.isAddonSyncable(addon));
+ do_check_true((yield store.isAddonSyncable(addon)));
let dummy = {};
const KEYS = ["id", "syncGUID", "type", "scope", "foreignInstall"];
for each (let k in KEYS) {
dummy[k] = addon[k];
}
// Basic sanity check.
- do_check_true(store.isAddonSyncable(dummy));
+ do_check_true((yield store.isAddonSyncable(dummy)));
prefs.set("hotfix.id", dummy.id);
- do_check_false(store.isAddonSyncable(dummy));
+ do_check_false((yield store.isAddonSyncable(dummy)));
// Verify that int values don't throw off checking.
let prefSvc = Cc["@mozilla.org/preferences-service;1"]
.getService(Ci.nsIPrefService)
.getBranch("extensions.");
// Need to delete pref before changing type.
prefSvc.deleteBranch("hotfix.id");
prefSvc.setIntPref("hotfix.id", 0xdeadbeef);
- do_check_true(store.isAddonSyncable(dummy));
+ do_check_true((yield store.isAddonSyncable(dummy)));
uninstallAddon(addon);
Svc.Prefs.reset("addons.ignoreRepositoryChecking");
prefs.reset("hotfix.id");
-
- run_next_test();
});
-add_test(function test_get_all_ids() {
+add_task(function* test_get_all_ids() {
_("Ensures that getAllIDs() returns an appropriate set.");
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
_("Installing two addons.");
let addon1 = installAddon("test_install1");
let addon2 = installAddon("test_bootstrap1_1");
_("Ensure they're syncable.");
- do_check_true(store.isAddonSyncable(addon1));
- do_check_true(store.isAddonSyncable(addon2));
+ do_check_true((yield store.isAddonSyncable(addon1)));
+ do_check_true((yield store.isAddonSyncable(addon2)));
- let ids = store.getAllIDs();
+ let ids = yield store.getAllIDs();
do_check_eq("object", typeof(ids));
do_check_eq(2, Object.keys(ids).length);
do_check_true(addon1.syncGUID in ids);
do_check_true(addon2.syncGUID in ids);
addon1.install.cancel();
uninstallAddon(addon2);
Svc.Prefs.reset("addons.ignoreRepositoryChecking");
- run_next_test();
});
-add_test(function test_change_item_id() {
+add_task(function* test_change_item_id() {
_("Ensures that changeItemID() works properly.");
let addon = installAddon("test_bootstrap1_1");
let oldID = addon.syncGUID;
let newID = Utils.makeGUID();
- store.changeItemID(oldID, newID);
+ yield store.changeItemID(oldID, newID);
let newAddon = getAddonFromAddonManagerByID(addon.id);
do_check_neq(null, newAddon);
do_check_eq(newID, newAddon.syncGUID);
uninstallAddon(newAddon);
-
- run_next_test();
});
-add_test(function test_create() {
+add_task(function* test_create() {
_("Ensure creating/installing an add-on from a record works.");
// Set this so that getInstallFromSearchResult doesn't end up
// failing the install due to an insecure source URI scheme.
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
let server = createAndStartHTTPServer(HTTP_PORT);
let addon = installAddon("test_bootstrap1_1");
let id = addon.id;
uninstallAddon(addon);
let guid = Utils.makeGUID();
let record = createRecordForThisApp(guid, id, true, false);
- let failed = store.applyIncomingBatch([record]);
+ let failed = yield store.applyIncomingBatch([record]);
do_check_eq(0, failed.length);
let newAddon = getAddonFromAddonManagerByID(id);
do_check_neq(null, newAddon);
do_check_eq(guid, newAddon.syncGUID);
do_check_false(newAddon.userDisabled);
uninstallAddon(newAddon);
Svc.Prefs.reset("addons.ignoreRepositoryChecking");
- server.stop(run_next_test);
+ yield promiseStopServer(server);
});
-add_test(function test_create_missing_search() {
+add_task(function* test_create_missing_search() {
_("Ensures that failed add-on searches are handled gracefully.");
let server = createAndStartHTTPServer(HTTP_PORT);
// The handler for this ID is not installed, so a search should 404.
const id = "missing@tests.mozilla.org";
let guid = Utils.makeGUID();
let record = createRecordForThisApp(guid, id, true, false);
- let failed = store.applyIncomingBatch([record]);
+ let failed = yield store.applyIncomingBatch([record]);
do_check_eq(1, failed.length);
do_check_eq(guid, failed[0]);
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_FAILURES", { key: "addons" }), 1);
let addon = getAddonFromAddonManagerByID(id);
do_check_eq(null, addon);
- server.stop(run_next_test);
+ yield promiseStopServer(server);
});
-add_test(function test_create_bad_install() {
+add_task(function* test_create_bad_install() {
_("Ensures that add-ons without a valid install are handled gracefully.");
let server = createAndStartHTTPServer(HTTP_PORT);
// The handler returns a search result but the XPI will 404.
const id = "missing-xpi@tests.mozilla.org";
let guid = Utils.makeGUID();
let record = createRecordForThisApp(guid, id, true, false);
- let failed = store.applyIncomingBatch([record]);
+ let failed = yield store.applyIncomingBatch([record]);
do_check_eq(1, failed.length);
do_check_eq(guid, failed[0]);
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_FAILURES", { key: "addons" }), 1);
let addon = getAddonFromAddonManagerByID(id);
do_check_eq(null, addon);
- server.stop(run_next_test);
+ yield promiseStopServer(server);
});
-add_test(function test_wipe() {
+add_task(function* test_wipe() {
_("Ensures that wiping causes add-ons to be uninstalled.");
let addon1 = installAddon("test_bootstrap1_1");
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
- store.wipe();
+ yield store.wipe();
let addon = getAddonFromAddonManagerByID(addon1.id);
do_check_eq(null, addon);
Svc.Prefs.reset("addons.ignoreRepositoryChecking");
-
- run_next_test();
});
-add_test(function test_wipe_and_install() {
+add_task(function* test_wipe_and_install() {
_("Ensure wipe followed by install works.");
// This tests the reset sync flow where remote data is replaced by local. The
// receiving client will see a wipe followed by a record which should undo
// the wipe.
let installed = installAddon("test_bootstrap1_1");
let record = createRecordForThisApp(installed.syncGUID, installed.id, true,
false);
Svc.Prefs.set("addons.ignoreRepositoryChecking", true);
- store.wipe();
+ yield store.wipe();
let deleted = getAddonFromAddonManagerByID(installed.id);
do_check_null(deleted);
// Re-applying the record can require re-fetching the XPI.
let server = createAndStartHTTPServer(HTTP_PORT);
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
let fetched = getAddonFromAddonManagerByID(record.addonID);
do_check_true(!!fetched);
Svc.Prefs.reset("addons.ignoreRepositoryChecking");
- server.stop(run_next_test);
+ yield promiseStopServer(server);
});
-add_test(function cleanup() {
+add_task(function* cleanup() {
// There's an xpcom-shutdown hook for this, but let's give this a shot.
reconciler.stopListening();
- run_next_test();
});
--- a/services/sync/tests/unit/test_addons_tracker.js
+++ b/services/sync/tests/unit/test_addons_tracker.js
@@ -20,159 +20,159 @@ var reconciler = engine._reconciler;
var store = engine._store;
var tracker = engine._tracker;
// Don't write out by default.
tracker.persistChangedIDs = false;
const addon1ID = "addon1@tests.mozilla.org";
-function cleanup_and_advance() {
+function* cleanup_and_advance() {
Svc.Obs.notify("weave:engine:stop-tracking");
tracker.stopTracking();
tracker.resetScore();
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
reconciler._addons = {};
reconciler._changes = [];
- let cb = Async.makeSpinningCallback();
- reconciler.saveState(null, cb);
- cb.wait();
-
- run_next_test();
+ yield reconciler.saveState(null);
}
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Addons").level = Log.Level.Trace;
Log.repository.getLogger("Sync.AddonsReconciler").level =
Log.Level.Trace;
- cleanup_and_advance();
+ run_next_test();
}
-add_test(function test_empty() {
+add_task(function* () {
+ yield cleanup_and_advance();
+});
+
+add_task(function* test_empty() {
_("Verify the tracker is empty to start with.");
do_check_eq(0, Object.keys(tracker.changedIDs).length);
do_check_eq(0, tracker.score);
- cleanup_and_advance();
+ yield cleanup_and_advance();
});
-add_test(function test_not_tracking() {
+add_task(function* test_not_tracking() {
_("Ensures the tracker doesn't do anything when it isn't tracking.");
let addon = installAddon("test_bootstrap1_1");
uninstallAddon(addon);
do_check_eq(0, Object.keys(tracker.changedIDs).length);
do_check_eq(0, tracker.score);
- cleanup_and_advance();
+ yield cleanup_and_advance();
});
-add_test(function test_track_install() {
+add_task(function* test_track_install() {
_("Ensure that installing an add-on notifies tracker.");
reconciler.startListening();
Svc.Obs.notify("weave:engine:start-tracking");
do_check_eq(0, tracker.score);
let addon = installAddon("test_bootstrap1_1");
let changed = tracker.changedIDs;
do_check_eq(1, Object.keys(changed).length);
do_check_true(addon.syncGUID in changed);
do_check_eq(SCORE_INCREMENT_XLARGE, tracker.score);
uninstallAddon(addon);
- cleanup_and_advance();
+ yield cleanup_and_advance();
});
-add_test(function test_track_uninstall() {
+add_task(function* test_track_uninstall() {
_("Ensure that uninstalling an add-on notifies tracker.");
reconciler.startListening();
let addon = installAddon("test_bootstrap1_1");
let guid = addon.syncGUID;
do_check_eq(0, tracker.score);
Svc.Obs.notify("weave:engine:start-tracking");
uninstallAddon(addon);
let changed = tracker.changedIDs;
do_check_eq(1, Object.keys(changed).length);
do_check_true(guid in changed);
do_check_eq(SCORE_INCREMENT_XLARGE, tracker.score);
- cleanup_and_advance();
+ yield cleanup_and_advance();
});
-add_test(function test_track_user_disable() {
+add_task(function* test_track_user_disable() {
_("Ensure that tracker sees disabling of add-on");
reconciler.startListening();
let addon = installAddon("test_bootstrap1_1");
do_check_false(addon.userDisabled);
do_check_false(addon.appDisabled);
do_check_true(addon.isActive);
Svc.Obs.notify("weave:engine:start-tracking");
do_check_eq(0, tracker.score);
- let cb = Async.makeSyncCallback();
-
- let listener = {
- onDisabled: function(disabled) {
- _("onDisabled");
- if (disabled.id == addon.id) {
- AddonManager.removeAddonListener(listener);
- cb();
+ yield new Promise(resolve => {
+ let listener = {
+ onDisabled: function(disabled) {
+ _("onDisabled");
+ if (disabled.id == addon.id) {
+ AddonManager.removeAddonListener(listener);
+ resolve();
+ }
+ },
+ onDisabling: function(disabling) {
+ _("onDisabling add-on");
}
- },
- onDisabling: function(disabling) {
- _("onDisabling add-on");
- }
- };
- AddonManager.addAddonListener(listener);
+ };
+ AddonManager.addAddonListener(listener);
- _("Disabling add-on");
- addon.userDisabled = true;
- _("Disabling started...");
- Async.waitForSyncCallback(cb);
+ _("Disabling add-on");
+ addon.userDisabled = true;
+ _("Disabling started...");
+
+ });
let changed = tracker.changedIDs;
do_check_eq(1, Object.keys(changed).length);
do_check_true(addon.syncGUID in changed);
do_check_eq(SCORE_INCREMENT_XLARGE, tracker.score);
uninstallAddon(addon);
- cleanup_and_advance();
+ yield cleanup_and_advance();
});
-add_test(function test_track_enable() {
+add_task(function* test_track_enable() {
_("Ensure that enabling a disabled add-on notifies tracker.");
reconciler.startListening();
let addon = installAddon("test_bootstrap1_1");
addon.userDisabled = true;
- store._sleep(0);
+ yield store._sleep(0);
do_check_eq(0, tracker.score);
Svc.Obs.notify("weave:engine:start-tracking");
addon.userDisabled = false;
- store._sleep(0);
+ yield store._sleep(0);
let changed = tracker.changedIDs;
do_check_eq(1, Object.keys(changed).length);
do_check_true(addon.syncGUID in changed);
do_check_eq(SCORE_INCREMENT_XLARGE, tracker.score);
uninstallAddon(addon);
- cleanup_and_advance();
+ yield cleanup_and_advance();
});
--- a/services/sync/tests/unit/test_bookmark_batch_fail.js
+++ b/services/sync/tests/unit/test_bookmark_batch_fail.js
@@ -1,23 +1,27 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
_("Making sure a failing sync reports a useful error");
Cu.import("resource://services-sync/engines/bookmarks.js");
Cu.import("resource://services-sync/service.js");
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
let engine = new BookmarksEngine(Service);
engine._syncStartup = function() {
- throw "FAIL!";
+ return Promise.reject("FAIL!");
};
try {
_("Try calling the sync that should throw right away");
- engine._sync();
+ yield engine._sync();
do_throw("Should have failed sync!");
}
catch(ex) {
_("Making sure what we threw ended up as the exception:", ex);
do_check_eq(ex, "FAIL!");
}
-}
+});
--- a/services/sync/tests/unit/test_bookmark_engine.js
+++ b/services/sync/tests/unit/test_bookmark_engine.js
@@ -9,17 +9,17 @@ Cu.import("resource://services-sync/engi
Cu.import("resource://services-sync/engines/bookmarks.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
Cu.import("resource://gre/modules/Promise.jsm");
Service.engineManager.register(BookmarksEngine);
-add_test(function bad_record_allIDs() {
+add_task(function* bad_record_allIDs() {
let server = new SyncServer();
server.start();
let syncTesting = new SyncTestingInfrastructure(server.server);
_("Ensure that bad Places queries don't cause an error in getAllIDs.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let badRecordID = PlacesUtils.bookmarks.insertBookmark(
@@ -28,109 +28,109 @@ add_test(function bad_record_allIDs() {
PlacesUtils.bookmarks.DEFAULT_INDEX,
null);
do_check_true(badRecordID > 0);
_("Record is " + badRecordID);
_("Type: " + PlacesUtils.bookmarks.getItemType(badRecordID));
_("Fetching children.");
- store._getChildren("toolbar", {});
+ yield store._getChildren("toolbar", {});
_("Fetching all IDs.");
- let all = store.getAllIDs();
+ let all = yield store.getAllIDs();
_("All IDs: " + JSON.stringify(all));
do_check_true("menu" in all);
do_check_true("toolbar" in all);
_("Clean up.");
PlacesUtils.bookmarks.removeItem(badRecordID);
- server.stop(run_next_test);
+ yield server.promiseStop();
});
-add_test(function test_ID_caching() {
+add_task(function* test_ID_caching() {
let server = new SyncServer();
server.start();
let syncTesting = new SyncTestingInfrastructure(server.server);
_("Ensure that Places IDs are not cached.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
- _("All IDs: " + JSON.stringify(store.getAllIDs()));
+ _("All IDs: " + JSON.stringify(yield store.getAllIDs()));
- let mobileID = store.idForGUID("mobile");
+ let mobileID = yield store.idForGUID("mobile");
_("Change the GUID for that item, and drop the mobile anno.");
- store._setGUID(mobileID, "abcdefghijkl");
+ yield store._setGUID(mobileID, "abcdefghijkl");
PlacesUtils.annotations.removeItemAnnotation(mobileID, "mobile/bookmarksRoot");
let err;
let newMobileID;
// With noCreate, we don't find an entry.
try {
- newMobileID = store.idForGUID("mobile", true);
+ newMobileID = yield store.idForGUID("mobile", true);
_("New mobile ID: " + newMobileID);
} catch (ex) {
err = ex;
_("Error: " + Utils.exceptionStr(err));
}
do_check_true(!err);
// With !noCreate, lookup works, and it's different.
- newMobileID = store.idForGUID("mobile", false);
+ newMobileID = yield store.idForGUID("mobile", false);
_("New mobile ID: " + newMobileID);
do_check_true(!!newMobileID);
do_check_neq(newMobileID, mobileID);
// And it's repeatable, even with creation enabled.
- do_check_eq(newMobileID, store.idForGUID("mobile", false));
+ do_check_eq(newMobileID, yield store.idForGUID("mobile", false));
- do_check_eq(store.GUIDForId(mobileID), "abcdefghijkl");
- server.stop(run_next_test);
+ do_check_eq((yield store.GUIDForId(mobileID)), "abcdefghijkl");
+ yield server.promiseStop();
});
function serverForFoo(engine) {
return serverForUsers({"foo": "password"}, {
meta: {global: {engines: {bookmarks: {version: engine.version,
syncID: engine.syncID}}}},
bookmarks: {}
});
}
-add_test(function test_processIncoming_error_orderChildren() {
+add_task(function* test_processIncoming_error_orderChildren() {
_("Ensure that _orderChildren() is called even when _processIncoming() throws an error.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
new SyncTestingInfrastructure(server.server);
let collection = server.user("foo").collection("bookmarks");
try {
let folder1_id = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder 1", 0);
- let folder1_guid = store.GUIDForId(folder1_id);
+ let folder1_guid = yield store.GUIDForId(folder1_id);
let fxuri = Utils.makeURI("http://getfirefox.com/");
let tburi = Utils.makeURI("http://getthunderbird.com/");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
folder1_id, fxuri, PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
- let bmk1_guid = store.GUIDForId(bmk1_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
let bmk2_id = PlacesUtils.bookmarks.insertBookmark(
folder1_id, tburi, PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Thunderbird!");
- let bmk2_guid = store.GUIDForId(bmk2_id);
+ let bmk2_guid = yield store.GUIDForId(bmk2_id);
// Create a server record for folder1 where we flip the order of
// the children.
- let folder1_payload = store.createRecord(folder1_guid).cleartext;
+ let folder1_payload = (yield store.createRecord(folder1_guid)).cleartext;
folder1_payload.children.reverse();
collection.insert(folder1_guid, encryptPayload(folder1_payload));
// Create a bogus record that when synced down will provoke a
// network error which in turn provokes an exception in _processIncoming.
const BOGUS_GUID = "zzzzzzzzzzzz";
let bogus_record = collection.insert(BOGUS_GUID, "I'm a bogus record!");
bogus_record.get = function get() {
@@ -139,64 +139,64 @@ add_test(function test_processIncoming_e
// Make the 10 minutes old so it will only be synced in the toFetch phase.
bogus_record.modified = Date.now() / 1000 - 60 * 10;
engine.lastSync = Date.now() / 1000 - 60;
engine.toFetch = [BOGUS_GUID];
let error;
try {
- engine.sync();
+ yield engine.sync();
} catch(ex) {
error = ex;
}
do_check_true(!!error);
// Verify that the bookmark order has been applied.
- let new_children = store.createRecord(folder1_guid).children;
+ let new_children = (yield store.createRecord(folder1_guid)).children;
do_check_eq(new_children.length, 2);
do_check_eq(new_children[0], folder1_payload.children[0]);
do_check_eq(new_children[1], folder1_payload.children[1]);
do_check_eq(PlacesUtils.bookmarks.getItemIndex(bmk1_id), 1);
do_check_eq(PlacesUtils.bookmarks.getItemIndex(bmk2_id), 0);
} finally {
- store.wipe();
+ yield store.wipe();
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield server.promiseStop();
}
});
-add_task(function test_restorePromptsReupload() {
+add_task(function* test_restorePromptsReupload() {
_("Ensure that restoring from a backup will reupload all records.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
new SyncTestingInfrastructure(server.server);
let collection = server.user("foo").collection("bookmarks");
Svc.Obs.notify("weave:engine:start-tracking"); // We skip usual startup...
try {
let folder1_id = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder 1", 0);
- let folder1_guid = store.GUIDForId(folder1_id);
+ let folder1_guid = yield store.GUIDForId(folder1_id);
_("Folder 1: " + folder1_id + ", " + folder1_guid);
let fxuri = Utils.makeURI("http://getfirefox.com/");
let tburi = Utils.makeURI("http://getthunderbird.com/");
_("Create a single record.");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
folder1_id, fxuri, PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
- let bmk1_guid = store.GUIDForId(bmk1_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
_("Get Firefox!: " + bmk1_id + ", " + bmk1_guid);
let dirSvc = Cc["@mozilla.org/file/directory_service;1"]
.getService(Ci.nsIProperties);
let backupFile = dirSvc.get("TmpD", Ci.nsILocalFile);
@@ -205,24 +205,24 @@ add_task(function test_restorePromptsReu
_("Backing up to file " + backupFile.path);
backupFile.create(Ci.nsILocalFile.NORMAL_FILE_TYPE, 0600);
yield BookmarkJSONUtils.exportToFile(backupFile);
_("Create a different record and sync.");
let bmk2_id = PlacesUtils.bookmarks.insertBookmark(
folder1_id, tburi, PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Thunderbird!");
- let bmk2_guid = store.GUIDForId(bmk2_id);
+ let bmk2_guid = yield store.GUIDForId(bmk2_id);
_("Get Thunderbird!: " + bmk2_id + ", " + bmk2_guid);
PlacesUtils.bookmarks.removeItem(bmk1_id);
let error;
try {
- engine.sync();
+ yield engine.sync();
} catch(ex) {
error = ex;
_("Got error: " + Utils.exceptionStr(ex));
}
do_check_true(!error);
_("Verify that there's only one bookmark on the server, and it's Thunderbird.");
// Of course, there's also the Bookmarks Toolbar and Bookmarks Menu...
@@ -231,24 +231,24 @@ add_task(function test_restorePromptsReu
});
do_check_eq(wbos.length, 1);
do_check_eq(wbos[0], bmk2_guid);
_("Now restore from a backup.");
yield BookmarkJSONUtils.importFromFile(backupFile, true);
_("Ensure we have the bookmarks we expect locally.");
- let guids = store.getAllIDs();
+ let guids = yield store.getAllIDs();
_("GUIDs: " + JSON.stringify(guids));
let found = false;
let count = 0;
let newFX;
for (let guid in guids) {
count++;
- let id = store.idForGUID(guid, true);
+ let id = yield store.idForGUID(guid, true);
// Only one bookmark, so _all_ should be Firefox!
if (PlacesUtils.bookmarks.getItemType(id) == PlacesUtils.bookmarks.TYPE_BOOKMARK) {
let uri = PlacesUtils.bookmarks.getBookmarkURI(id);
_("Found URI " + uri.spec + " for GUID " + guid);
do_check_eq(uri.spec, fxuri.spec);
newFX = guid; // Save the new GUID after restore.
found = true; // Only runs if the above check passes.
}
@@ -256,17 +256,17 @@ add_task(function test_restorePromptsReu
_("We found it: " + found);
do_check_true(found);
_("Have the correct number of IDs locally, too.");
do_check_eq(count, ["menu", "toolbar", folder1_id, bmk1_id].length);
_("Sync again. This'll wipe bookmarks from the server.");
try {
- engine.sync();
+ yield engine.sync();
} catch(ex) {
error = ex;
_("Got error: " + Utils.exceptionStr(ex));
}
do_check_true(!error);
_("Verify that there's only one bookmark on the server, and it's Firefox.");
// Of course, there's also the Bookmarks Toolbar and Bookmarks Menu...
@@ -285,34 +285,32 @@ add_task(function test_restorePromptsReu
do_check_eq(bookmarkWBOs[0].bmkUri, fxuri.spec);
do_check_eq(bookmarkWBOs[0].title, "Get Firefox!");
_("Our old friend Folder 1 is still in play.");
do_check_eq(folderWBOs.length, 1);
do_check_eq(folderWBOs[0].title, "Folder 1");
} finally {
- store.wipe();
+ yield store.wipe();
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- yield deferred.promise;
+ yield server.promiseStop();
}
});
function FakeRecord(constructor, r) {
constructor.call(this, "bookmarks", r.id);
for (let x in r) {
this[x] = r[x];
}
}
// Bug 632287.
-add_test(function test_mismatched_types() {
+add_task(function* test_mismatched_types() {
_("Ensure that handling a record that changes type causes deletion " +
"then re-adding.");
let oldRecord = {
"id": "l1nZZXfB8nC7",
"type":"folder",
"parentName":"Bookmarks Toolbar",
"title":"Innerst i Sneglehode",
@@ -335,116 +333,114 @@ add_test(function test_mismatched_types(
"parentid": "toolbar"
};
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
new SyncTestingInfrastructure(server.server);
- _("GUID: " + store.GUIDForId(6, true));
+ _("GUID: " + (yield store.GUIDForId(6, true)));
try {
let bms = PlacesUtils.bookmarks;
let oldR = new FakeRecord(BookmarkFolder, oldRecord);
let newR = new FakeRecord(Livemark, newRecord);
oldR._parent = PlacesUtils.bookmarks.toolbarFolder;
newR._parent = PlacesUtils.bookmarks.toolbarFolder;
- store.applyIncoming(oldR);
+ yield store.applyIncoming(oldR);
_("Applied old. It's a folder.");
- let oldID = store.idForGUID(oldR.id);
+ let oldID = yield store.idForGUID(oldR.id);
_("Old ID: " + oldID);
do_check_eq(bms.getItemType(oldID), bms.TYPE_FOLDER);
do_check_false(PlacesUtils.annotations
.itemHasAnnotation(oldID, PlacesUtils.LMANNO_FEEDURI));
- store.applyIncoming(newR);
- let newID = store.idForGUID(newR.id);
+ yield store.applyIncoming(newR);
+ let newID = yield store.idForGUID(newR.id);
_("New ID: " + newID);
_("Applied new. It's a livemark.");
do_check_eq(bms.getItemType(newID), bms.TYPE_FOLDER);
do_check_true(PlacesUtils.annotations
.itemHasAnnotation(newID, PlacesUtils.LMANNO_FEEDURI));
} finally {
- store.wipe();
+ yield store.wipe();
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield server.promiseStop();
}
});
-add_test(function test_bookmark_guidMap_fail() {
+add_task(function* test_bookmark_guidMap_fail() {
_("Ensure that failures building the GUID map cause early death.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
let coll = server.user("foo").collection("bookmarks");
new SyncTestingInfrastructure(server.server);
// Add one item to the server.
let itemID = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder 1", 0);
- let itemGUID = store.GUIDForId(itemID);
- let itemPayload = store.createRecord(itemGUID).cleartext;
+ let itemGUID = yield store.GUIDForId(itemID);
+ let itemPayload = (yield store.createRecord(itemGUID)).cleartext;
coll.insert(itemGUID, encryptPayload(itemPayload));
engine.lastSync = 1; // So we don't back up.
// Make building the GUID map fail.
- store.getAllIDs = function () { throw "Nooo"; };
+ store.getAllIDs = function () { return Promise.reject("Nooo"); };
// Ensure that we throw when accessing _guidMap.
- engine._syncStartup();
+ yield engine._syncStartup();
_("No error.");
do_check_false(engine._guidMapFailed);
_("We get an error if building _guidMap fails in use.");
let err;
try {
- _(engine._guidMap);
+ _(yield engine._guidMap);
} catch (ex) {
err = ex;
}
do_check_eq(err.code, Engine.prototype.eEngineAbortApplyIncoming);
do_check_eq(err.cause, "Nooo");
_("We get an error and abort during processIncoming.");
err = undefined;
try {
- engine._processIncoming();
+ yield engine._processIncoming();
} catch (ex) {
err = ex;
}
do_check_eq(err, "Nooo");
- server.stop(run_next_test);
+ yield server.promiseStop();
});
-add_test(function test_bookmark_is_taggable() {
+add_task(function* test_bookmark_is_taggable() {
let engine = new BookmarksEngine(Service);
let store = engine._store;
do_check_true(store.isTaggable("bookmark"));
do_check_true(store.isTaggable("microsummary"));
do_check_true(store.isTaggable("query"));
do_check_false(store.isTaggable("folder"));
do_check_false(store.isTaggable("livemark"));
do_check_false(store.isTaggable(null));
do_check_false(store.isTaggable(undefined));
do_check_false(store.isTaggable(""));
-
- run_next_test();
});
-add_test(function test_bookmark_tag_but_no_uri() {
+add_task(function* test_bookmark_tag_but_no_uri() {
_("Ensure that a bookmark record with tags, but no URI, doesn't throw an exception.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
// We're simply checking that no exception is thrown, so
// no actual checks in this test.
@@ -459,43 +455,41 @@ add_test(function test_bookmark_tag_but_
tags: ["foo"],
title: "Taggy tag",
type: "folder"
};
// Because update() walks the cleartext.
record.cleartext = record;
- store.create(record);
+ yield store.create(record);
record.tags = ["bar"];
- store.update(record);
-
- run_next_test();
+ yield store.update(record);
});
-add_test(function test_misreconciled_root() {
+add_task(function* test_misreconciled_root() {
_("Ensure that we don't reconcile an arbitrary record with a root.");
let engine = new BookmarksEngine(Service);
let store = engine._store;
let server = serverForFoo(engine);
// Log real hard for this test.
store._log.trace = store._log.debug;
engine._log.trace = engine._log.debug;
- engine._syncStartup();
+ yield engine._syncStartup();
// Let's find out where the toolbar is right now.
- let toolbarBefore = store.createRecord("toolbar", "bookmarks");
- let toolbarIDBefore = store.idForGUID("toolbar");
+ let toolbarBefore = yield store.createRecord("toolbar", "bookmarks");
+ let toolbarIDBefore = yield store.idForGUID("toolbar");
do_check_neq(-1, toolbarIDBefore);
let parentGUIDBefore = toolbarBefore.parentid;
- let parentIDBefore = store.idForGUID(parentGUIDBefore);
+ let parentIDBefore = yield store.idForGUID(parentGUIDBefore);
do_check_neq(-1, parentIDBefore);
do_check_eq("string", typeof(parentGUIDBefore));
_("Current parent: " + parentGUIDBefore + " (" + parentIDBefore + ").");
let to_apply = {
id: "zzzzzzzzzzzz",
type: "folder",
@@ -510,33 +504,33 @@ add_test(function test_misreconciled_roo
let encrypted = encryptPayload(rec.cleartext);
encrypted.decrypt = function () {
for (let x in rec) {
encrypted[x] = rec[x];
}
};
_("Applying record.");
- engine._processIncoming({
+ yield engine._processIncoming({
get: function () {
this.recordHandler(encrypted);
return {success: true}
},
});
// Ensure that afterwards, toolbar is still there.
// As of 2012-12-05, this only passes because Places doesn't use "toolbar" as
// the real GUID, instead using a generated one. Sync does the translation.
- let toolbarAfter = store.createRecord("toolbar", "bookmarks");
+ let toolbarAfter = yield store.createRecord("toolbar", "bookmarks");
let parentGUIDAfter = toolbarAfter.parentid;
- let parentIDAfter = store.idForGUID(parentGUIDAfter);
- do_check_eq(store.GUIDForId(toolbarIDBefore), "toolbar");
+ let parentIDAfter = yield store.idForGUID(parentGUIDAfter);
+ do_check_eq((yield store.GUIDForId(toolbarIDBefore)), "toolbar");
do_check_eq(parentGUIDBefore, parentGUIDAfter);
do_check_eq(parentIDBefore, parentIDAfter);
- server.stop(run_next_test);
+ yield server.promiseStop();
});
function run_test() {
initTestLogging("Trace");
generateNewKeys(Service.collectionKeys);
run_next_test();
}
--- a/services/sync/tests/unit/test_bookmark_invalid.js
+++ b/services/sync/tests/unit/test_bookmark_invalid.js
@@ -68,17 +68,17 @@ add_task(function* test_ignore_invalid_u
WHERE id = (SELECT b.fk FROM moz_bookmarks b
WHERE b.id = :id LIMIT 1)`,
{ id: bmid, url: "<invalid url>" });
}));
// DB is now "corrupt" - setup a log appender to capture what we log.
let promiseMessage = promiseLogMessage('Deleting bookmark with invalid URI. url="<invalid url>"');
// This should work and log our invalid id.
- engine._buildGUIDMap();
+ yield engine._buildGUIDMap();
yield promiseMessage;
// And we should have deleted the item.
yield promiseNoItem(bmid);
});
add_task(function* test_ignore_missing_uri() {
_("Ensure that we don't die with a bookmark referencing an invalid bookmark id.");
--- a/services/sync/tests/unit/test_bookmark_legacy_microsummaries_support.js
+++ b/services/sync/tests/unit/test_bookmark_legacy_microsummaries_support.js
@@ -31,56 +31,59 @@ function newMicrosummary(url, title) {
PlacesUtils.annotations.EXPIRE_NEVER);
PlacesUtils.annotations.setItemAnnotation(id, STATICTITLE_ANNO,
"Static title", 0,
PlacesUtils.annotations.EXPIRE_NEVER);
return id;
}
function run_test() {
+ Service.engineManager.register(BookmarksEngine);
+ run_next_test();
+};
- Service.engineManager.register(BookmarksEngine);
+add_task(function* () {
let engine = Service.engineManager.get("bookmarks");
let store = engine._store;
// Clean up.
- store.wipe();
+ yield store.wipe();
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Bookmarks").level = Log.Level.Trace;
_("Create a microsummarized bookmark.");
let id = newMicrosummary(TEST_URL, TEST_TITLE);
- let guid = store.GUIDForId(id);
+ let guid = yield store.GUIDForId(id);
_("GUID: " + guid);
do_check_true(!!guid);
_("Create record object and verify that it's sane.");
- let record = store.createRecord(guid);
+ let record = yield store.createRecord(guid);
do_check_true(record instanceof Bookmark);
do_check_eq(record.bmkUri, TEST_URL);
_("Make sure the new record does not carry the microsummaries annotations.");
do_check_false("staticTitle" in record);
do_check_false("generatorUri" in record);
_("Remove the bookmark from Places.");
PlacesUtils.bookmarks.removeItem(id);
_("Convert record to the old microsummaries one.");
record.staticTitle = STATIC_TITLE;
record.generatorUri = GENERATOR_URL;
record.type = "microsummary";
_("Apply the modified record as incoming data.");
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
_("Verify it has been created correctly as a simple Bookmark.");
- id = store.idForGUID(record.id);
- do_check_eq(store.GUIDForId(id), record.id);
+ id = yield store.idForGUID(record.id);
+ do_check_eq((yield store.GUIDForId(id)), record.id);
do_check_eq(PlacesUtils.bookmarks.getItemType(id),
PlacesUtils.bookmarks.TYPE_BOOKMARK);
do_check_eq(PlacesUtils.bookmarks.getBookmarkURI(id).spec, TEST_URL);
do_check_eq(PlacesUtils.bookmarks.getItemTitle(id), TEST_TITLE);
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(id),
PlacesUtils.unfiledBookmarksFolderId);
do_check_eq(PlacesUtils.bookmarks.getKeywordForBookmark(id), null);
@@ -90,10 +93,10 @@ function run_test() {
);
do_check_throws(
() => PlacesUtils.annotations.getItemAnnotation(id, STATICTITLE_ANNO),
Cr.NS_ERROR_NOT_AVAILABLE
);
// Clean up.
- store.wipe();
-}
+ yield store.wipe();
+});
--- a/services/sync/tests/unit/test_bookmark_livemarks.js
+++ b/services/sync/tests/unit/test_bookmark_livemarks.js
@@ -48,19 +48,16 @@ var record631361 = {
"E3H04Wn2RfSi", "eaSIMI6kSrcz", "rtkRxFoG5Vqi", "dectkUglV0Dz",
"B4vUE0BE15No", "qgQFW5AQrgB0", "SxAXvwOhu8Zi", "0S6cRPOg-5Z2",
"zcZZBGeLnaWW", "B0at8hkQqVZQ", "sgPtgGulbP66", "lwtwGHSCPYaQ",
"mNTdpgoRZMbW", "-L8Vci6CbkJY", "bVzudKSQERc1", "Gxl9lb4DXsmL",
"3Qr13GucOtEh"]},
collection: "bookmarks"
};
-// Clean up after other tests. Only necessary in XULRunner.
-store.wipe();
-
function makeLivemark(p, mintGUID) {
let b = new Livemark("bookmarks", p.id);
// Copy here, because tests mutate the contents.
b.cleartext = TestingUtils.deepCopy(p);
if (mintGUID)
b.id = Utils.makeGUID();
@@ -71,73 +68,73 @@ function makeLivemark(p, mintGUID) {
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Bookmarks").level = Log.Level.Trace;
Log.repository.getLogger("Sync.Store.Bookmarks").level = Log.Level.Trace;
run_next_test();
}
-add_test(function test_livemark_descriptions() {
+add_task(function* test_livemark_descriptions() {
+
+ // Clean up after other tests. Only necessary in XULRunner.
+ yield store.wipe();
+
let record = record631361.payload;
- function doRecord(r) {
+ function* doRecord(r) {
store._childrenToOrder = {};
- store.applyIncoming(r);
+ yield store.applyIncoming(r);
store._orderChildren();
delete store._childrenToOrder;
}
// Attempt to provoke an error by messing around with the description.
record.description = null;
- doRecord(makeLivemark(record));
+ yield doRecord(makeLivemark(record));
record.description = "";
- doRecord(makeLivemark(record));
+ yield doRecord(makeLivemark(record));
// Attempt to provoke an error by adding a bad description anno.
- let id = store.idForGUID(record.id);
+ let id = yield store.idForGUID(record.id);
PlacesUtils.annotations.setItemAnnotation(id, DESCRIPTION_ANNO, "", 0,
PlacesUtils.annotations.EXPIRE_NEVER);
- run_next_test();
});
-add_test(function test_livemark_invalid() {
+add_task(function* test_livemark_invalid() {
_("Livemarks considered invalid by nsLivemarkService are skipped.");
_("Parent is 0, which is invalid. Will be set to unfiled.");
let noParentRec = makeLivemark(record631361.payload, true);
noParentRec._parent = 0;
- store.create(noParentRec);
- let recID = store.idForGUID(noParentRec.id, true);
+ yield store.create(noParentRec);
+ let recID = yield store.idForGUID(noParentRec.id, true);
do_check_true(recID > 0);
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(recID), PlacesUtils.bookmarks.unfiledBookmarksFolder);
_("Parent is unknown. Will be set to unfiled.");
let lateParentRec = makeLivemark(record631361.payload, true);
let parentGUID = Utils.makeGUID();
lateParentRec.parentid = parentGUID;
- lateParentRec._parent = store.idForGUID(parentGUID); // Usually done by applyIncoming.
+ lateParentRec._parent = yield store.idForGUID(parentGUID); // Usually done by applyIncoming.
do_check_eq(-1, lateParentRec._parent);
- store.create(lateParentRec);
- recID = store.idForGUID(lateParentRec.id, true);
+ yield store.create(lateParentRec);
+ recID = yield store.idForGUID(lateParentRec.id, true);
do_check_true(recID > 0);
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(recID),
PlacesUtils.bookmarks.unfiledBookmarksFolder);
_("No feed URI, which is invalid. Will be skipped.");
let noFeedURIRec = makeLivemark(record631361.payload, true);
delete noFeedURIRec.cleartext.feedUri;
- store.create(noFeedURIRec);
+ yield store.create(noFeedURIRec);
// No exception, but no creation occurs.
- do_check_eq(-1, store.idForGUID(noFeedURIRec.id, true));
+ do_check_eq(-1, yield store.idForGUID(noFeedURIRec.id, true));
_("Parent is a Livemark. Will be skipped.");
let lmParentRec = makeLivemark(record631361.payload, true);
lmParentRec._parent = recID;
- store.create(lmParentRec);
+ yield store.create(lmParentRec);
// No exception, but no creation occurs.
- do_check_eq(-1, store.idForGUID(lmParentRec.id, true));
-
- // Clear event loop.
- Utils.nextTick(run_next_test);
+ do_check_eq(-1, yield store.idForGUID(lmParentRec.id, true));
});
--- a/services/sync/tests/unit/test_bookmark_order.js
+++ b/services/sync/tests/unit/test_bookmark_order.js
@@ -38,21 +38,25 @@ function check(expected) {
let bookmarks = getBookmarks(PlacesUtils.bookmarks.unfiledBookmarksFolder);
_("Checking if the bookmark structure is", JSON.stringify(expected));
_("Got bookmarks:", JSON.stringify(bookmarks));
do_check_true(Utils.deepEquals(bookmarks, expected));
}
function run_test() {
- let store = new BookmarksEngine(Service)._store;
initTestLogging("Trace");
+ run_next_test();
+}
+
+add_task(function* () {
_("Starting with a clean slate of no bookmarks");
- store.wipe();
+ let store = new BookmarksEngine(Service)._store;
+ yield store.wipe();
check([]);
function bookmark(name, parent) {
let bookmark = new Bookmark("http://weave.server/my-bookmark");
bookmark.id = name;
bookmark.title = name;
bookmark.bmkUri = "http://uri/";
bookmark.parentid = parent || "unfiled";
@@ -66,73 +70,73 @@ function run_test() {
folder.title = name;
folder.parentid = parent || "unfiled";
folder.children = children;
return folder;
}
function apply(record) {
store._childrenToOrder = {};
- store.applyIncoming(record);
- store._orderChildren();
+ yield store.applyIncoming(record);
+ yield store._orderChildren();
delete store._childrenToOrder;
}
_("basic add first bookmark");
- apply(bookmark("10", ""));
+ yield apply(bookmark("10", ""));
check(["10"]);
_("basic append behind 10");
- apply(bookmark("20", ""));
+ yield apply(bookmark("20", ""));
check(["10", "20"]);
_("basic create in folder");
- apply(bookmark("31", "f30"));
+ yield apply(bookmark("31", "f30"));
let f30 = folder("f30", "", ["31"]);
- apply(f30);
+ yield apply(f30);
check(["10", "20", ["31"]]);
_("insert missing parent -> append to unfiled");
- apply(bookmark("41", "f40"));
+ yield apply(bookmark("41", "f40"));
check(["10", "20", ["31"], "41"]);
_("insert another missing parent -> append");
- apply(bookmark("42", "f40"));
+ yield apply(bookmark("42", "f40"));
check(["10", "20", ["31"], "41", "42"]);
_("insert folder -> move children and followers");
let f40 = folder("f40", "", ["41", "42"]);
- apply(f40);
+ yield apply(f40);
check(["10", "20", ["31"], ["41", "42"]]);
_("Moving 41 behind 42 -> update f40");
f40.children = ["42", "41"];
- apply(f40);
+ yield apply(f40);
check(["10", "20", ["31"], ["42", "41"]]);
_("Moving 10 back to front -> update 10, 20");
f40.children = ["41", "42"];
- apply(f40);
+ yield apply(f40);
check(["10", "20", ["31"], ["41", "42"]]);
_("Moving 20 behind 42 in f40 -> update 50");
- apply(bookmark("20", "f40"));
+ yield apply(bookmark("20", "f40"));
check(["10", ["31"], ["41", "42", "20"]]);
_("Moving 10 in front of 31 in f30 -> update 10, f30");
- apply(bookmark("10", "f30"));
+ yield apply(bookmark("10", "f30"));
f30.children = ["10", "31"];
- apply(f30);
+ yield apply(f30);
check([["10", "31"], ["41", "42", "20"]]);
_("Moving 20 from f40 to f30 -> update 20, f30");
- apply(bookmark("20", "f30"));
+ yield apply(bookmark("20", "f30"));
f30.children = ["10", "20", "31"];
- apply(f30);
+ yield apply(f30);
check([["10", "20", "31"], ["41", "42"]]);
_("Move 20 back to front -> update 20, f30");
- apply(bookmark("20", ""));
+ yield apply(bookmark("20", ""));
f30.children = ["10", "31"];
- apply(f30);
+ yield apply(f30);
check([["10", "31"], ["41", "42"], "20"]);
-}
+});
--- a/services/sync/tests/unit/test_bookmark_smart_bookmarks.js
+++ b/services/sync/tests/unit/test_bookmark_smart_bookmarks.js
@@ -14,19 +14,16 @@ var IOService = Cc["@mozilla.org/network
.getService(Ci.nsIIOService);
("http://www.mozilla.com", null, null);
Service.engineManager.register(BookmarksEngine);
var engine = Service.engineManager.get("bookmarks");
var store = engine._store;
-// Clean up after other tests. Only necessary in XULRunner.
-store.wipe();
-
function newSmartBookmark(parent, uri, position, title, queryID) {
let id = PlacesUtils.bookmarks.insertBookmark(parent, uri, position, title);
PlacesUtils.annotations.setItemAnnotation(id, SMART_BOOKMARKS_ANNO,
queryID, 0,
PlacesUtils.annotations.EXPIRE_NEVER);
return id;
}
@@ -52,17 +49,20 @@ function serverForFoo(engine) {
meta: {global: {engines: {bookmarks: {version: engine.version,
syncID: engine.syncID}}}},
bookmarks: {}
});
}
// Verify that Places smart bookmarks have their annotation uploaded and
// handled locally.
-add_test(function test_annotation_uploaded() {
+add_task(function* test_annotation_uploaded() {
+ // Clean up after other tests. Only necessary in XULRunner.
+ yield store.wipe();
+
let server = serverForFoo(engine);
new SyncTestingInfrastructure(server.server);
let startCount = smartBookmarkCount();
_("Start count is " + startCount);
if (startCount > 0) {
@@ -84,38 +84,38 @@ add_test(function test_annotation_upload
_("New item ID: " + mostVisitedID);
do_check_true(!!mostVisitedID);
let annoValue = PlacesUtils.annotations.getItemAnnotation(mostVisitedID,
SMART_BOOKMARKS_ANNO);
_("Anno: " + annoValue);
do_check_eq("MostVisited", annoValue);
- let guid = store.GUIDForId(mostVisitedID);
+ let guid = yield store.GUIDForId(mostVisitedID);
_("GUID: " + guid);
do_check_true(!!guid);
_("Create record object and verify that it's sane.");
- let record = store.createRecord(guid);
+ let record = yield store.createRecord(guid);
do_check_true(record instanceof Bookmark);
do_check_true(record instanceof BookmarkQuery);
do_check_eq(record.bmkUri, uri.spec);
_("Make sure the new record carries with it the annotation.");
do_check_eq("MostVisited", record.queryId);
_("Our count has increased since we started.");
do_check_eq(smartBookmarkCount(), startCount + 1);
_("Sync record to the server.");
let collection = server.user("foo").collection("bookmarks");
try {
- engine.sync();
+ yield engine.sync();
let wbos = collection.keys(function (id) {
return ["menu", "toolbar", "mobile"].indexOf(id) == -1;
});
do_check_eq(wbos.length, 1);
_("Verify that the server WBO has the annotation.");
let serverGUID = wbos[0];
do_check_eq(serverGUID, guid);
@@ -131,100 +131,100 @@ add_test(function test_annotation_upload
// "Clear" by changing attributes: if we delete it, apparently it sticks
// around as a deleted record...
PlacesUtils.bookmarks.setItemTitle(mostVisitedID, "Not Most Visited");
PlacesUtils.bookmarks.changeBookmarkURI(
mostVisitedID, Utils.makeURI("http://something/else"));
PlacesUtils.annotations.removeItemAnnotation(mostVisitedID,
SMART_BOOKMARKS_ANNO);
- store.wipe();
- engine.resetClient();
+ yield store.wipe();
+ yield engine.resetClient();
do_check_eq(smartBookmarkCount(), startCount);
_("Sync. Verify that the downloaded record carries the annotation.");
- engine.sync();
+ yield engine.sync();
_("Verify that the Places DB now has an annotated bookmark.");
_("Our count has increased again.");
do_check_eq(smartBookmarkCount(), startCount + 1);
_("Find by GUID and verify that it's annotated.");
- let newID = store.idForGUID(serverGUID);
+ let newID = yield store.idForGUID(serverGUID);
let newAnnoValue = PlacesUtils.annotations.getItemAnnotation(
newID, SMART_BOOKMARKS_ANNO);
do_check_eq(newAnnoValue, "MostVisited");
do_check_eq(PlacesUtils.bookmarks.getBookmarkURI(newID).spec, uri.spec);
_("Test updating.");
- let newRecord = store.createRecord(serverGUID);
+ let newRecord = yield store.createRecord(serverGUID);
do_check_eq(newRecord.queryId, newAnnoValue);
newRecord.queryId = "LeastVisited";
- store.update(newRecord);
+ yield store.update(newRecord);
do_check_eq("LeastVisited", PlacesUtils.annotations.getItemAnnotation(
newID, SMART_BOOKMARKS_ANNO));
} finally {
// Clean up.
- store.wipe();
+ yield store.wipe();
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield server.promiseStop();
}
});
-add_test(function test_smart_bookmarks_duped() {
+add_task(function* test_smart_bookmarks_duped() {
let server = serverForFoo(engine);
new SyncTestingInfrastructure(server.server);
let parent = PlacesUtils.toolbarFolderId;
let uri =
Utils.makeURI("place:sort=" +
Ci.nsINavHistoryQueryOptions.SORT_BY_VISITCOUNT_DESCENDING +
"&maxResults=10");
let title = "Most Visited";
let mostVisitedID = newSmartBookmark(parent, uri, -1, title, "MostVisited");
- let mostVisitedGUID = store.GUIDForId(mostVisitedID);
+ let mostVisitedGUID = yield store.GUIDForId(mostVisitedID);
- let record = store.createRecord(mostVisitedGUID);
+ let record = yield store.createRecord(mostVisitedGUID);
_("Prepare sync.");
let collection = server.user("foo").collection("bookmarks");
try {
- engine._syncStartup();
+ yield engine._syncStartup();
_("Verify that mapDupe uses the anno, discovering a dupe regardless of URI.");
- do_check_eq(mostVisitedGUID, engine._mapDupe(record));
+ do_check_eq(mostVisitedGUID, yield engine._mapDupe(record));
record.bmkUri = "http://foo/";
- do_check_eq(mostVisitedGUID, engine._mapDupe(record));
+ do_check_eq(mostVisitedGUID, yield engine._mapDupe(record));
do_check_neq(PlacesUtils.bookmarks.getBookmarkURI(mostVisitedID).spec,
record.bmkUri);
_("Verify that different annos don't dupe.");
let other = new BookmarkQuery("bookmarks", "abcdefabcdef");
other.queryId = "LeastVisited";
other.parentName = "Bookmarks Toolbar";
other.bmkUri = "place:foo";
other.title = "";
- do_check_eq(undefined, engine._findDupe(other));
+ do_check_eq(undefined, yield engine._findDupe(other));
_("Handle records without a queryId entry.");
record.bmkUri = uri;
delete record.queryId;
- do_check_eq(mostVisitedGUID, engine._mapDupe(record));
+ do_check_eq(mostVisitedGUID, yield engine._mapDupe(record));
- engine._syncFinish();
+ yield engine._syncFinish();
} finally {
// Clean up.
- store.wipe();
- server.stop(do_test_finished);
+ yield store.wipe();
+ yield server.promiseStop();
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
}
});
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Bookmarks").level = Log.Level.Trace;
--- a/services/sync/tests/unit/test_bookmark_store.js
+++ b/services/sync/tests/unit/test_bookmark_store.js
@@ -16,419 +16,406 @@ var store = engine._store;
var tracker = engine._tracker;
// Don't write some persistence files asynchronously.
tracker.persistChangedIDs = false;
var fxuri = Utils.makeURI("http://getfirefox.com/");
var tburi = Utils.makeURI("http://getthunderbird.com/");
-add_test(function test_ignore_specials() {
+add_task(function* test_ignore_specials() {
_("Ensure that we can't delete bookmark roots.");
// Belt...
let record = new BookmarkFolder("bookmarks", "toolbar", "folder");
record.deleted = true;
- do_check_neq(null, store.idForGUID("toolbar"));
+ do_check_neq(null, yield store.idForGUID("toolbar"));
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
// Ensure that the toolbar exists.
- do_check_neq(null, store.idForGUID("toolbar"));
+ do_check_neq(null, yield store.idForGUID("toolbar"));
// This will fail painfully in getItemType if the deletion worked.
- engine._buildGUIDMap();
+ yield engine._buildGUIDMap();
// Braces...
- store.remove(record);
- do_check_neq(null, store.idForGUID("toolbar"));
- engine._buildGUIDMap();
+ yield store.remove(record);
+ do_check_neq(null, yield store.idForGUID("toolbar"));
+ yield engine._buildGUIDMap();
- store.wipe();
- run_next_test();
+ yield store.wipe();
});
-add_test(function test_bookmark_create() {
+add_task(function* test_bookmark_create() {
try {
_("Ensure the record isn't present yet.");
let ids = PlacesUtils.bookmarks.getBookmarkIdsForURI(fxuri, {});
do_check_eq(ids.length, 0);
_("Let's create a new record.");
let fxrecord = new Bookmark("bookmarks", "get-firefox1");
fxrecord.bmkUri = fxuri.spec;
fxrecord.description = "Firefox is awesome.";
fxrecord.title = "Get Firefox!";
fxrecord.tags = ["firefox", "awesome", "browser"];
fxrecord.keyword = "awesome";
fxrecord.loadInSidebar = false;
fxrecord.parentName = "Bookmarks Toolbar";
fxrecord.parentid = "toolbar";
- store.applyIncoming(fxrecord);
+ yield store.applyIncoming(fxrecord);
_("Verify it has been created correctly.");
- let id = store.idForGUID(fxrecord.id);
- do_check_eq(store.GUIDForId(id), fxrecord.id);
+ let id = yield store.idForGUID(fxrecord.id);
+ do_check_eq((yield store.GUIDForId(id)), fxrecord.id);
do_check_eq(PlacesUtils.bookmarks.getItemType(id),
PlacesUtils.bookmarks.TYPE_BOOKMARK);
do_check_true(PlacesUtils.bookmarks.getBookmarkURI(id).equals(fxuri));
do_check_eq(PlacesUtils.bookmarks.getItemTitle(id), fxrecord.title);
do_check_eq(PlacesUtils.annotations.getItemAnnotation(id, "bookmarkProperties/description"),
fxrecord.description);
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(id),
PlacesUtils.bookmarks.toolbarFolder);
do_check_eq(PlacesUtils.bookmarks.getKeywordForBookmark(id), fxrecord.keyword);
_("Have the store create a new record object. Verify that it has the same data.");
- let newrecord = store.createRecord(fxrecord.id);
+ let newrecord = yield store.createRecord(fxrecord.id);
do_check_true(newrecord instanceof Bookmark);
for each (let property in ["type", "bmkUri", "description", "title",
"keyword", "parentName", "parentid"]) {
do_check_eq(newrecord[property], fxrecord[property]);
}
do_check_true(Utils.deepEquals(newrecord.tags.sort(),
fxrecord.tags.sort()));
_("The calculated sort index is based on frecency data.");
do_check_true(newrecord.sortindex >= 150);
_("Create a record with some values missing.");
let tbrecord = new Bookmark("bookmarks", "thunderbird1");
tbrecord.bmkUri = tburi.spec;
tbrecord.parentName = "Bookmarks Toolbar";
tbrecord.parentid = "toolbar";
- store.applyIncoming(tbrecord);
+ yield store.applyIncoming(tbrecord);
_("Verify it has been created correctly.");
- id = store.idForGUID(tbrecord.id);
- do_check_eq(store.GUIDForId(id), tbrecord.id);
+ id = yield store.idForGUID(tbrecord.id);
+ do_check_eq((yield store.GUIDForId(id)), tbrecord.id);
do_check_eq(PlacesUtils.bookmarks.getItemType(id),
PlacesUtils.bookmarks.TYPE_BOOKMARK);
do_check_true(PlacesUtils.bookmarks.getBookmarkURI(id).equals(tburi));
do_check_eq(PlacesUtils.bookmarks.getItemTitle(id), null);
let error;
try {
PlacesUtils.annotations.getItemAnnotation(id, "bookmarkProperties/description");
} catch(ex) {
error = ex;
}
do_check_eq(error.result, Cr.NS_ERROR_NOT_AVAILABLE);
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(id),
PlacesUtils.bookmarks.toolbarFolder);
do_check_eq(PlacesUtils.bookmarks.getKeywordForBookmark(id), null);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_bookmark_update() {
+add_task(function* test_bookmark_update() {
try {
_("Create a bookmark whose values we'll change.");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder, fxuri,
PlacesUtils.bookmarks.DEFAULT_INDEX,
"Get Firefox!");
PlacesUtils.annotations.setItemAnnotation(
bmk1_id, "bookmarkProperties/description", "Firefox is awesome.", 0,
PlacesUtils.annotations.EXPIRE_NEVER);
PlacesUtils.bookmarks.setKeywordForBookmark(bmk1_id, "firefox");
- let bmk1_guid = store.GUIDForId(bmk1_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
_("Update the record with some null values.");
- let record = store.createRecord(bmk1_guid);
+ let record = yield store.createRecord(bmk1_guid);
record.title = null;
record.description = null;
record.keyword = null;
record.tags = null;
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
_("Verify that the values have been cleared.");
do_check_throws(function () {
PlacesUtils.annotations.getItemAnnotation(
bmk1_id, "bookmarkProperties/description");
}, Cr.NS_ERROR_NOT_AVAILABLE);
do_check_eq(PlacesUtils.bookmarks.getItemTitle(bmk1_id), null);
do_check_eq(PlacesUtils.bookmarks.getKeywordForBookmark(bmk1_id), null);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_bookmark_createRecord() {
+add_task(function* test_bookmark_createRecord() {
try {
_("Create a bookmark without a description or title.");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder, fxuri,
PlacesUtils.bookmarks.DEFAULT_INDEX, null);
- let bmk1_guid = store.GUIDForId(bmk1_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
_("Verify that the record is created accordingly.");
- let record = store.createRecord(bmk1_guid);
+ let record = yield store.createRecord(bmk1_guid);
do_check_eq(record.title, null);
do_check_eq(record.description, null);
do_check_eq(record.keyword, null);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_folder_create() {
+add_task(function* test_folder_create() {
try {
_("Create a folder.");
let folder = new BookmarkFolder("bookmarks", "testfolder-1");
folder.parentName = "Bookmarks Toolbar";
folder.parentid = "toolbar";
folder.title = "Test Folder";
- store.applyIncoming(folder);
+ yield store.applyIncoming(folder);
_("Verify it has been created correctly.");
- let id = store.idForGUID(folder.id);
+ let id = yield store.idForGUID(folder.id);
do_check_eq(PlacesUtils.bookmarks.getItemType(id),
PlacesUtils.bookmarks.TYPE_FOLDER);
do_check_eq(PlacesUtils.bookmarks.getItemTitle(id), folder.title);
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(id),
PlacesUtils.bookmarks.toolbarFolder);
_("Have the store create a new record object. Verify that it has the same data.");
- let newrecord = store.createRecord(folder.id);
+ let newrecord = yield store.createRecord(folder.id);
do_check_true(newrecord instanceof BookmarkFolder);
for each (let property in ["title", "parentName", "parentid"])
do_check_eq(newrecord[property], folder[property]);
_("Folders have high sort index to ensure they're synced first.");
do_check_eq(newrecord.sortindex, 1000000);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_folder_createRecord() {
+add_task(function* test_folder_createRecord() {
try {
_("Create a folder.");
let folder1_id = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder1", 0);
- let folder1_guid = store.GUIDForId(folder1_id);
+ let folder1_guid = yield store.GUIDForId(folder1_id);
_("Create two bookmarks in that folder without assigning them GUIDs.");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
folder1_id, fxuri, PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
let bmk2_id = PlacesUtils.bookmarks.insertBookmark(
folder1_id, tburi, PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Thunderbird!");
_("Create a record for the folder and verify basic properties.");
- let record = store.createRecord(folder1_guid);
+ let record = yield store.createRecord(folder1_guid);
do_check_true(record instanceof BookmarkFolder);
do_check_eq(record.title, "Folder1");
do_check_eq(record.parentid, "toolbar");
do_check_eq(record.parentName, "Bookmarks Toolbar");
_("Verify the folder's children. Ensures that the bookmarks were given GUIDs.");
- let bmk1_guid = store.GUIDForId(bmk1_id);
- let bmk2_guid = store.GUIDForId(bmk2_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
+ let bmk2_guid = yield store.GUIDForId(bmk2_id);
do_check_eq(record.children.length, 2);
do_check_eq(record.children[0], bmk1_guid);
do_check_eq(record.children[1], bmk2_guid);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_deleted() {
+add_task(function* test_deleted() {
try {
_("Create a bookmark that will be deleted.");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder, fxuri,
PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
- let bmk1_guid = store.GUIDForId(bmk1_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
_("Delete the bookmark through the store.");
let record = new PlacesItem("bookmarks", bmk1_guid);
record.deleted = true;
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
_("Ensure it has been deleted.");
let error;
try {
PlacesUtils.bookmarks.getBookmarkURI(bmk1_id);
} catch(ex) {
error = ex;
}
do_check_eq(error.result, Cr.NS_ERROR_ILLEGAL_VALUE);
- let newrec = store.createRecord(bmk1_guid);
+ let newrec = yield store.createRecord(bmk1_guid);
do_check_eq(newrec.deleted, true);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_move_folder() {
+add_task(function* test_move_folder() {
try {
_("Create two folders and a bookmark in one of them.");
let folder1_id = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder1", 0);
- let folder1_guid = store.GUIDForId(folder1_id);
+ let folder1_guid = yield store.GUIDForId(folder1_id);
let folder2_id = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder2", 0);
- let folder2_guid = store.GUIDForId(folder2_id);
+ let folder2_guid = yield store.GUIDForId(folder2_id);
let bmk_id = PlacesUtils.bookmarks.insertBookmark(
folder1_id, fxuri, PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
- let bmk_guid = store.GUIDForId(bmk_id);
+ let bmk_guid = yield store.GUIDForId(bmk_id);
_("Get a record, reparent it and apply it to the store.");
- let record = store.createRecord(bmk_guid);
+ let record = yield store.createRecord(bmk_guid);
do_check_eq(record.parentid, folder1_guid);
record.parentid = folder2_guid;
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
_("Verify the new parent.");
let new_folder_id = PlacesUtils.bookmarks.getFolderIdForItem(bmk_id);
- do_check_eq(store.GUIDForId(new_folder_id), folder2_guid);
+ do_check_eq((yield store.GUIDForId(new_folder_id)), folder2_guid);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_move_order() {
+add_task(function* test_move_order() {
// Make sure the tracker is turned on.
Svc.Obs.notify("weave:engine:start-tracking");
try {
_("Create two bookmarks");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder, fxuri,
PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
- let bmk1_guid = store.GUIDForId(bmk1_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
let bmk2_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder, tburi,
PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Thunderbird!");
- let bmk2_guid = store.GUIDForId(bmk2_id);
+ let bmk2_guid = yield store.GUIDForId(bmk2_id);
_("Verify order.");
do_check_eq(PlacesUtils.bookmarks.getItemIndex(bmk1_id), 0);
do_check_eq(PlacesUtils.bookmarks.getItemIndex(bmk2_id), 1);
- let toolbar = store.createRecord("toolbar");
+ let toolbar = yield store.createRecord("toolbar");
do_check_eq(toolbar.children.length, 2);
do_check_eq(toolbar.children[0], bmk1_guid);
do_check_eq(toolbar.children[1], bmk2_guid);
_("Move bookmarks around.");
store._childrenToOrder = {};
toolbar.children = [bmk2_guid, bmk1_guid];
- store.applyIncoming(toolbar);
+ yield store.applyIncoming(toolbar);
// Bookmarks engine does this at the end of _processIncoming
tracker.ignoreAll = true;
- store._orderChildren();
+ yield store._orderChildren();
tracker.ignoreAll = false;
delete store._childrenToOrder;
_("Verify new order.");
do_check_eq(PlacesUtils.bookmarks.getItemIndex(bmk2_id), 0);
do_check_eq(PlacesUtils.bookmarks.getItemIndex(bmk1_id), 1);
} finally {
Svc.Obs.notify("weave:engine:stop-tracking");
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_orphan() {
+add_task(function* test_orphan() {
try {
_("Add a new bookmark locally.");
let bmk1_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder, fxuri,
PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
- let bmk1_guid = store.GUIDForId(bmk1_id);
+ let bmk1_guid = yield store.GUIDForId(bmk1_id);
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(bmk1_id),
PlacesUtils.bookmarks.toolbarFolder);
let error;
try {
PlacesUtils.annotations.getItemAnnotation(bmk1_id, PARENT_ANNO);
} catch(ex) {
error = ex;
}
do_check_eq(error.result, Cr.NS_ERROR_NOT_AVAILABLE);
_("Apply a server record that is the same but refers to non-existent folder.");
- let record = store.createRecord(bmk1_guid);
+ let record = yield store.createRecord(bmk1_guid);
record.parentid = "non-existent";
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
_("Verify that bookmark has been flagged as orphan, has not moved.");
do_check_eq(PlacesUtils.bookmarks.getFolderIdForItem(bmk1_id),
PlacesUtils.bookmarks.toolbarFolder);
do_check_eq(PlacesUtils.annotations.getItemAnnotation(bmk1_id, PARENT_ANNO),
"non-existent");
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
-add_test(function test_reparentOrphans() {
+add_task(function* test_reparentOrphans() {
try {
let folder1_id = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.toolbarFolder, "Folder1", 0);
- let folder1_guid = store.GUIDForId(folder1_id);
+ let folder1_guid = yield store.GUIDForId(folder1_id);
_("Create a bogus orphan record and write the record back to the store to trigger _reparentOrphans.");
PlacesUtils.annotations.setItemAnnotation(
folder1_id, PARENT_ANNO, folder1_guid, 0,
PlacesUtils.annotations.EXPIRE_NEVER);
- let record = store.createRecord(folder1_guid);
+ let record = yield store.createRecord(folder1_guid);
record.title = "New title for Folder 1";
store._childrenToOrder = {};
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
_("Verify that is has been marked as an orphan even though it couldn't be moved into itself.");
do_check_eq(PlacesUtils.annotations.getItemAnnotation(folder1_id, PARENT_ANNO),
folder1_guid);
} finally {
_("Clean up.");
- store.wipe();
- run_next_test();
+ yield store.wipe();
}
});
// Tests Bug 806460, in which query records arrive with empty folder
// names and missing bookmark URIs.
-add_test(function test_empty_query_doesnt_die() {
+add_task(function* test_empty_query_doesnt_die() {
let record = new BookmarkQuery("bookmarks", "8xoDGqKrXf1P");
record.folderName = "";
record.queryId = "";
record.parentName = "Toolbar";
record.parentid = "toolbar";
// These should not throw.
- store.applyIncoming(record);
+ yield store.applyIncoming(record);
delete record.folderName;
- store.applyIncoming(record);
-
- run_next_test();
+ yield store.applyIncoming(record);
});
function run_test() {
initTestLogging('Trace');
run_next_test();
}
--- a/services/sync/tests/unit/test_bookmark_tracker.js
+++ b/services/sync/tests/unit/test_bookmark_tracker.js
@@ -8,20 +8,23 @@ Cu.import("resource://services-sync/engi
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Service.engineManager.register(BookmarksEngine);
var engine = Service.engineManager.get("bookmarks");
var store = engine._store;
var tracker = engine._tracker;
-store.wipe();
-tracker.persistChangedIDs = false;
-function test_tracking() {
+add_task(function* () {
+ yield store.wipe();
+ tracker.persistChangedIDs = false;
+});
+
+add_task(function* test_tracking() {
_("Verify we've got an empty tracker to work with.");
let tracker = engine._tracker;
do_check_empty(tracker.changedIDs);
let folder = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.bookmarksMenuFolder,
"Test Folder", PlacesUtils.bookmarks.DEFAULT_INDEX);
function createBmk() {
@@ -46,39 +49,39 @@ function test_tracking() {
_("Notifying twice won't do any harm.");
Svc.Obs.notify("weave:engine:start-tracking");
createBmk();
do_check_attribute_count(tracker.changedIDs, 3);
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE * 4);
_("Let's stop tracking again.");
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
tracker.resetScore();
Svc.Obs.notify("weave:engine:stop-tracking");
createBmk();
do_check_empty(tracker.changedIDs);
do_check_eq(tracker.score, 0);
_("Notifying twice won't do any harm.");
Svc.Obs.notify("weave:engine:stop-tracking");
createBmk();
do_check_empty(tracker.changedIDs);
do_check_eq(tracker.score, 0);
} finally {
_("Clean up.");
- store.wipe();
- tracker.clearChangedIDs();
+ yield store.wipe();
+ yield tracker.clearChangedIDs();
tracker.resetScore();
Svc.Obs.notify("weave:engine:stop-tracking");
}
-}
+});
-function test_onItemChanged() {
+add_task(function* test_onItemChanged() {
// Anno that's in ANNOS_TO_TRACK.
const DESCRIPTION_ANNO = "bookmarkProperties/description";
_("Verify we've got an empty tracker to work with.");
let tracker = engine._tracker;
do_check_empty(tracker.changedIDs);
do_check_eq(tracker.score, 0);
@@ -86,93 +89,91 @@ function test_onItemChanged() {
Svc.Obs.notify("weave:engine:stop-tracking");
let folder = PlacesUtils.bookmarks.createFolder(
PlacesUtils.bookmarks.bookmarksMenuFolder, "Parent",
PlacesUtils.bookmarks.DEFAULT_INDEX);
_("Track changes to annos.");
let b = PlacesUtils.bookmarks.insertBookmark(
folder, Utils.makeURI("http://getfirefox.com"),
PlacesUtils.bookmarks.DEFAULT_INDEX, "Get Firefox!");
- let bGUID = engine._store.GUIDForId(b);
+ let bGUID = yield engine._store.GUIDForId(b);
_("New item is " + b);
_("GUID: " + bGUID);
Svc.Obs.notify("weave:engine:start-tracking");
PlacesUtils.annotations.setItemAnnotation(
b, DESCRIPTION_ANNO, "A test description", 0,
PlacesUtils.annotations.EXPIRE_NEVER);
do_check_true(tracker.changedIDs[bGUID] > 0);
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE);
} finally {
_("Clean up.");
- store.wipe();
- tracker.clearChangedIDs();
+ yield store.wipe();
+ yield tracker.clearChangedIDs();
tracker.resetScore();
Svc.Obs.notify("weave:engine:stop-tracking");
}
-}
+});
-function test_onItemMoved() {
+add_task(function* test_onItemMoved() {
_("Verify we've got an empty tracker to work with.");
let tracker = engine._tracker;
do_check_empty(tracker.changedIDs);
do_check_eq(tracker.score, 0);
try {
let fx_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.bookmarksMenuFolder,
Utils.makeURI("http://getfirefox.com"),
PlacesUtils.bookmarks.DEFAULT_INDEX,
"Get Firefox!");
- let fx_guid = engine._store.GUIDForId(fx_id);
+ let fx_guid = yield engine._store.GUIDForId(fx_id);
let tb_id = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.bookmarksMenuFolder,
Utils.makeURI("http://getthunderbird.com"),
PlacesUtils.bookmarks.DEFAULT_INDEX,
"Get Thunderbird!");
- let tb_guid = engine._store.GUIDForId(tb_id);
+ let tb_guid = yield engine._store.GUIDForId(tb_id);
Svc.Obs.notify("weave:engine:start-tracking");
// Moving within the folder will just track the folder.
PlacesUtils.bookmarks.moveItem(
tb_id, PlacesUtils.bookmarks.bookmarksMenuFolder, 0);
do_check_true(tracker.changedIDs['menu'] > 0);
do_check_eq(tracker.changedIDs['toolbar'], undefined);
do_check_eq(tracker.changedIDs[fx_guid], undefined);
do_check_eq(tracker.changedIDs[tb_guid], undefined);
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE);
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
tracker.resetScore();
// Moving a bookmark to a different folder will track the old
// folder, the new folder and the bookmark.
PlacesUtils.bookmarks.moveItem(tb_id, PlacesUtils.bookmarks.toolbarFolder,
PlacesUtils.bookmarks.DEFAULT_INDEX);
do_check_true(tracker.changedIDs['menu'] > 0);
do_check_true(tracker.changedIDs['toolbar'] > 0);
do_check_eq(tracker.changedIDs[fx_guid], undefined);
do_check_true(tracker.changedIDs[tb_guid] > 0);
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE * 3);
} finally {
_("Clean up.");
- store.wipe();
- tracker.clearChangedIDs();
+ yield store.wipe();
+ yield tracker.clearChangedIDs();
tracker.resetScore();
Svc.Obs.notify("weave:engine:stop-tracking");
}
-}
+});
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Bookmarks").level = Log.Level.Trace;
Log.repository.getLogger("Sync.Store.Bookmarks").level = Log.Level.Trace;
Log.repository.getLogger("Sync.Tracker.Bookmarks").level = Log.Level.Trace;
- test_tracking();
- test_onItemChanged();
- test_onItemMoved();
+ run_next_test();
}
--- a/services/sync/tests/unit/test_clients_engine.js
+++ b/services/sync/tests/unit/test_clients_engine.js
@@ -32,17 +32,17 @@ function check_record_version(user, id)
_("Payload is " + JSON.stringify(cleartext));
do_check_eq(Services.appinfo.version, cleartext.version);
do_check_eq(2, cleartext.protocols.length);
do_check_eq("1.1", cleartext.protocols[0]);
do_check_eq("1.5", cleartext.protocols[1]);
}
-add_test(function test_bad_hmac() {
+add_task(function* test_bad_hmac() {
_("Ensure that Clients engine deletes corrupt records.");
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
let deletedCollections = [];
@@ -68,137 +68,136 @@ add_test(function test_bad_hmac() {
}
function check_client_deleted(id) {
let coll = user.collection("clients");
let wbo = coll.wbo(id);
return !wbo || !wbo.payload;
}
- function uploadNewKeys() {
+ function* uploadNewKeys() {
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
- do_check_true(serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success);
+ do_check_true((yield serverKeys.upload(Service.resource(Service.cryptoKeysURL))).success);
}
try {
ensureLegacyIdentityManager();
let passphrase = "abcdeabcdeabcdeabcdeabcdea";
Service.serverURL = server.baseURI;
- Service.login("foo", "ilovejane", passphrase);
+ yield Service.login("foo", "ilovejane", passphrase);
generateNewKeys(Service.collectionKeys);
_("First sync, client record is uploaded");
do_check_eq(engine.lastRecordUpload, 0);
check_clients_count(0);
- engine._sync();
+ yield engine._sync();
check_clients_count(1);
do_check_true(engine.lastRecordUpload > 0);
// Our uploaded record has a version.
check_record_version(user, engine.localID);
// Initial setup can wipe the server, so clean up.
deletedCollections = [];
deletedItems = [];
_("Change our keys and our client ID, reupload keys.");
let oldLocalID = engine.localID; // Preserve to test for deletion!
engine.localID = Utils.makeGUID();
- engine.resetClient();
+ yield engine.resetClient();
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
- do_check_true(serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success);
+ do_check_true((yield serverKeys.upload(Service.resource(Service.cryptoKeysURL))).success);
_("Sync.");
- engine._sync();
+ yield engine._sync();
_("Old record " + oldLocalID + " was deleted, new one uploaded.");
check_clients_count(1);
check_client_deleted(oldLocalID);
_("Now change our keys but don't upload them. " +
"That means we get an HMAC error but redownload keys.");
Service.lastHMACEvent = 0;
engine.localID = Utils.makeGUID();
- engine.resetClient();
+ yield engine.resetClient();
generateNewKeys(Service.collectionKeys);
deletedCollections = [];
deletedItems = [];
check_clients_count(1);
- engine._sync();
+ yield engine._sync();
_("Old record was not deleted, new one uploaded.");
do_check_eq(deletedCollections.length, 0);
do_check_eq(deletedItems.length, 0);
check_clients_count(2);
_("Now try the scenario where our keys are wrong *and* there's a bad record.");
// Clean up and start fresh.
user.collection("clients")._wbos = {};
Service.lastHMACEvent = 0;
engine.localID = Utils.makeGUID();
- engine.resetClient();
+ yield engine.resetClient();
deletedCollections = [];
deletedItems = [];
check_clients_count(0);
- uploadNewKeys();
+ yield uploadNewKeys();
// Sync once to upload a record.
- engine._sync();
+ yield engine._sync();
check_clients_count(1);
// Generate and upload new keys, so the old client record is wrong.
- uploadNewKeys();
+ yield uploadNewKeys();
// Create a new client record and new keys. Now our keys are wrong, as well
// as the object on the server. We'll download the new keys and also delete
// the bad client record.
oldLocalID = engine.localID; // Preserve to test for deletion!
engine.localID = Utils.makeGUID();
- engine.resetClient();
+ yield engine.resetClient();
generateNewKeys(Service.collectionKeys);
let oldKey = Service.collectionKeys.keyForCollection();
do_check_eq(deletedCollections.length, 0);
do_check_eq(deletedItems.length, 0);
- engine._sync();
+ yield engine._sync();
do_check_eq(deletedItems.length, 1);
check_client_deleted(oldLocalID);
check_clients_count(1);
let newKey = Service.collectionKeys.keyForCollection();
do_check_false(oldKey.equals(newKey));
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield server.promiseStop();
}
});
-add_test(function test_properties() {
+add_task(function* test_properties() {
_("Test lastRecordUpload property");
try {
do_check_eq(Svc.Prefs.get("clients.lastRecordUpload"), undefined);
do_check_eq(engine.lastRecordUpload, 0);
let now = Date.now();
engine.lastRecordUpload = now / 1000;
do_check_eq(engine.lastRecordUpload, Math.floor(now / 1000));
} finally {
Svc.Prefs.resetBranch("");
- run_next_test();
}
});
-add_test(function test_sync() {
+add_task(function* test_sync() {
_("Ensure that Clients engine uploads a new client record once a week.");
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
@@ -212,109 +211,105 @@ add_test(function test_sync() {
return user.collection("clients").wbo(engine.localID);
}
try {
_("First sync. Client record is uploaded.");
do_check_eq(clientWBO(), undefined);
do_check_eq(engine.lastRecordUpload, 0);
- engine._sync();
+ yield engine._sync();
do_check_true(!!clientWBO().payload);
do_check_true(engine.lastRecordUpload > 0);
_("Let's time travel more than a week back, new record should've been uploaded.");
engine.lastRecordUpload -= MORE_THAN_CLIENTS_TTL_REFRESH;
let lastweek = engine.lastRecordUpload;
clientWBO().payload = undefined;
- engine._sync();
+ yield engine._sync();
do_check_true(!!clientWBO().payload);
do_check_true(engine.lastRecordUpload > lastweek);
_("Remove client record.");
- engine.removeClientData();
+ yield engine.removeClientData();
do_check_eq(clientWBO().payload, undefined);
_("Time travel one day back, no record uploaded.");
engine.lastRecordUpload -= LESS_THAN_CLIENTS_TTL_REFRESH;
let yesterday = engine.lastRecordUpload;
- engine._sync();
+ yield engine._sync();
do_check_eq(clientWBO().payload, undefined);
do_check_eq(engine.lastRecordUpload, yesterday);
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield server.promiseStop();
}
});
-add_test(function test_client_name_change() {
+add_task(function* test_client_name_change() {
_("Ensure client name change incurs a client record update.");
let tracker = engine._tracker;
let localID = engine.localID;
let initialName = engine.localName;
Svc.Obs.notify("weave:engine:start-tracking");
_("initial name: " + initialName);
// Tracker already has data, so clear it.
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
let initialScore = tracker.score;
do_check_eq(Object.keys(tracker.changedIDs).length, 0);
Svc.Prefs.set("client.name", "new name");
_("new name: " + engine.localName);
do_check_neq(initialName, engine.localName);
do_check_eq(Object.keys(tracker.changedIDs).length, 1);
do_check_true(engine.localID in tracker.changedIDs);
do_check_true(tracker.score > initialScore);
do_check_true(tracker.score >= SCORE_INCREMENT_XLARGE);
Svc.Obs.notify("weave:engine:stop-tracking");
-
- run_next_test();
});
-add_test(function test_send_command() {
+add_task(function* test_send_command() {
_("Verifies _sendCommandToClient puts commands in the outbound queue.");
let store = engine._store;
let tracker = engine._tracker;
let remoteId = Utils.makeGUID();
let rec = new ClientsRec("clients", remoteId);
- store.create(rec);
- let remoteRecord = store.createRecord(remoteId, "clients");
+ yield store.create(rec);
+ let remoteRecord = yield store.createRecord(remoteId, "clients");
let action = "testCommand";
let args = ["foo", "bar"];
- engine._sendCommandToClient(action, args, remoteId);
+ yield engine._sendCommandToClient(action, args, remoteId);
let newRecord = store._remoteClients[remoteId];
do_check_neq(newRecord, undefined);
do_check_eq(newRecord.commands.length, 1);
let command = newRecord.commands[0];
do_check_eq(command.command, action);
do_check_eq(command.args.length, 2);
do_check_eq(command.args, args);
do_check_neq(tracker.changedIDs[remoteId], undefined);
-
- run_next_test();
});
-add_test(function test_command_validation() {
+add_task(function* test_command_validation() {
_("Verifies that command validation works properly.");
let store = engine._store;
let testCommands = [
["resetAll", [], true ],
["resetAll", ["foo"], false],
["resetEngine", ["tabs"], true ],
@@ -327,20 +322,20 @@ add_test(function test_command_validatio
["logout", ["foo"], false],
["__UNKNOWN__", [], false]
];
for each (let [action, args, expectedResult] in testCommands) {
let remoteId = Utils.makeGUID();
let rec = new ClientsRec("clients", remoteId);
- store.create(rec);
- store.createRecord(remoteId, "clients");
+ yield store.create(rec);
+ yield store.createRecord(remoteId, "clients");
- engine.sendCommand(action, args, remoteId);
+ yield engine.sendCommand(action, args, remoteId);
let newRecord = store._remoteClients[remoteId];
do_check_neq(newRecord, undefined);
if (expectedResult) {
_("Ensuring command is sent: " + action);
do_check_eq(newRecord.commands.length, 1);
@@ -355,91 +350,81 @@ add_test(function test_command_validatio
do_check_eq(newRecord.commands, undefined);
if (store._tracker) {
do_check_eq(engine._tracker[remoteId], undefined);
}
}
}
- run_next_test();
});
-add_test(function test_command_duplication() {
+add_task(function* test_command_duplication() {
_("Ensures duplicate commands are detected and not added");
let store = engine._store;
let remoteId = Utils.makeGUID();
let rec = new ClientsRec("clients", remoteId);
- store.create(rec);
- store.createRecord(remoteId, "clients");
+ yield store.create(rec);
+ yield store.createRecord(remoteId, "clients");
let action = "resetAll";
let args = [];
- engine.sendCommand(action, args, remoteId);
- engine.sendCommand(action, args, remoteId);
+ yield engine.sendCommand(action, args, remoteId);
+ yield engine.sendCommand(action, args, remoteId);
let newRecord = store._remoteClients[remoteId];
do_check_eq(newRecord.commands.length, 1);
_("Check variant args length");
newRecord.commands = [];
action = "resetEngine";
- engine.sendCommand(action, [{ x: "foo" }], remoteId);
- engine.sendCommand(action, [{ x: "bar" }], remoteId);
+ yield engine.sendCommand(action, [{ x: "foo" }], remoteId);
+ yield engine.sendCommand(action, [{ x: "bar" }], remoteId);
_("Make sure we spot a real dupe argument.");
- engine.sendCommand(action, [{ x: "bar" }], remoteId);
+ yield engine.sendCommand(action, [{ x: "bar" }], remoteId);
do_check_eq(newRecord.commands.length, 2);
-
- run_next_test();
});
-add_test(function test_command_invalid_client() {
+add_task(function* test_command_invalid_client() {
_("Ensures invalid client IDs are caught");
let id = Utils.makeGUID();
let error;
try {
- engine.sendCommand("wipeAll", [], id);
+ yield engine.sendCommand("wipeAll", [], id);
} catch (ex) {
error = ex;
}
do_check_eq(error.message.indexOf("Unknown remote client ID: "), 0);
-
- run_next_test();
});
-add_test(function test_process_incoming_commands() {
+add_task(function* test_process_incoming_commands() {
_("Ensures local commands are executed");
engine.localCommands = [{ command: "logout", args: [] }];
let ev = "weave:service:logout:finish";
-
- var handler = function() {
- Svc.Obs.remove(ev, handler);
- run_next_test();
- };
-
- Svc.Obs.add(ev, handler);
+ let promiseHandlerCalled = promiseOneObserver(ev);
// logout command causes processIncomingCommands to return explicit false.
- do_check_false(engine.processIncomingCommands());
+ do_check_false(yield engine.processIncomingCommands());
+ yield promiseHandlerCalled;
});
-add_test(function test_command_sync() {
+add_task(function* test_command_sync() {
_("Ensure that commands are synced across clients.");
- engine._store.wipe();
+ yield engine._store.wipe();
generateNewKeys(Service.collectionKeys);
let contents = {
meta: {global: {engines: {clients: {version: engine.version,
syncID: engine.syncID}}}},
clients: {},
crypto: {}
};
@@ -450,70 +435,70 @@ add_test(function test_command_sync() {
let remoteId = Utils.makeGUID();
function clientWBO(id) {
return user.collection("clients").wbo(id);
}
_("Create remote client record");
let rec = new ClientsRec("clients", remoteId);
- engine._store.create(rec);
- let remoteRecord = engine._store.createRecord(remoteId, "clients");
- engine.sendCommand("wipeAll", []);
+ yield engine._store.create(rec);
+ let remoteRecord = yield engine._store.createRecord(remoteId, "clients");
+ yield engine.sendCommand("wipeAll", []);
let clientRecord = engine._store._remoteClients[remoteId];
do_check_neq(clientRecord, undefined);
do_check_eq(clientRecord.commands.length, 1);
try {
_("Syncing.");
- engine._sync();
+ yield engine._sync();
_("Checking record was uploaded.");
do_check_neq(clientWBO(engine.localID).payload, undefined);
do_check_true(engine.lastRecordUpload > 0);
do_check_neq(clientWBO(remoteId).payload, undefined);
Svc.Prefs.set("client.GUID", remoteId);
- engine._resetClient();
+ yield engine._resetClient();
do_check_eq(engine.localID, remoteId);
_("Performing sync on resetted client.");
- engine._sync();
+ yield engine._sync();
do_check_neq(engine.localCommands, undefined);
do_check_eq(engine.localCommands.length, 1);
let command = engine.localCommands[0];
do_check_eq(command.command, "wipeAll");
do_check_eq(command.args.length, 0);
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield server.promiseStop();
}
});
-add_test(function test_send_uri_to_client_for_display() {
+add_task(function* test_send_uri_to_client_for_display() {
_("Ensure sendURIToClientForDisplay() sends command properly.");
let tracker = engine._tracker;
let store = engine._store;
let remoteId = Utils.makeGUID();
let rec = new ClientsRec("clients", remoteId);
rec.name = "remote";
- store.create(rec);
- let remoteRecord = store.createRecord(remoteId, "clients");
+ yield store.create(rec);
+ let remoteRecord = yield store.createRecord(remoteId, "clients");
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
let initialScore = tracker.score;
let uri = "http://www.mozilla.org/";
let title = "Title of the Page";
- engine.sendURIToClientForDisplay(uri, remoteId, title);
+ yield engine.sendURIToClientForDisplay(uri, remoteId, title);
let newRecord = store._remoteClients[remoteId];
do_check_neq(newRecord, undefined);
do_check_eq(newRecord.commands.length, 1);
let command = newRecord.commands[0];
do_check_eq(command.command, "displayURI");
@@ -525,27 +510,25 @@ add_test(function test_send_uri_to_clien
do_check_true(tracker.score > initialScore);
do_check_true(tracker.score - initialScore >= SCORE_INCREMENT_XLARGE);
_("Ensure unknown client IDs result in exception.");
let unknownId = Utils.makeGUID();
let error;
try {
- engine.sendURIToClientForDisplay(uri, unknownId);
+ yield engine.sendURIToClientForDisplay(uri, unknownId);
} catch (ex) {
error = ex;
}
do_check_eq(error.message.indexOf("Unknown remote client ID: "), 0);
-
- run_next_test();
});
-add_test(function test_receive_display_uri() {
+add_task(function* test_receive_display_uri() {
_("Ensure processing of received 'displayURI' commands works.");
// We don't set up WBOs and perform syncing because other tests verify
// the command API works as advertised. This saves us a little work.
let uri = "http://www.mozilla.org/";
let remoteId = Utils.makeGUID();
let title = "Page Title!";
@@ -554,39 +537,31 @@ add_test(function test_receive_display_u
command: "displayURI",
args: [uri, remoteId, title],
};
engine.localCommands = [command];
// Received 'displayURI' command should result in the topic defined below
// being called.
- let ev = "weave:engine:clients:display-uri";
-
- let handler = function(subject, data) {
- Svc.Obs.remove(ev, handler);
+ let promiseHandlerCalled = promiseOneObserver("weave:engine:clients:display-uri");
- do_check_eq(subject.uri, uri);
- do_check_eq(subject.client, remoteId);
- do_check_eq(subject.title, title);
- do_check_eq(data, null);
-
- run_next_test();
- };
-
- Svc.Obs.add(ev, handler);
-
- do_check_true(engine.processIncomingCommands());
+ do_check_true(yield engine.processIncomingCommands());
+ let { subject, data } = yield promiseHandlerCalled;
+ do_check_eq(subject.uri, uri);
+ do_check_eq(subject.client, remoteId);
+ do_check_eq(subject.title, title);
+ do_check_eq(data, null);
});
-add_test(function test_optional_client_fields() {
+add_task(function* test_optional_client_fields() {
_("Ensure that we produce records with the fields added in Bug 1097222.");
const SUPPORTED_PROTOCOL_VERSIONS = ["1.1", "1.5"];
- let local = engine._store.createRecord(engine.localID, "clients");
+ let local = yield engine._store.createRecord(engine.localID, "clients");
do_check_eq(local.name, engine.localName);
do_check_eq(local.type, engine.localType);
do_check_eq(local.version, Services.appinfo.version);
do_check_array_eq(local.protocols, SUPPORTED_PROTOCOL_VERSIONS);
// Optional fields.
// Make sure they're what they ought to be...
do_check_eq(local.os, Services.appinfo.OS);
@@ -594,17 +569,15 @@ add_test(function test_optional_client_f
// ... and also that they're non-empty.
do_check_true(!!local.os);
do_check_true(!!local.appPackage);
do_check_true(!!local.application);
// We don't currently populate device or formfactor.
// See Bug 1100722, Bug 1100723.
-
- run_next_test();
});
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Clients").level = Log.Level.Trace;
run_next_test();
}
--- a/services/sync/tests/unit/test_clients_escape.js
+++ b/services/sync/tests/unit/test_clients_escape.js
@@ -8,32 +8,36 @@ Cu.import("resource://services-sync/util
Cu.import("resource://testing-common/services/sync/utils.js");
function run_test() {
_("Set up test fixtures.");
ensureLegacyIdentityManager();
Service.identity.username = "john@example.com";
Service.clusterURL = "http://fakebase/";
+ run_next_test();
+}
+
+add_task(function* () {
let baseUri = "http://fakebase/1.1/foo/storage/";
let pubUri = baseUri + "keys/pubkey";
let privUri = baseUri + "keys/privkey";
Service.identity.syncKey = "abcdeabcdeabcdeabcdeabcdea";
let keyBundle = Service.identity.syncKeyBundle;
let engine = Service.clientsEngine;
try {
_("Test that serializing client records results in uploadable ascii");
engine.localID = "ascii";
engine.localName = "wéävê";
_("Make sure we have the expected record");
- let record = engine._createRecord("ascii");
+ let record = yield engine._createRecord("ascii");
do_check_eq(record.id, "ascii");
do_check_eq(record.name, "wéävê");
_("Encrypting record...");
record.encrypt(keyBundle);
_("Encrypted.");
let serialized = JSON.stringify(record);
@@ -50,15 +54,15 @@ function run_test() {
do_check_eq(checkCount, serialized.length);
_("Making sure the record still looks like it did before");
record.decrypt(keyBundle);
do_check_eq(record.id, "ascii");
do_check_eq(record.name, "wéävê");
_("Sanity check that creating the record also gives the same");
- record = engine._createRecord("ascii");
+ record = yield engine._createRecord("ascii");
do_check_eq(record.id, "ascii");
do_check_eq(record.name, "wéävê");
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
--- a/services/sync/tests/unit/test_collection_inc_get.js
+++ b/services/sync/tests/unit/test_collection_inc_get.js
@@ -1,16 +1,20 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
_("Make sure Collection can correctly incrementally parse GET requests");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/service.js");
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
let base = "http://fake/";
let coll = new Collection("http://fake/uri/", WBORecord, Service);
let stream = { _data: "" };
let called, recCount, sum;
_("Not-JSON, string payloads are strings");
called = false;
stream._data = '{"id":"hello","payload":"world"}\n';
@@ -180,9 +184,9 @@ function run_test() {
coll._onProgress.call(stream);
_("should still have 3 records with sum 111");
do_check_eq(recCount, 3);
do_check_eq(sum, 111);
_("should have consumed nothing but still have nothing");
do_check_eq(stream._data, "");
do_check_false(called);
_("\n");
-}
+});
--- a/services/sync/tests/unit/test_collections_recovery.js
+++ b/services/sync/tests/unit/test_collections_recovery.js
@@ -38,45 +38,43 @@ add_identity_test(this, function test_mi
johnU(coll, new ServerCollection({}, true).handler());
}
let server = httpd_setup(handlers);
Service.serverURL = server.baseURI;
try {
let fresh = 0;
let orig = Service._freshStart;
- Service._freshStart = function() {
+ Service._freshStart = Task.async(function*() {
_("Called _freshStart.");
- orig.call(Service);
+ yield orig.call(Service);
fresh++;
- };
+ });
_("Startup, no meta/global: freshStart called once.");
- Service.sync();
+ yield Service.sync();
do_check_eq(fresh, 1);
fresh = 0;
_("Regular sync: no need to freshStart.");
- Service.sync();
+ yield Service.sync();
do_check_eq(fresh, 0);
_("Simulate a bad info/collections.");
delete johnColls.crypto;
- Service.sync();
+ yield Service.sync();
do_check_eq(fresh, 1);
fresh = 0;
_("Regular sync: no need to freshStart.");
- Service.sync();
+ yield Service.sync();
do_check_eq(fresh, 0);
} finally {
Svc.Prefs.resetBranch("");
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- yield deferred.promise;
+ yield promiseStopServer(server);
}
});
function run_test() {
initTestLogging("Trace");
run_next_test();
}
--- a/services/sync/tests/unit/test_corrupt_keys.js
+++ b/services/sync/tests/unit/test_corrupt_keys.js
@@ -14,20 +14,20 @@ Cu.import("resource://services-sync/util
Cu.import("resource://testing-common/services/sync/utils.js");
Cu.import("resource://gre/modules/Promise.jsm");
add_task(function test_locally_changed_keys() {
let passphrase = "abcdeabcdeabcdeabcdeabcdea";
let hmacErrorCount = 0;
function counting(f) {
- return function() {
+ return Task.async(function*() {
hmacErrorCount++;
- return f.call(this);
- };
+ return (yield f.call(this));
+ });
}
Service.handleHMACEvent = counting(Service.handleHMACEvent);
let server = new SyncServer();
let johndoe = server.registerUser("johndoe", "password");
johndoe.createContents({
meta: {},
@@ -74,24 +74,24 @@ add_task(function test_locally_changed_k
m.upload(Service.resource(Service.metaURL));
_("New meta/global: " + JSON.stringify(johndoe.collection("meta").wbo("global")));
// Upload keys.
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
- do_check_true(serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success);
+ do_check_true((yield serverKeys.upload(Service.resource(Service.cryptoKeysURL))).success);
// Check that login works.
- do_check_true(Service.login("johndoe", "ilovejane", passphrase));
+ do_check_true(yield Service.login("johndoe", "ilovejane", passphrase));
do_check_true(Service.isLoggedIn);
// Sync should upload records.
- Service.sync();
+ yield Service.sync();
// Tabs exist.
_("Tabs modified: " + johndoe.modified("tabs"));
do_check_true(johndoe.modified("tabs") > 0);
let coll_modified = Service.collectionKeys.lastModified;
// Let's create some server side history records.
@@ -120,36 +120,36 @@ add_task(function test_locally_changed_k
}
history.timestamp = Date.now() / 1000;
let old_key_time = johndoe.modified("crypto");
_("Old key time: " + old_key_time);
// Check that we can decrypt one.
let rec = new CryptoWrapper("history", "record-no--0");
- rec.fetch(Service.resource(Service.storageURL + "history/record-no--0"));
+ yield rec.fetch(Service.resource(Service.storageURL + "history/record-no--0"));
_(JSON.stringify(rec));
do_check_true(!!rec.decrypt(liveKeys));
do_check_eq(hmacErrorCount, 0);
// Fill local key cache with bad data.
corrupt_local_keys();
_("Keys now: " + Service.collectionKeys.keyForCollection("history").keyPair);
do_check_eq(hmacErrorCount, 0);
_("HMAC error count: " + hmacErrorCount);
// Now syncing should succeed, after one HMAC error.
- Service.sync();
+ yield Service.sync();
do_check_eq(hmacErrorCount, 1);
_("Keys now: " + Service.collectionKeys.keyForCollection("history").keyPair);
// And look! We downloaded history!
- let store = Service.engineManager.get("history")._store;
+ let store = (yield Service.engineManager.get("history"))._store;
do_check_true(yield promiseIsURIVisited("http://foo/bar?record-no--0"));
do_check_true(yield promiseIsURIVisited("http://foo/bar?record-no--1"));
do_check_true(yield promiseIsURIVisited("http://foo/bar?record-no--2"));
do_check_true(yield promiseIsURIVisited("http://foo/bar?record-no--3"));
do_check_true(yield promiseIsURIVisited("http://foo/bar?record-no--4"));
do_check_eq(hmacErrorCount, 1);
_("Busting some new server values.");
@@ -178,31 +178,29 @@ add_task(function test_locally_changed_k
_("Server key time hasn't changed.");
do_check_eq(johndoe.modified("crypto"), old_key_time);
_("Resetting HMAC error timer.");
Service.lastHMACEvent = 0;
_("Syncing...");
- Service.sync();
+ yield Service.sync();
_("Keys now: " + Service.collectionKeys.keyForCollection("history").keyPair);
_("Server keys have been updated, and we skipped over 5 more HMAC errors without adjusting history.");
do_check_true(johndoe.modified("crypto") > old_key_time);
do_check_eq(hmacErrorCount, 6);
do_check_false(yield promiseIsURIVisited("http://foo/bar?record-no--5"));
do_check_false(yield promiseIsURIVisited("http://foo/bar?record-no--6"));
do_check_false(yield promiseIsURIVisited("http://foo/bar?record-no--7"));
do_check_false(yield promiseIsURIVisited("http://foo/bar?record-no--8"));
do_check_false(yield promiseIsURIVisited("http://foo/bar?record-no--9"));
} finally {
Svc.Prefs.resetBranch("");
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- yield deferred.promise;
+ yield server.promiseStop();
}
});
function run_test() {
let logger = Log.repository.rootLogger;
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
ensureLegacyIdentityManager();
--- a/services/sync/tests/unit/test_engine.js
+++ b/services/sync/tests/unit/test_engine.js
@@ -1,26 +1,27 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://services-common/observers.js");
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
+Cu.import("resource://gre/modules/PromiseUtils.jsm");
function SteamStore(engine) {
Store.call(this, "Steam", engine);
this.wasWiped = false;
}
SteamStore.prototype = {
__proto__: Store.prototype,
- wipe: function() {
+ wipe: Task.async(function*() {
this.wasWiped = true;
- }
+ }),
};
function SteamTracker(name, engine) {
Tracker.call(this, name || "Steam", engine);
}
SteamTracker.prototype = {
__proto__: Tracker.prototype
};
@@ -30,23 +31,23 @@ function SteamEngine(name, service) {
this.wasReset = false;
this.wasSynced = false;
}
SteamEngine.prototype = {
__proto__: Engine.prototype,
_storeObj: SteamStore,
_trackerObj: SteamTracker,
- _resetClient: function () {
+ _resetClient: Task.async(function* () {
this.wasReset = true;
- },
+ }),
- _sync: function () {
+ _sync: Task.async(function* () {
this.wasSynced = true;
- }
+ }),
};
var engineObserver = {
topics: [],
observe: function(subject, topic, data) {
do_check_eq(data, "steam");
this.topics.push(topic);
@@ -90,67 +91,69 @@ add_test(function test_score() {
// Setting an attribute that has a getter produces an error in
// Firefox <= 3.6 and is ignored in later versions. Either way,
// the attribute's value won't change.
}
do_check_eq(engine.score, 5);
run_next_test();
});
-add_test(function test_resetClient() {
+add_task(function* test_resetClient() {
_("Engine.resetClient calls _resetClient");
let engine = new SteamEngine("Steam", Service);
do_check_false(engine.wasReset);
- engine.resetClient();
+ yield engine.resetClient();
do_check_true(engine.wasReset);
do_check_eq(engineObserver.topics[0], "weave:engine:reset-client:start");
do_check_eq(engineObserver.topics[1], "weave:engine:reset-client:finish");
engine.wasReset = false;
engineObserver.reset();
- run_next_test();
});
-add_test(function test_invalidChangedIDs() {
+add_task(function* test_invalidChangedIDs() {
_("Test that invalid changed IDs on disk don't end up live.");
let engine = new SteamEngine("Steam", Service);
let tracker = engine._tracker;
tracker.changedIDs = 5;
- tracker.saveChangedIDs(function onSaved() {
+ let deferred = PromiseUtils.defer();
+ yield tracker.saveChangedIDs(function onSaved() {
tracker.changedIDs = {placeholder: true};
- tracker.loadChangedIDs(function onLoaded(json) {
- do_check_null(json);
- do_check_true(tracker.changedIDs.placeholder);
- run_next_test();
- });
+ tracker.loadChangedIDs().then(
+ json => {
+ do_check_null(json);
+ do_check_true(tracker.changedIDs.placeholder);
+ deferred.resolve();
+ }
+ );
});
+ yield deferred.promise;
});
-add_test(function test_wipeClient() {
+add_task(function* test_wipeClient() {
_("Engine.wipeClient calls resetClient, wipes store, clears changed IDs");
let engine = new SteamEngine("Steam", Service);
do_check_false(engine.wasReset);
do_check_false(engine._store.wasWiped);
- do_check_true(engine._tracker.addChangedID("a-changed-id"));
+ do_check_true(yield engine._tracker.addChangedID("a-changed-id"));
do_check_true("a-changed-id" in engine._tracker.changedIDs);
- engine.wipeClient();
+ yield engine.wipeClient();
do_check_true(engine.wasReset);
do_check_true(engine._store.wasWiped);
do_check_eq(JSON.stringify(engine._tracker.changedIDs), "{}");
do_check_eq(engineObserver.topics[0], "weave:engine:wipe-client:start");
do_check_eq(engineObserver.topics[1], "weave:engine:reset-client:start");
do_check_eq(engineObserver.topics[2], "weave:engine:reset-client:finish");
do_check_eq(engineObserver.topics[3], "weave:engine:wipe-client:finish");
engine.wasReset = false;
engine._store.wasWiped = false;
engineObserver.reset();
- run_next_test();
});
add_test(function test_enabled() {
_("Engine.enabled corresponds to preference");
let engine = new SteamEngine("Steam", Service);
try {
do_check_false(engine.enabled);
Svc.Prefs.set("engine.steam", true);
@@ -159,40 +162,39 @@ add_test(function test_enabled() {
engine.enabled = false;
do_check_false(Svc.Prefs.get("engine.steam"));
run_next_test();
} finally {
Svc.Prefs.resetBranch("");
}
});
-add_test(function test_sync() {
+add_task(function* test_sync() {
let engine = new SteamEngine("Steam", Service);
try {
_("Engine.sync doesn't call _sync if it's not enabled");
do_check_false(engine.enabled);
do_check_false(engine.wasSynced);
- engine.sync();
+ yield engine.sync();
do_check_false(engine.wasSynced);
_("Engine.sync calls _sync if it's enabled");
engine.enabled = true;
- engine.sync();
+ yield engine.sync();
do_check_true(engine.wasSynced);
do_check_eq(engineObserver.topics[0], "weave:engine:sync:start");
do_check_eq(engineObserver.topics[1], "weave:engine:sync:finish");
- run_next_test();
} finally {
Svc.Prefs.resetBranch("");
engine.wasSynced = false;
engineObserver.reset();
}
});
-add_test(function test_disabled_no_track() {
+add_task(function* test_disabled_no_track() {
_("When an engine is disabled, its tracker is not tracking.");
let engine = new SteamEngine("Steam", Service);
let tracker = engine._tracker;
do_check_eq(engine, tracker.engine);
do_check_false(engine.enabled);
do_check_false(tracker._isTracking);
do_check_empty(tracker.changedIDs);
@@ -202,16 +204,14 @@ add_test(function test_disabled_no_track
do_check_false(tracker._isTracking);
do_check_empty(tracker.changedIDs);
engine.enabled = true;
tracker.observe(null, "weave:engine:start-tracking", null);
do_check_true(tracker._isTracking);
do_check_empty(tracker.changedIDs);
- tracker.addChangedID("abcdefghijkl");
+ yield tracker.addChangedID("abcdefghijkl");
do_check_true(0 < tracker.changedIDs["abcdefghijkl"]);
Svc.Prefs.set("engine." + engine.prefName, false);
do_check_false(tracker._isTracking);
do_check_empty(tracker.changedIDs);
-
- run_next_test();
});
--- a/services/sync/tests/unit/test_engine_abort.js
+++ b/services/sync/tests/unit/test_engine_abort.js
@@ -3,17 +3,17 @@
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/rotaryengine.js");
Cu.import("resource://testing-common/services/sync/utils.js");
-add_test(function test_processIncoming_abort() {
+add_task(function* test_processIncoming_abort() {
_("An abort exception, raised in applyIncoming, will abort _processIncoming.");
let engine = new RotaryEngine(Service);
let collection = new ServerCollection();
let id = Utils.makeGUID();
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
@@ -35,35 +35,35 @@ add_test(function test_processIncoming_a
cause: "Nooo"};
_("Throwing: " + JSON.stringify(ex));
throw ex;
};
_("Trying _processIncoming. It will throw after aborting.");
let err;
try {
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
} catch (ex) {
err = ex;
}
do_check_eq(err, "Nooo");
err = undefined;
_("Trying engine.sync(). It will abort without error.");
try {
// This will quietly fail.
- engine.sync();
+ yield engine.sync();
} catch (ex) {
err = ex;
}
do_check_eq(err, undefined);
- server.stop(run_next_test);
+ yield promiseStopServer(server);
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
});
function run_test() {
run_next_test();
}
--- a/services/sync/tests/unit/test_errorhandler.js
+++ b/services/sync/tests/unit/test_errorhandler.js
@@ -6,16 +6,17 @@ Cu.import("resource://services-sync/cons
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/keys.js");
Cu.import("resource://services-sync/policies.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/status.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
Cu.import("resource://gre/modules/FileUtils.jsm");
+Cu.import("resource://gre/modules/PromiseUtils.jsm");
var fakeServer = new SyncServer();
fakeServer.start();
do_register_cleanup(function() {
return new Promise(resolve => {
fakeServer.stop(resolve);
});
@@ -39,23 +40,21 @@ function setLastSync(lastSyncValue) {
function CatapultEngine() {
SyncEngine.call(this, "Catapult", Service);
}
CatapultEngine.prototype = {
__proto__: SyncEngine.prototype,
exception: null, // tests fill this in
_sync: function _sync() {
- if (this.exception) {
- throw this.exception;
- }
+ return this.exception ? Promise.reject(this.exception) : Promise.resolve();
}
};
-var engineManager = Service.engineManager;
+let engineManager = Service.engineManager;
engineManager.register(CatapultEngine);
// This relies on Service/ErrorHandler being a singleton. Fixing this will take
// a lot of work.
var errorHandler = Service.errorHandler;
function run_test() {
initTestLogging("Trace");
@@ -70,17 +69,17 @@ function run_test() {
}
function generateCredentialsChangedFailure() {
// Make sync fail due to changed credentials. We simply re-encrypt
// the keys with a different Sync Key, without changing the local one.
let newSyncKeyBundle = new SyncKeyBundle("johndoe", "23456234562345623456234562");
let keys = Service.collectionKeys.asWBO();
keys.encrypt(newSyncKeyBundle);
- keys.upload(Service.resource(Service.cryptoKeysURL));
+ yield keys.upload(Service.resource(Service.cryptoKeysURL));
}
function service_unavailable(request, response) {
let body = "Service Unavailable";
response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
response.setHeader("Retry-After", "42");
response.bodyOutputStream.write(body, body.length);
}
@@ -146,94 +145,80 @@ function setUp(server) {
() => generateAndUploadKeys()
);
}
function generateAndUploadKeys() {
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
- return serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success;
+ return serverKeys.upload(Service.resource(Service.cryptoKeysURL)).then(
+ result => result.success
+ );
}
function clean() {
- Service.startOver();
- Status.resetSync();
- Status.resetBackoff();
- errorHandler.didReportProlongedError = false;
+ return Service.startOver().then(
+ () => {
+ Status.resetSync();
+ Status.resetBackoff();
+ errorHandler.didReportProlongedError = false;
+ }
+ );
}
add_identity_test(this, function test_401_logout() {
let server = sync_httpd_setup();
yield setUp(server);
// By calling sync, we ensure we're logged in.
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(Service.isLoggedIn);
- let deferred = Promise.defer();
- Svc.Obs.add("weave:service:sync:error", onSyncError);
- function onSyncError() {
- _("Got weave:service:sync:error in first sync.");
- Svc.Obs.remove("weave:service:sync:error", onSyncError);
-
- // Wait for the automatic next sync.
- function onLoginError() {
- _("Got weave:service:login:error in second sync.");
- Svc.Obs.remove("weave:service:login:error", onLoginError);
-
- let errorCount = sumHistogram("WEAVE_STORAGE_AUTH_ERRORS", { key: "info/collections" });
- do_check_eq(errorCount, 2);
-
- let expected = isConfiguredWithLegacyIdentity() ?
- LOGIN_FAILED_LOGIN_REJECTED : LOGIN_FAILED_NETWORK_ERROR;
-
- do_check_eq(Status.login, expected);
- do_check_false(Service.isLoggedIn);
-
- // Clean up.
- Utils.nextTick(function () {
- Service.startOver();
- server.stop(deferred.resolve);
- });
- }
- Svc.Obs.add("weave:service:login:error", onLoginError);
- }
-
+ let promiseSyncError = promiseOneObserver("weave:service:sync:error");
// Make sync fail due to login rejected.
yield configureIdentity({username: "janedoe"});
Service._updateCachedURLs();
_("Starting first sync.");
- Service.sync();
+ yield Service.sync();
_("First sync done.");
- yield deferred.promise;
+ yield promiseSyncError;
+
+ // Wait for the automatic next sync - it should result in a login error.
+ yield promiseOneObserver("weave:service:login:error");
+
+ let expected = isConfiguredWithLegacyIdentity() ?
+ LOGIN_FAILED_LOGIN_REJECTED : LOGIN_FAILED_NETWORK_ERROR;
+ do_check_eq(Status.login, expected);
+ do_check_false(Service.isLoggedIn);
+
+  yield Service.startOver();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_credentials_changed_logout() {
let server = sync_httpd_setup();
yield setUp(server);
// By calling sync, we ensure we're logged in.
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(Service.isLoggedIn);
- generateCredentialsChangedFailure();
- Service.sync();
+ yield generateCredentialsChangedFailure();
+ yield Service.sync();
do_check_eq(Status.sync, CREDENTIALS_CHANGED);
do_check_false(Service.isLoggedIn);
// Clean up.
- Service.startOver();
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- yield deferred.promise;
+ yield Service.startOver();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_no_lastSync_pref() {
// Test reported error.
Status.resetSync();
errorHandler.dontIgnoreErrors = true;
Status.sync = CREDENTIALS_CHANGED;
do_check_true(errorHandler.shouldReportError());
@@ -462,31 +447,29 @@ add_identity_test(this, function test_sh
add_identity_test(this, function test_shouldReportError_master_password() {
_("Test error ignored due to locked master password");
let server = sync_httpd_setup();
yield setUp(server);
// Monkey patch Service.verifyLogin to imitate
// master password being locked.
Service._verifyLogin = Service.verifyLogin;
- Service.verifyLogin = function () {
+ Service.verifyLogin = Task.async(function* () {
Status.login = MASTER_PASSWORD_LOCKED;
return false;
- };
+ });
setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
+ yield Service.sync();
do_check_false(errorHandler.shouldReportError());
// Clean up.
Service.verifyLogin = Service._verifyLogin;
- clean();
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- yield deferred.promise;
+ yield clean();
+ yield promiseStopServer(server);
});
// Test that even if we don't have a cluster URL, a login failure due to
// authentication errors is always reported.
add_identity_test(this, function test_shouldReportLoginFailureWithNoCluster() {
// Ensure no clusterURL - any error not specific to login should not be reported.
Service.serverURL = "";
Service.clusterURL = "";
@@ -506,359 +489,320 @@ add_identity_test(this, function test_sh
// an fxaccounts environment?
add_task(function test_login_syncAndReportErrors_non_network_error() {
// Test non-network errors are reported
// when calling syncAndReportErrors
let server = sync_httpd_setup();
yield setUp(server);
Service.identity.basicPassword = null;
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:login:error", onSyncError);
- do_check_eq(Status.login, LOGIN_FAILED_NO_PASSWORD);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+
+ yield promise;
+ do_check_eq(Status.login, LOGIN_FAILED_NO_PASSWORD);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_sync_syncAndReportErrors_non_network_error() {
// Test non-network errors are reported
// when calling syncAndReportErrors
let server = sync_httpd_setup();
yield setUp(server);
// By calling sync, we ensure we're logged in.
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(Service.isLoggedIn);
- generateCredentialsChangedFailure();
+ yield generateCredentialsChangedFailure();
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- do_check_eq(Status.sync, CREDENTIALS_CHANGED);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
-  errorHandler.syncAndReportErrors();
+  yield errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield promise;
+ do_check_eq(Status.sync, CREDENTIALS_CHANGED);
+ yield clean();
+ yield promiseStopServer(server);
});
// XXX - how to arrange for 'Service.identity.basicPassword = null;' in
// an fxaccounts environment?
add_task(function test_login_syncAndReportErrors_prolonged_non_network_error() {
// Test prolonged, non-network errors are
// reported when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
Service.identity.basicPassword = null;
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:login:error", onSyncError);
- do_check_eq(Status.login, LOGIN_FAILED_NO_PASSWORD);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+ do_check_eq(Status.login, LOGIN_FAILED_NO_PASSWORD);
+
+ yield clean();
+ yield promiseStopServer(server);
+
});
add_identity_test(this, function test_sync_syncAndReportErrors_prolonged_non_network_error() {
// Test prolonged, non-network errors are
// reported when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
// By calling sync, we ensure we're logged in.
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(Service.isLoggedIn);
- generateCredentialsChangedFailure();
+ yield generateCredentialsChangedFailure();
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- do_check_eq(Status.sync, CREDENTIALS_CHANGED);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+ do_check_eq(Status.sync, CREDENTIALS_CHANGED);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_login_syncAndReportErrors_network_error() {
// Test network errors are reported when calling syncAndReportErrors.
yield configureIdentity({username: "broken.wipe"});
Service.serverURL = fakeServerUrl;
Service.clusterURL = fakeServerUrl;
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:login:error", onSyncError);
- do_check_eq(Status.login, LOGIN_FAILED_NETWORK_ERROR);
-
- clean();
- deferred.resolve();
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+ do_check_eq(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+
+ yield clean();
});
-add_test(function test_sync_syncAndReportErrors_network_error() {
+add_task(function test_sync_syncAndReportErrors_network_error() {
// Test network errors are reported when calling syncAndReportErrors.
Services.io.offline = true;
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
-
- Services.io.offline = false;
- clean();
- run_next_test();
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+
+ Services.io.offline = false;
+ yield clean();
});
add_identity_test(this, function test_login_syncAndReportErrors_prolonged_network_error() {
// Test prolonged, network errors are reported
// when calling syncAndReportErrors.
yield configureIdentity({username: "johndoe"});
Service.serverURL = fakeServerUrl;
Service.clusterURL = fakeServerUrl;
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:login:error", onSyncError);
- do_check_eq(Status.login, LOGIN_FAILED_NETWORK_ERROR);
-
- clean();
- deferred.resolve();
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+ do_check_eq(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+ yield clean();
});
-add_test(function test_sync_syncAndReportErrors_prolonged_network_error() {
+add_task(function test_sync_syncAndReportErrors_prolonged_network_error() {
// Test prolonged, network errors are reported
// when calling syncAndReportErrors.
Services.io.offline = true;
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
-
- Services.io.offline = false;
- clean();
- run_next_test();
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+
+ Services.io.offline = false;
+ yield clean();
});
add_task(function test_login_prolonged_non_network_error() {
// Test prolonged, non-network errors are reported
let server = sync_httpd_setup();
yield setUp(server);
Service.identity.basicPassword = null;
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:login:error", onSyncError);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_task(function test_sync_prolonged_non_network_error() {
// Test prolonged, non-network errors are reported
let server = sync_httpd_setup();
yield setUp(server);
// By calling sync, we ensure we're logged in.
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(Service.isLoggedIn);
- generateCredentialsChangedFailure();
+ yield generateCredentialsChangedFailure();
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
+
});
add_identity_test(this, function test_login_prolonged_network_error() {
// Test prolonged, network errors are reported
yield configureIdentity({username: "johndoe"});
Service.serverURL = fakeServerUrl;
Service.clusterURL = fakeServerUrl;
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:login:error", onSyncError);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- deferred.resolve();
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
});
-add_test(function test_sync_prolonged_network_error() {
+add_task(function test_sync_prolonged_network_error() {
// Test prolonged, network errors are reported
Services.io.offline = true;
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- Services.io.offline = false;
- clean();
- run_next_test();
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ Services.io.offline = false;
+ yield clean();
});
add_task(function test_login_non_network_error() {
// Test non-network errors are reported
let server = sync_httpd_setup();
yield setUp(server);
Service.identity.basicPassword = null;
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:login:error", onSyncError);
- do_check_eq(Status.login, LOGIN_FAILED_NO_PASSWORD);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
-  Service.sync();
+  yield Service.sync();
- yield deferred.promise;
+ yield promise;
+
+ do_check_eq(Status.login, LOGIN_FAILED_NO_PASSWORD);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_task(function test_sync_non_network_error() {
// Test non-network errors are reported
let server = sync_httpd_setup();
yield setUp(server);
// By calling sync, we ensure we're logged in.
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(Service.isLoggedIn);
- generateCredentialsChangedFailure();
+ yield generateCredentialsChangedFailure();
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- do_check_eq(Status.sync, CREDENTIALS_CHANGED);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.sync, CREDENTIALS_CHANGED);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_login_network_error() {
yield configureIdentity({username: "johndoe"});
Service.serverURL = fakeServerUrl;
Service.clusterURL = fakeServerUrl;
- let deferred = Promise.defer();
+ let promise = promiseOneObserver("weave:ui:clear-error");
// Test network errors are not reported.
- Svc.Obs.add("weave:ui:clear-error", function onClearError() {
- Svc.Obs.remove("weave:ui:clear-error", onClearError);
-
- do_check_eq(Status.login, LOGIN_FAILED_NETWORK_ERROR);
- do_check_false(errorHandler.didReportProlongedError);
+ setLastSync(NON_PROLONGED_ERROR_DURATION);
+ yield Service.sync();
+ yield promise;
- Services.io.offline = false;
- clean();
- deferred.resolve()
- });
+ do_check_eq(Status.login, LOGIN_FAILED_NETWORK_ERROR);
+ do_check_false(errorHandler.didReportProlongedError);
- setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ Services.io.offline = false;
+ yield clean();
});
-add_test(function test_sync_network_error() {
+add_task(function* test_sync_network_error() {
// Test network errors are not reported.
Services.io.offline = true;
- Svc.Obs.add("weave:ui:sync:finish", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:sync:finish", onUIUpdate);
- do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
- do_check_false(errorHandler.didReportProlongedError);
-
- Services.io.offline = false;
- clean();
- run_next_test();
- });
+ let promise = promiseOneObserver("weave:ui:sync:finish");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ Services.io.offline = false;
+ yield clean();
});
add_identity_test(this, function test_sync_server_maintenance_error() {
// Test server maintenance errors are not reported.
let server = sync_httpd_setup();
yield setUp(server);
const BACKOFF = 42;
@@ -869,32 +813,29 @@ add_identity_test(this, function test_sy
function onSyncError() {
do_throw("Shouldn't get here!");
}
Svc.Obs.add("weave:ui:sync:error", onSyncError);
do_check_eq(Status.service, STATUS_OK);
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:finish", function onSyncFinish() {
- Svc.Obs.remove("weave:ui:sync:finish", onSyncFinish);
-
- do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
- do_check_eq(Status.sync, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:finish");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
+ do_check_eq(Status.sync, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ Svc.Obs.remove("weave:ui:sync:error", onSyncError);
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_info_collections_login_server_maintenance_error() {
// Test info/collections server maintenance errors are not reported.
let server = sync_httpd_setup();
yield setUp(server);
Service.username = "broken.info";
@@ -911,34 +852,31 @@ add_identity_test(this, function test_in
function onUIUpdate() {
do_throw("Shouldn't experience UI update!");
}
Svc.Obs.add("weave:ui:login:error", onUIUpdate);
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:clear-error", function onLoginFinish() {
- Svc.Obs.remove("weave:ui:clear-error", onLoginFinish);
-
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:clear-error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_meta_global_login_server_maintenance_error() {
// Test meta/global server maintenance errors are not reported.
let server = sync_httpd_setup();
yield setUp(server);
yield configureIdentity({username: "broken.meta"});
@@ -954,34 +892,31 @@ add_identity_test(this, function test_me
function onUIUpdate() {
do_throw("Shouldn't get here!");
}
Svc.Obs.add("weave:ui:login:error", onUIUpdate);
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:clear-error", function onLoginFinish() {
- Svc.Obs.remove("weave:ui:clear-error", onLoginFinish);
-
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:clear-error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_crypto_keys_login_server_maintenance_error() {
// Test crypto/keys server maintenance errors are not reported.
let server = sync_httpd_setup();
yield setUp(server);
yield configureIdentity({username: "broken.keys"});
@@ -1000,63 +935,58 @@ add_identity_test(this, function test_cr
function onUIUpdate() {
do_throw("Shouldn't get here!");
}
Svc.Obs.add("weave:ui:login:error", onUIUpdate);
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:clear-error", function onLoginFinish() {
- Svc.Obs.remove("weave:ui:clear-error", onLoginFinish);
-
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:clear-error");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
+ yield clean();
+ yield promiseStopServer(server);
});
add_task(function test_sync_prolonged_server_maintenance_error() {
// Test prolonged server maintenance errors are reported.
let server = sync_httpd_setup();
yield setUp(server);
const BACKOFF = 42;
let engine = engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 503,
headers: {"retry-after": BACKOFF}};
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:sync:error", onUIUpdate);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_info_collections_login_prolonged_server_maintenance_error(){
// Test info/collections prolonged server maintenance errors are reported.
let server = sync_httpd_setup();
yield setUp(server);
yield configureIdentity({username: "broken.info"});
@@ -1064,35 +994,33 @@ add_identity_test(this, function test_in
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_meta_global_login_prolonged_server_maintenance_error(){
// Test meta/global prolonged server maintenance errors are reported.
let server = sync_httpd_setup();
yield setUp(server);
yield configureIdentity({username: "broken.meta"});
@@ -1100,35 +1028,33 @@ add_identity_test(this, function test_me
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_download_crypto_keys_login_prolonged_server_maintenance_error(){
// Test crypto/keys prolonged server maintenance errors are reported.
let server = sync_httpd_setup();
yield setUp(server);
yield configureIdentity({username: "broken.keys"});
@@ -1138,35 +1064,33 @@ add_identity_test(this, function test_do
Service.collectionKeys.clear();
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_upload_crypto_keys_login_prolonged_server_maintenance_error(){
// Test crypto/keys prolonged server maintenance errors are reported.
let server = sync_httpd_setup();
// Start off with an empty account, do not upload a key.
yield configureIdentity({username: "broken.keys"});
@@ -1174,35 +1098,33 @@ add_identity_test(this, function test_up
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_wipeServer_login_prolonged_server_maintenance_error(){
// Test that we report prolonged server maintenance errors that occur whilst
// wiping the server.
let server = sync_httpd_setup();
// Start off with an empty account, do not upload a key.
@@ -1211,109 +1133,103 @@ add_identity_test(this, function test_wi
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_wipeRemote_prolonged_server_maintenance_error(){
// Test that we report prolonged server maintenance errors that occur whilst
// wiping all remote devices.
let server = sync_httpd_setup();
server.registerPathHandler("/1.1/broken.wipe/storage/catapult", service_unavailable);
yield configureIdentity({username: "broken.wipe"});
Service.serverURL = server.baseURI + "/maintenance/";
Service.clusterURL = server.baseURI + "/maintenance/";
- generateAndUploadKeys();
+ yield generateAndUploadKeys();
let engine = engineManager.get("catapult");
engine.exception = null;
engine.enabled = true;
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:sync:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
- do_check_eq(Svc.Prefs.get("firstSync"), "wipeRemote");
- do_check_true(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
Svc.Prefs.set("firstSync", "wipeRemote");
setLastSync(PROLONGED_ERROR_DURATION);
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, PROLONGED_SYNC_FAILURE);
+ do_check_eq(Svc.Prefs.get("firstSync"), "wipeRemote");
+ do_check_true(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_task(function test_sync_syncAndReportErrors_server_maintenance_error() {
// Test server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
const BACKOFF = 42;
let engine = engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 503,
headers: {"retry-after": BACKOFF}};
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:sync:error", onUIUpdate);
- do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
- do_check_eq(Status.sync, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
do_check_eq(Status.service, STATUS_OK);
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
+ do_check_eq(Status.sync, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_info_collections_login_syncAndReportErrors_server_maintenance_error() {
// Test info/collections server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
@@ -1322,35 +1238,33 @@ add_identity_test(this, function test_in
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_meta_global_login_syncAndReportErrors_server_maintenance_error() {
// Test meta/global server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
@@ -1359,35 +1273,33 @@ add_identity_test(this, function test_me
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_download_crypto_keys_login_syncAndReportErrors_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
@@ -1398,35 +1310,33 @@ add_identity_test(this, function test_do
Service.collectionKeys.clear();
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_upload_crypto_keys_login_syncAndReportErrors_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
// Start off with an empty account, do not upload a key.
@@ -1435,35 +1345,33 @@ add_identity_test(this, function test_up
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_wipeServer_login_syncAndReportErrors_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
// Start off with an empty account, do not upload a key.
@@ -1472,110 +1380,104 @@ add_identity_test(this, function test_wi
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_wipeRemote_syncAndReportErrors_server_maintenance_error(){
// Test that we report prolonged server maintenance errors that occur whilst
// wiping all remote devices.
let server = sync_httpd_setup();
yield configureIdentity({username: "broken.wipe"});
Service.serverURL = server.baseURI + "/maintenance/";
Service.clusterURL = server.baseURI + "/maintenance/";
- generateAndUploadKeys();
+ yield generateAndUploadKeys();
let engine = engineManager.get("catapult");
engine.exception = null;
engine.enabled = true;
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:sync:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, SYNC_FAILED);
- do_check_eq(Status.sync, SERVER_MAINTENANCE);
- do_check_eq(Svc.Prefs.get("firstSync"), "wipeRemote");
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
Svc.Prefs.set("firstSync", "wipeRemote");
setLastSync(NON_PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, SYNC_FAILED);
+ do_check_eq(Status.sync, SERVER_MAINTENANCE);
+ do_check_eq(Svc.Prefs.get("firstSync"), "wipeRemote");
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_task(function test_sync_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test prolonged server maintenance errors are
// reported when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
const BACKOFF = 42;
let engine = engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 503,
headers: {"retry-after": BACKOFF}};
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:sync:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:sync:error", onUIUpdate);
- do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
- do_check_eq(Status.sync, SERVER_MAINTENANCE);
- // syncAndReportErrors means dontIgnoreErrors, which means
- // didReportProlongedError not touched.
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:sync:error");
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
+ do_check_eq(Status.sync, SERVER_MAINTENANCE);
+ // syncAndReportErrors means dontIgnoreErrors, which means
+ // didReportProlongedError not touched.
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_info_collections_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test info/collections server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
@@ -1584,37 +1486,35 @@ add_identity_test(this, function test_in
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- // syncAndReportErrors means dontIgnoreErrors, which means
- // didReportProlongedError not touched.
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ // syncAndReportErrors means dontIgnoreErrors, which means
+ // didReportProlongedError not touched.
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_meta_global_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test meta/global server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
@@ -1623,37 +1523,35 @@ add_identity_test(this, function test_me
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- // syncAndReportErrors means dontIgnoreErrors, which means
- // didReportProlongedError not touched.
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ // syncAndReportErrors means dontIgnoreErrors, which means
+ // didReportProlongedError not touched.
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_download_crypto_keys_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
yield setUp(server);
@@ -1664,37 +1562,35 @@ add_identity_test(this, function test_do
Service.collectionKeys.clear();
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- // syncAndReportErrors means dontIgnoreErrors, which means
- // didReportProlongedError not touched.
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ // syncAndReportErrors means dontIgnoreErrors, which means
+ // didReportProlongedError not touched.
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_upload_crypto_keys_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
// Start off with an empty account, do not upload a key.
@@ -1703,37 +1599,35 @@ add_identity_test(this, function test_up
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- // syncAndReportErrors means dontIgnoreErrors, which means
- // didReportProlongedError not touched.
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ // syncAndReportErrors means dontIgnoreErrors, which means
+ // didReportProlongedError not touched.
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_wipeServer_login_syncAndReportErrors_prolonged_server_maintenance_error() {
// Test crypto/keys server maintenance errors are reported
// when calling syncAndReportErrors.
let server = sync_httpd_setup();
// Start off with an empty account, do not upload a key.
@@ -1742,54 +1636,53 @@ add_identity_test(this, function test_wi
Service.clusterURL = server.baseURI + "/maintenance/";
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- let deferred = Promise.defer();
- Svc.Obs.add("weave:ui:login:error", function onUIUpdate() {
- Svc.Obs.remove("weave:ui:login:error", onUIUpdate);
- do_check_true(Status.enforceBackoff);
- do_check_eq(backoffInterval, 42);
- do_check_eq(Status.service, LOGIN_FAILED);
- do_check_eq(Status.login, SERVER_MAINTENANCE);
- // syncAndReportErrors means dontIgnoreErrors, which means
- // didReportProlongedError not touched.
- do_check_false(errorHandler.didReportProlongedError);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:ui:login:error");
do_check_false(Status.enforceBackoff);
do_check_eq(Status.service, STATUS_OK);
setLastSync(PROLONGED_ERROR_DURATION);
- errorHandler.syncAndReportErrors();
- yield deferred.promise;
+ yield errorHandler.syncAndReportErrors();
+ yield promise;
+
+ do_check_true(Status.enforceBackoff);
+ do_check_eq(backoffInterval, 42);
+ do_check_eq(Status.service, LOGIN_FAILED);
+ do_check_eq(Status.login, SERVER_MAINTENANCE);
+ // syncAndReportErrors means dontIgnoreErrors, which means
+ // didReportProlongedError not touched.
+ do_check_false(errorHandler.didReportProlongedError);
+
+ yield clean();
+ yield promiseStopServer(server);
});
add_task(function test_sync_engine_generic_fail() {
let server = sync_httpd_setup();
let engine = engineManager.get("catapult");
engine.enabled = true;
engine.sync = function sync() {
Svc.Obs.notify("weave:engine:sync:error", "", "catapult");
+ return Promise.resolve();
};
let log = Log.repository.getLogger("Sync.ErrorHandler");
Svc.Prefs.set("log.appender.file.logOnError", true);
do_check_eq(Status.engines["catapult"], undefined);
- let deferred = Promise.defer();
+ let deferred = PromiseUtils.defer();
// Don't wait for reset-file-log until the sync is underway.
// This avoids us catching a delayed notification from an earlier test.
Svc.Obs.add("weave:engine:sync:finish", function onEngineFinish() {
Svc.Obs.remove("weave:engine:sync:finish", onEngineFinish);
log.info("Adding reset-file-log observer.");
Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
@@ -1801,82 +1694,78 @@ add_task(function test_sync_engine_gener
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
// Test Error log was written on SYNC_FAILED_PARTIAL.
let entries = logsdir.directoryEntries;
do_check_true(entries.hasMoreElements());
let logfile = entries.getNext().QueryInterface(Ci.nsILocalFile);
do_check_true(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
- clean();
+ clean().then(
+ () => {
+ let syncErrors = sumHistogram("WEAVE_ENGINE_SYNC_ERRORS", { key: "catapult" });
+ do_check_true(syncErrors, 1);
- let syncErrors = sumHistogram("WEAVE_ENGINE_SYNC_ERRORS", { key: "catapult" });
- do_check_true(syncErrors, 1);
-
- server.stop(deferred.resolve);
+ server.stop(deferred.resolve);
+ }
+ );
});
});
do_check_true(yield setUp(server));
- Service.sync();
+ yield Service.sync();
yield deferred.promise;
});
-add_test(function test_logs_on_sync_error_despite_shouldReportError() {
+add_task(function test_logs_on_sync_error_despite_shouldReportError() {
_("Ensure that an error is still logged when weave:service:sync:error " +
"is notified, despite shouldReportError returning false.");
let log = Log.repository.getLogger("Sync.ErrorHandler");
Svc.Prefs.set("log.appender.file.logOnError", true);
log.info("TESTING");
// Ensure that we report no error.
Status.login = MASTER_PASSWORD_LOCKED;
do_check_false(errorHandler.shouldReportError());
- Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
- Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
-
- // Test that error log was written.
- let entries = logsdir.directoryEntries;
- do_check_true(entries.hasMoreElements());
- let logfile = entries.getNext().QueryInterface(Ci.nsILocalFile);
+ let promise = promiseOneObserver("weave:service:reset-file-log");
+ Svc.Obs.notify("weave:service:sync:error", {});
+ yield promise;
+ // Test that error log was written.
+ let entries = logsdir.directoryEntries;
+ do_check_true(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsILocalFile);
do_check_true(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
- clean();
- run_next_test();
- });
- Svc.Obs.notify("weave:service:sync:error", {});
+ yield clean();
});
-add_test(function test_logs_on_login_error_despite_shouldReportError() {
+add_task(function test_logs_on_login_error_despite_shouldReportError() {
_("Ensure that an error is still logged when weave:service:login:error " +
"is notified, despite shouldReportError returning false.");
let log = Log.repository.getLogger("Sync.ErrorHandler");
Svc.Prefs.set("log.appender.file.logOnError", true);
log.info("TESTING");
// Ensure that we report no error.
Status.login = MASTER_PASSWORD_LOCKED;
do_check_false(errorHandler.shouldReportError());
- Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
- Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
+ let promise = promiseOneObserver("weave:service:reset-file-log");
+ Svc.Obs.notify("weave:service:login:error", {});
+ yield promise;
+ // Test that error log was written.
+ let entries = logsdir.directoryEntries;
+ do_check_true(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsILocalFile);
+ do_check_true(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
- // Test that error log was written.
- let entries = logsdir.directoryEntries;
- do_check_true(entries.hasMoreElements());
- let logfile = entries.getNext().QueryInterface(Ci.nsILocalFile);
- do_check_true(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
-
- clean();
- run_next_test();
- });
- Svc.Obs.notify("weave:service:login:error", {});
+ yield clean();
});
// This test should be the last one since it monkeypatches the engine object
// and we should only have one engine object throughout the file (bug 629664).
add_task(function test_engine_applyFailed() {
let server = sync_httpd_setup();
let engine = engineManager.get("catapult");
@@ -1884,30 +1773,27 @@ add_task(function test_engine_applyFaile
delete engine.exception;
engine.sync = function sync() {
Svc.Obs.notify("weave:engine:sync:applied", {newFailed:1}, "catapult");
};
let log = Log.repository.getLogger("Sync.ErrorHandler");
Svc.Prefs.set("log.appender.file.logOnError", true);
- let deferred = Promise.defer();
- Svc.Obs.add("weave:service:reset-file-log", function onResetFileLog() {
- Svc.Obs.remove("weave:service:reset-file-log", onResetFileLog);
-
- do_check_eq(Status.engines["catapult"], ENGINE_APPLY_FAIL);
- do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
-
- // Test Error log was written on SYNC_FAILED_PARTIAL.
- let entries = logsdir.directoryEntries;
- do_check_true(entries.hasMoreElements());
- let logfile = entries.getNext().QueryInterface(Ci.nsILocalFile);
- do_check_true(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
-
- clean();
- server.stop(deferred.resolve);
- });
+ let promise = promiseOneObserver("weave:service:reset-file-log");
do_check_eq(Status.engines["catapult"], undefined);
do_check_true(yield setUp(server));
- Service.sync();
- yield deferred.promise;
+ yield Service.sync();
+ yield promise;
+
+ do_check_eq(Status.engines["catapult"], ENGINE_APPLY_FAIL);
+ do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
+
+ // Test Error log was written on SYNC_FAILED_PARTIAL.
+ let entries = logsdir.directoryEntries;
+ do_check_true(entries.hasMoreElements());
+ let logfile = entries.getNext().QueryInterface(Ci.nsILocalFile);
+ do_check_true(logfile.leafName.startsWith("error-sync-"), logfile.leafName);
+
+ yield clean();
+ yield promiseStopServer(server);
});
--- a/services/sync/tests/unit/test_errorhandler_eol.js
+++ b/services/sync/tests/unit/test_errorhandler_eol.js
@@ -78,17 +78,17 @@ add_identity_test(this, function test_20
do_check_eq("hard-eol", subject.code);
do_check_hard_eol(eh, start);
do_check_eq(Service.scheduler.eolInterval, Service.scheduler.syncInterval);
eh.clearServerAlerts();
server.stop(deferred.resolve);
};
Svc.Obs.add("weave:eol", obs);
- Service._fetchInfo();
+ yield Service._fetchInfo();
Service.scheduler.adjustSyncInterval(); // As if we failed or succeeded in syncing.
yield deferred.promise;
});
add_identity_test(this, function test_513_hard() {
let eh = Service.errorHandler;
let start = Date.now();
let server = sync_httpd_setup(handler513);
@@ -101,17 +101,17 @@ add_identity_test(this, function test_51
do_check_hard_eol(eh, start);
do_check_eq(Service.scheduler.eolInterval, Service.scheduler.syncInterval);
eh.clearServerAlerts();
server.stop(deferred.resolve);
};
Svc.Obs.add("weave:eol", obs);
try {
- Service._fetchInfo();
+ yield Service._fetchInfo();
Service.scheduler.adjustSyncInterval(); // As if we failed or succeeded in syncing.
} catch (ex) {
// Because fetchInfo will fail on a 513.
}
yield deferred.promise;
});
add_identity_test(this, function test_200_soft() {
@@ -126,12 +126,12 @@ add_identity_test(this, function test_20
do_check_eq("soft-eol", subject.code);
do_check_soft_eol(eh, start);
do_check_eq(Service.scheduler.singleDeviceInterval, Service.scheduler.syncInterval);
eh.clearServerAlerts();
server.stop(deferred.resolve);
};
Svc.Obs.add("weave:eol", obs);
- Service._fetchInfo();
+ yield Service._fetchInfo();
Service.scheduler.adjustSyncInterval(); // As if we failed or succeeded in syncing.
yield deferred.promise;
});
--- a/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
+++ b/services/sync/tests/unit/test_errorhandler_sync_checkServerError.js
@@ -11,30 +11,24 @@ Cu.import("resource://services-sync/util
Cu.import("resource://testing-common/services/sync/fakeservices.js");
Cu.import("resource://testing-common/services/sync/utils.js");
initTestLogging("Trace");
var engineManager = Service.engineManager;
engineManager.clear();
-function promiseStopServer(server) {
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- return deferred.promise;
-}
-
function CatapultEngine() {
SyncEngine.call(this, "Catapult", Service);
}
CatapultEngine.prototype = {
__proto__: SyncEngine.prototype,
exception: null, // tests fill this in
_sync: function _sync() {
- throw this.exception;
+ return Promise.reject(this.exception);
}
};
function sync_httpd_setup() {
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let collections = collectionsHelper.collections;
@@ -61,48 +55,48 @@ function sync_httpd_setup() {
function setUp(server) {
yield configureIdentity({username: "johndoe"});
Service.serverURL = server.baseURI + "/";
Service.clusterURL = server.baseURI + "/";
new FakeCryptoService();
}
-function generateAndUploadKeys(server) {
+function* generateAndUploadKeys(server) {
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
let res = Service.resource(server.baseURI + "/1.1/johndoe/storage/crypto/keys");
- return serverKeys.upload(res).success;
+ return (yield serverKeys.upload(res)).success;
}
add_identity_test(this, function test_backoff500() {
_("Test: HTTP 500 sets backoff status.");
let server = sync_httpd_setup();
yield setUp(server);
let engine = engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 500};
try {
do_check_false(Status.enforceBackoff);
// Forcibly create and upload keys here -- otherwise we don't get to the 500!
- do_check_true(generateAndUploadKeys(server));
+ do_check_true(yield generateAndUploadKeys(server));
- Service.login();
- Service.sync();
+ yield Service.login();
+ yield Service.sync();
do_check_true(Status.enforceBackoff);
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
} finally {
Status.resetBackoff();
- Service.startOver();
+ yield Service.startOver();
}
yield promiseStopServer(server);
});
add_identity_test(this, function test_backoff503() {
_("Test: HTTP 503 with Retry-After header leads to backoff notification and sets backoff status.");
let server = sync_httpd_setup();
yield setUp(server);
@@ -116,29 +110,29 @@ add_identity_test(this, function test_ba
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function (subject) {
backoffInterval = subject;
});
try {
do_check_false(Status.enforceBackoff);
- do_check_true(generateAndUploadKeys(server));
+ do_check_true(yield generateAndUploadKeys(server));
- Service.login();
- Service.sync();
+ yield Service.login();
+ yield Service.sync();
do_check_true(Status.enforceBackoff);
do_check_eq(backoffInterval, BACKOFF);
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
do_check_eq(Status.sync, SERVER_MAINTENANCE);
} finally {
Status.resetBackoff();
Status.resetSync();
- Service.startOver();
+ yield Service.startOver();
}
yield promiseStopServer(server);
});
add_identity_test(this, function test_overQuota() {
_("Test: HTTP 400 with body error code 14 means over quota.");
let server = sync_httpd_setup();
yield setUp(server);
@@ -148,107 +142,99 @@ add_identity_test(this, function test_ov
engine.exception = {status: 400,
toString() {
return "14";
}};
try {
do_check_eq(Status.sync, SYNC_SUCCEEDED);
- do_check_true(generateAndUploadKeys(server));
+ do_check_true(yield generateAndUploadKeys(server));
- Service.login();
- Service.sync();
+ yield Service.login();
+ yield Service.sync();
do_check_eq(Status.sync, OVER_QUOTA);
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
} finally {
Status.resetSync();
- Service.startOver();
+ yield Service.startOver();
}
yield promiseStopServer(server);
});
add_identity_test(this, function test_service_networkError() {
_("Test: Connection refused error from Service.sync() leads to the right status code.");
let server = sync_httpd_setup();
yield setUp(server);
- let deferred = Promise.defer();
- server.stop(() => {
- // Provoke connection refused.
- Service.clusterURL = "http://localhost:12345/";
+ yield promiseStopServer(server);
+ // Provoke connection refused.
+ Service.clusterURL = "http://localhost:12345/";
- try {
- do_check_eq(Status.sync, SYNC_SUCCEEDED);
+ try {
+ do_check_eq(Status.sync, SYNC_SUCCEEDED);
- Service._loggedIn = true;
- Service.sync();
+ Service._loggedIn = true;
+ yield Service.sync();
- do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
- do_check_eq(Status.service, SYNC_FAILED);
- } finally {
- Status.resetSync();
- Service.startOver();
- }
- deferred.resolve();
- });
- yield deferred.promise;
+ do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+ do_check_eq(Status.service, SYNC_FAILED);
+ } finally {
+ Status.resetSync();
+ yield Service.startOver();
+ }
});
add_identity_test(this, function test_service_offline() {
_("Test: Wanting to sync in offline mode leads to the right status code but does not increment the ignorable error count.");
let server = sync_httpd_setup();
yield setUp(server);
- let deferred = Promise.defer();
- server.stop(() => {
- Services.io.offline = true;
- Services.prefs.setBoolPref("network.dns.offline-localhost", false);
+ yield promiseStopServer(server);
+ Services.io.offline = true;
+ Services.prefs.setBoolPref("network.dns.offline-localhost", false);
- try {
- do_check_eq(Status.sync, SYNC_SUCCEEDED);
-
- Service._loggedIn = true;
- Service.sync();
+ try {
+ do_check_eq(Status.sync, SYNC_SUCCEEDED);
- do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
- do_check_eq(Status.service, SYNC_FAILED);
- } finally {
- Status.resetSync();
- Service.startOver();
- }
- Services.io.offline = false;
- Services.prefs.clearUserPref("network.dns.offline-localhost");
- deferred.resolve();
- });
- yield deferred.promise;
+ Service._loggedIn = true;
+ yield Service.sync();
+
+ do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
+ do_check_eq(Status.service, SYNC_FAILED);
+ } finally {
+ Status.resetSync();
+ yield Service.startOver();
+ }
+ Services.io.offline = false;
+ Services.prefs.clearUserPref("network.dns.offline-localhost");
});
add_identity_test(this, function test_engine_networkError() {
_("Test: Network related exceptions from engine.sync() lead to the right status code.");
let server = sync_httpd_setup();
yield setUp(server);
let engine = engineManager.get("catapult");
engine.enabled = true;
engine.exception = Components.Exception("NS_ERROR_UNKNOWN_HOST",
Cr.NS_ERROR_UNKNOWN_HOST);
try {
do_check_eq(Status.sync, SYNC_SUCCEEDED);
- do_check_true(generateAndUploadKeys(server));
+ do_check_true(yield generateAndUploadKeys(server));
- Service.login();
- Service.sync();
+ yield Service.login();
+ yield Service.sync();
do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
} finally {
Status.resetSync();
- Service.startOver();
+ yield Service.startOver();
}
yield promiseStopServer(server);
});
add_identity_test(this, function test_resource_timeout() {
let server = sync_httpd_setup();
yield setUp(server);
@@ -256,26 +242,26 @@ add_identity_test(this, function test_re
engine.enabled = true;
// Resource throws this when it encounters a timeout.
engine.exception = Components.Exception("Aborting due to channel inactivity.",
Cr.NS_ERROR_NET_TIMEOUT);
try {
do_check_eq(Status.sync, SYNC_SUCCEEDED);
- do_check_true(generateAndUploadKeys(server));
+ do_check_true(yield generateAndUploadKeys(server));
- Service.login();
- Service.sync();
+ yield Service.login();
+ yield Service.sync();
do_check_eq(Status.sync, LOGIN_FAILED_NETWORK_ERROR);
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
} finally {
Status.resetSync();
- Service.startOver();
+ yield Service.startOver();
}
yield promiseStopServer(server);
});
function run_test() {
engineManager.register(CatapultEngine);
run_next_test();
}
--- a/services/sync/tests/unit/test_forms_store.js
+++ b/services/sync/tests/unit/test_forms_store.js
@@ -3,149 +3,166 @@
_("Make sure the form store follows the Store api and correctly accesses the backend form storage");
Cu.import("resource://services-sync/engines/forms.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://gre/modules/Services.jsm");
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
+
let baseuri = "http://fake/uri/";
let engine = new FormEngine(Service);
let store = engine._store;
- function applyEnsureNoFailures(records) {
- do_check_eq(store.applyIncomingBatch(records).length, 0);
+ function* applyEnsureNoFailures(records) {
+ let result = yield store.applyIncomingBatch(records);
+ do_check_eq(result.length, 0);
}
_("Remove any existing entries");
- store.wipe();
- for (let id in store.getAllIDs()) {
+ yield store.wipe();
+ let allIds = yield store.getAllIDs();
+ for (let id in allIds) {
do_throw("Shouldn't get any ids!");
}
_("Add a form entry");
- applyEnsureNoFailures([{
+ yield applyEnsureNoFailures([{
id: Utils.makeGUID(),
name: "name!!",
value: "value??"
}]);
_("Should have 1 entry now");
let id = "";
- for (let _id in store.getAllIDs()) {
+ allIds = yield store.getAllIDs();
+ for (let _id in allIds) {
if (id == "")
id = _id;
else
do_throw("Should have only gotten one!");
}
- do_check_true(store.itemExists(id));
+ do_check_true(yield store.itemExists(id));
_("Should be able to find this entry as a dupe");
- do_check_eq(engine._findDupe({name: "name!!", value: "value??"}), id);
+ do_check_eq((yield engine._findDupe({name: "name!!", value: "value??"})), id);
- let rec = store.createRecord(id);
+ let rec = yield store.createRecord(id);
_("Got record for id", id, rec);
do_check_eq(rec.name, "name!!");
do_check_eq(rec.value, "value??");
_("Create a non-existent id for delete");
- do_check_true(store.createRecord("deleted!!").deleted);
+ let created = yield store.createRecord("deleted!!");
+ do_check_true(created.deleted);
_("Try updating.. doesn't do anything yet");
- store.update({});
+ yield store.update({});
_("Remove all entries");
- store.wipe();
- for (let id in store.getAllIDs()) {
+ yield store.wipe();
+ allIds = yield store.getAllIDs();
+ for (let id in allIds) {
do_throw("Shouldn't get any ids!");
}
_("Add another entry");
- applyEnsureNoFailures([{
+ yield applyEnsureNoFailures([{
id: Utils.makeGUID(),
name: "another",
value: "entry"
}]);
id = "";
- for (let _id in store.getAllIDs()) {
+ allIds = yield store.getAllIDs();
+ for (let _id in allIds) {
if (id == "")
id = _id;
else
do_throw("Should have only gotten one!");
}
_("Change the id of the new entry to something else");
- store.changeItemID(id, "newid");
+ yield store.changeItemID(id, "newid");
_("Make sure it's there");
- do_check_true(store.itemExists("newid"));
+ do_check_true(yield store.itemExists("newid"));
_("Remove the entry");
- store.remove({
+ yield store.remove({
id: "newid"
});
- for (let id in store.getAllIDs()) {
+ allIds = yield store.getAllIDs();
+ for (let id in allIds) {
do_throw("Shouldn't get any ids!");
}
_("Removing the entry again shouldn't matter");
- store.remove({
+ yield store.remove({
id: "newid"
});
- for (let id in store.getAllIDs()) {
+ allIds = yield store.getAllIDs();
+ for (let id in allIds) {
do_throw("Shouldn't get any ids!");
}
_("Add another entry to delete using applyIncomingBatch");
let toDelete = {
id: Utils.makeGUID(),
name: "todelete",
value: "entry"
};
- applyEnsureNoFailures([toDelete]);
+ yield applyEnsureNoFailures([toDelete]);
id = "";
- for (let _id in store.getAllIDs()) {
+ allIds = yield store.getAllIDs();
+ for (let _id in allIds) {
if (id == "")
id = _id;
else
do_throw("Should have only gotten one!");
}
- do_check_true(store.itemExists(id));
+ do_check_true(yield store.itemExists(id));
// mark entry as deleted
toDelete.id = id;
toDelete.deleted = true;
- applyEnsureNoFailures([toDelete]);
- for (let id in store.getAllIDs()) {
+ yield applyEnsureNoFailures([toDelete]);
+ allIds = yield store.getAllIDs();
+ for (let id in allIds) {
do_throw("Shouldn't get any ids!");
}
_("Add an entry to wipe");
- applyEnsureNoFailures([{
+ yield applyEnsureNoFailures([{
id: Utils.makeGUID(),
name: "towipe",
value: "entry"
}]);
- store.wipe();
+ yield store.wipe();
- for (let id in store.getAllIDs()) {
+ allIds = yield store.getAllIDs();
+ for (let id in allIds) {
do_throw("Shouldn't get any ids!");
}
_("Ensure we work if formfill is disabled.");
Services.prefs.setBoolPref("browser.formfill.enable", false);
try {
// a search
- for (let id in store.getAllIDs()) {
+ allIds = yield store.getAllIDs();
+ for (let id in allIds) {
do_throw("Shouldn't get any ids!");
}
// an update.
- applyEnsureNoFailures([{
+ yield applyEnsureNoFailures([{
id: Utils.makeGUID(),
name: "some",
value: "entry"
}]);
} finally {
Services.prefs.clearUserPref("browser.formfill.enable");
- store.wipe();
+ yield store.wipe();
}
-}
+});
--- a/services/sync/tests/unit/test_forms_tracker.js
+++ b/services/sync/tests/unit/test_forms_tracker.js
@@ -2,57 +2,61 @@
http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://gre/modules/Log.jsm");
Cu.import("resource://services-sync/engines/forms.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
_("Verify we've got an empty tracker to work with.");
let engine = new FormEngine(Service);
let tracker = engine._tracker;
// Don't do asynchronous writes.
tracker.persistChangedIDs = false;
do_check_empty(tracker.changedIDs);
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
- function addEntry(name, value) {
- engine._store.create({name: name, value: value});
+ function* addEntry(name, value) {
+ yield engine._store.create({name: name, value: value});
}
- function removeEntry(name, value) {
- guid = engine._findDupe({name: name, value: value});
- engine._store.remove({id: guid});
+ function* removeEntry(name, value) {
+ guid = yield engine._findDupe({name: name, value: value});
+ yield engine._store.remove({id: guid});
}
try {
_("Create an entry. Won't show because we haven't started tracking yet");
- addEntry("name", "John Doe");
+ yield addEntry("name", "John Doe");
do_check_empty(tracker.changedIDs);
_("Tell the tracker to start tracking changes.");
Svc.Obs.notify("weave:engine:start-tracking");
- removeEntry("name", "John Doe");
- addEntry("email", "john@doe.com");
+ yield removeEntry("name", "John Doe");
+ yield addEntry("email", "john@doe.com");
do_check_attribute_count(tracker.changedIDs, 2);
_("Notifying twice won't do any harm.");
Svc.Obs.notify("weave:engine:start-tracking");
- addEntry("address", "Memory Lane");
+ yield addEntry("address", "Memory Lane");
do_check_attribute_count(tracker.changedIDs, 3);
_("Let's stop tracking again.");
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
Svc.Obs.notify("weave:engine:stop-tracking");
- removeEntry("address", "Memory Lane");
+ yield removeEntry("address", "Memory Lane");
do_check_empty(tracker.changedIDs);
_("Notifying twice won't do any harm.");
Svc.Obs.notify("weave:engine:stop-tracking");
- removeEntry("email", "john@doe.com");
+ yield removeEntry("email", "john@doe.com");
do_check_empty(tracker.changedIDs);
} finally {
_("Clean up.");
- engine._store.wipe();
+ yield engine._store.wipe();
}
-}
+});
--- a/services/sync/tests/unit/test_fxa_migration.js
+++ b/services/sync/tests/unit/test_fxa_migration.js
@@ -33,23 +33,16 @@ function promiseOneObserver(topic) {
let observer = function(subject, topic, data) {
Services.obs.removeObserver(observer, topic);
resolve({ subject: subject, data: data });
}
Services.obs.addObserver(observer, topic, false);
});
}
-function promiseStopServer(server) {
- return new Promise((resolve, reject) => {
- server.stop(resolve);
- });
-}
-
-
// Helpers
function configureLegacySync() {
let engine = new RotaryEngine(Service);
engine.enabled = true;
Svc.Prefs.set("registerEngines", engine.name);
Svc.Prefs.set("log.logger.engine.rotary", "Trace");
let contents = {
@@ -128,17 +121,17 @@ add_task(function *testMigration() {
// so should still be not waiting for a user.
Assert.deepEqual((yield fxaMigrator._queueCurrentUserState()), null,
"no user state before server EOL");
// Start a sync - this will cause an EOL notification which the migrator's
// observer will notice.
let promise = promiseOneObserver("fxa-migration:state-changed");
_("Starting sync");
- Service.sync();
+ yield Service.sync();
_("Finished sync");
// We should have seen the observer, so be waiting for an FxA user.
Assert.equal((yield promise).data, fxaMigrator.STATE_USER_FXA, "now waiting for FxA.")
// Re-calling our user-state promise should also reflect the same state.
Assert.equal((yield fxaMigrator._queueCurrentUserState()),
fxaMigrator.STATE_USER_FXA,
@@ -179,63 +172,60 @@ add_task(function *testMigration() {
"now waiting for verification");
// Before we verify the user, fire off a sync that calls us back during
// the sync and before it completes - this way we can ensure we do the right
// thing in terms of blocking sync and waiting for it to complete.
let wasWaiting = false;
// This is a PITA as sync is pseudo-blocking.
- engine._syncFinish = function () {
+ engine._syncFinish = Task.async(function* () {
// We aren't in a generator here, so use a helper to block on promises.
function getState() {
- let cb = Async.makeSpinningCallback();
- fxaMigrator._queueCurrentUserState().then(state => cb(null, state));
- return cb.wait();
+ return fxaMigrator._queueCurrentUserState();
}
// should still be waiting for verification.
- Assert.equal(getState(), fxaMigrator.STATE_USER_FXA_VERIFIED,
+ Assert.equal((yield getState()), fxaMigrator.STATE_USER_FXA_VERIFIED,
"still waiting for verification");
// arrange for the user to be verified. The fxAccount's mock story is
// broken, so go behind its back.
config.fxaccount.user.verified = true;
- fxAccounts.setSignedInUser(config.fxaccount.user);
+ yield fxAccounts.setSignedInUser(config.fxaccount.user);
Services.obs.notifyObservers(null, ONVERIFIED_NOTIFICATION, null);
// spinningly wait for the migrator to catch up - sync is running so
// we should be in a 'null' user-state as there is no user-action
// necessary.
- let cb = Async.makeSpinningCallback();
- promiseOneObserver("fxa-migration:state-changed").then(({ data: state }) => cb(null, state));
- Assert.equal(cb.wait(), null, "no user action necessary while sync completes.");
+ let state = yield promiseOneObserver("fxa-migration:state-changed");
+ Assert.equal(state.data, null, "no user action necessary while sync completes.");
// We must not have started writing the sentinel yet.
Assert.ok(!haveStartedSentinel, "haven't written a sentinel yet");
// sync should be blocked from continuing
Assert.ok(Service.scheduler.isBlocked, "sync is blocked.")
wasWaiting = true;
- throw ex;
- };
+ throw new Error("abort abort!");
+ });
_("Starting sync");
- Service.sync();
+ yield Service.sync();
_("Finished sync");
// mock sync so we can ensure the final sync is scheduled with the FxA user.
// (letting a "normal" sync complete is a PITA without mocking huge amounts
// of FxA infra)
let promiseFinalSync = new Promise((resolve, reject) => {
let oldSync = Service.sync;
- Service.sync = function() {
+ Service.sync = Task.async(function *() {
Service.sync = oldSync;
resolve();
- }
+ });
});
Assert.ok(wasWaiting, "everything was good while sync was running.")
// The migration is now going to run to completion.
// sync should still be "blocked"
Assert.ok(Service.scheduler.isBlocked, "sync is blocked.");
@@ -258,17 +248,17 @@ add_task(function *testMigration() {
Assert.deepEqual((yield fxaMigrator._queueCurrentUserState()),
null,
"still no user action necessary");
// and our engines should be in the same enabled/disabled state as before.
Assert.ok(!Service.engineManager.get("addons").enabled, "addons is still disabled");
Assert.ok(Service.engineManager.get("passwords").enabled, "passwords is still enabled");
// aaaand, we are done - clean up.
- yield promiseStopServer(server);
+ yield server.promiseStop();
});
// Test our tokenServer URL is set correctly given we've changed the prefname
// it uses.
add_task(function* testTokenServerOldPrefName() {
let value = "http://custom-token-server/";
// Set the pref we used in the past...
Services.prefs.setCharPref("services.sync.tokenServerURI", value);
--- a/services/sync/tests/unit/test_fxa_migration_sentinel.js
+++ b/services/sync/tests/unit/test_fxa_migration_sentinel.js
@@ -15,23 +15,17 @@ Cu.import("resource://services-sync/reco
Services.prefs.setCharPref("services.sync.username", "foo");
// Now import sync
Cu.import("resource://services-sync/service.js");
const USER = "foo";
const PASSPHRASE = "abcdeabcdeabcdeabcdeabcdea";
-function promiseStopServer(server) {
- return new Promise((resolve, reject) => {
- server.stop(resolve);
- });
-}
-
-var numServerRequests = 0;
+let numServerRequests = 0;
// Helpers
function configureLegacySync() {
let contents = {
meta: {global: {}},
crypto: {},
};
@@ -62,24 +56,24 @@ add_task(function *() {
Assert.equal((yield Service.getFxAMigrationSentinel()), null, "no sentinel to start");
let sentinel = {foo: "bar"};
yield Service.setFxAMigrationSentinel(sentinel);
Assert.deepEqual((yield Service.getFxAMigrationSentinel()), sentinel, "got the sentinel back");
- yield promiseStopServer(server);
+ yield server.promiseStop();
});
// Test the records are cached by the record manager.
add_task(function *() {
// Arrange for a legacy sync user.
let server = configureLegacySync();
- Service.login();
+ yield Service.login();
// Reset the request count here as the login would have made some.
numServerRequests = 0;
Assert.equal((yield Service.getFxAMigrationSentinel()), null, "no sentinel to start");
Assert.equal(numServerRequests, 1, "first fetch should hit the server");
let sentinel = {foo: "bar"};
@@ -90,26 +84,26 @@ add_task(function *() {
Assert.equal(numServerRequests, 2, "second fetch should not should hit the server");
// Clobber the caches and ensure we still get the correct value back when we
// do hit the server.
Service.recordManager.clearCache();
Assert.deepEqual((yield Service.getFxAMigrationSentinel()), sentinel, "got the sentinel back");
Assert.equal(numServerRequests, 3, "should have re-hit the server with empty caches");
- yield promiseStopServer(server);
+ yield server.promiseStop();
});
// Test the records are cached by a sync.
add_task(function* () {
let server = configureLegacySync();
// A first sync clobbers meta/global due to it being empty, so we first
// do a sync which forces a good set of data on the server.
- Service.sync();
+ yield Service.sync();
// Now create a sentinel exists on the server. It's encrypted, so we need to
// put an encrypted version.
let cryptoWrapper = new CryptoWrapper("meta", "fxa_credentials");
let sentinel = {foo: "bar"};
cryptoWrapper.cleartext = {
id: "fxa_credentials",
sentinel: sentinel,
@@ -123,28 +117,28 @@ add_task(function* () {
};
server.createContents(USER, {
meta: {fxa_credentials: payload},
crypto: {},
});
// Another sync - this will cause the encrypted record to be fetched.
- Service.sync();
+ yield Service.sync();
// Reset the request count here as the sync will have made many!
numServerRequests = 0;
// Asking for the sentinel should use the copy cached in the record manager.
Assert.deepEqual((yield Service.getFxAMigrationSentinel()), sentinel, "got it");
Assert.equal(numServerRequests, 0, "should not have hit the server");
// And asking for it again should work (we have to work around the fact the
// ciphertext is clobbered on first decrypt...)
Assert.deepEqual((yield Service.getFxAMigrationSentinel()), sentinel, "got it again");
Assert.equal(numServerRequests, 0, "should not have hit the server");
- yield promiseStopServer(server);
+ yield server.promiseStop();
});
function run_test() {
initTestLogging();
run_next_test();
}
--- a/services/sync/tests/unit/test_fxa_node_reassignment.js
+++ b/services/sync/tests/unit/test_fxa_node_reassignment.js
@@ -102,17 +102,17 @@ function getReassigned() {
}
/**
* Make a test request to `url`, then watch the result of two syncs
* to ensure that a node request was made.
* Runs `between` between the two. This can be used to undo deliberate failure
* setup, detach observers, etc.
*/
-function syncAndExpectNodeReassignment(server, firstNotification, between,
+function* syncAndExpectNodeReassignment(server, firstNotification, between,
secondNotification, url) {
_("Starting syncAndExpectNodeReassignment\n");
let deferred = Promise.defer();
function onwards() {
let numTokenRequestsBefore;
function onFirstSync() {
_("First sync completed.");
Svc.Obs.remove(firstNotification, onFirstSync);
@@ -131,32 +131,38 @@ function syncAndExpectNodeReassignment(s
Svc.Obs.remove(secondNotification, onSecondSync);
Service.scheduler.clearSyncTriggers();
// Make absolutely sure that any event listeners are done with their work
// before we proceed.
waitForZeroTimer(function () {
_("Second sync nextTick.");
do_check_eq(numTokenRequests, numTokenRequestsBefore + 1, "fetched a new token");
- Service.startOver();
- server.stop(deferred.resolve);
+ Service.startOver().then(
+ () => server.promiseStop()
+ ).then(
+ () => deferred.resolve(),
+ err => deferred.reject(err));
});
}
Svc.Obs.add(firstNotification, onFirstSync);
- Service.sync();
+ return Service.sync();
}
// Make sure that it works!
_("Making request to " + url + " which should 401");
+ let deferredRest = Promise.defer();
let request = new RESTRequest(url);
request.get(function () {
do_check_eq(request.response.status, 401);
- Utils.nextTick(onwards);
+ Utils.nextTick(deferredRest.resolve);
});
+ yield deferredRest.promise;
+ yield onwards();
yield deferred.promise;
}
add_task(function test_momentary_401_engine() {
_("Test a failure for engine URLs that's resolved by reassignment.");
let server = yield prepareServer();
let john = server.user("johndoe");
@@ -168,17 +174,17 @@ add_task(function test_momentary_401_eng
// through a sync.
let global = {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
rotary: {version: engine.version,
syncID: engine.syncID}}
john.createCollection("meta").insert("global", global);
_("First sync to prepare server contents.");
- Service.sync();
+ yield Service.sync();
_("Setting up Rotary collection to 401.");
let rotary = john.createCollection("rotary");
let oldHandler = rotary.collectionHandler;
rotary.collectionHandler = handleReassign.bind(this, undefined);
// We want to verify that the clusterURL pref has been cleared after a 401
// inside a sync. Flag the Rotary engine to need syncing.
@@ -207,17 +213,17 @@ add_task(function test_momentary_401_eng
});
// This test ends up being a failing info fetch *after we're already logged in*.
add_task(function test_momentary_401_info_collections_loggedin() {
_("Test a failure for info/collections after login that's resolved by reassignment.");
let server = yield prepareServer();
_("First sync to prepare server contents.");
- Service.sync();
+ yield Service.sync();
_("Arrange for info/collections to return a 401.");
let oldHandler = server.toplevelHandlers.info;
server.toplevelHandlers.info = handleReassign;
function undo() {
_("Undoing test changes.");
server.toplevelHandlers.info = oldHandler;
@@ -252,35 +258,33 @@ add_task(function test_momentary_401_inf
// Return a 401 for the next /info request - it will be reset immediately
// after a new token is fetched.
oldHandler = server.toplevelHandlers.info
server.toplevelHandlers.info = handleReassign;
do_check_false(Service.isLoggedIn, "not already logged in");
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, SYNC_SUCCEEDED, "sync succeeded");
// sync was successful - check we grabbed a new token.
do_check_true(sawTokenFetch, "a new token was fetched by this test.")
// and we are done.
- Service.startOver();
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- yield deferred.promise;
+ yield Service.startOver();
+ yield server.promiseStop();
});
// This test ends up being a failing meta/global fetch *after we're already logged in*.
add_task(function test_momentary_401_storage_loggedin() {
_("Test a failure for any storage URL after login that's resolved by" +
"reassignment.");
let server = yield prepareServer();
_("First sync to prepare server contents.");
- Service.sync();
+ yield Service.sync();
_("Arrange for meta/global to return a 401.");
let oldHandler = server.toplevelHandlers.storage;
server.toplevelHandlers.storage = handleReassign;
function undo() {
_("Undoing test changes.");
server.toplevelHandlers.storage = oldHandler;
@@ -313,9 +317,8 @@ add_task(function test_momentary_401_sto
do_check_false(Service.isLoggedIn, "already logged in");
yield syncAndExpectNodeReassignment(server,
"weave:service:login:error",
undo,
"weave:service:sync:finish",
Service.storageURL + "meta/global");
});
-
--- a/services/sync/tests/unit/test_fxa_service_cluster.js
+++ b/services/sync/tests/unit/test_fxa_service_cluster.js
@@ -15,32 +15,30 @@ add_task(function test_findCluster() {
headers: [],
body: "",
});
yield Service.identity.initializeWithCurrentIdentity();
yield Assert.rejects(Service.identity.whenReadyToAuthenticate.promise,
"should reject due to 500");
- Assert.throws(function() {
- Service._clusterManager._findCluster();
- });
+ yield Assert.rejects(Service._clusterManager._findCluster());
_("_findCluster() returns null on authentication errors.");
initializeIdentityWithTokenServerResponse({
status: 401,
headers: {"content-type": "application/json"},
body: "{}",
});
yield Service.identity.initializeWithCurrentIdentity();
yield Assert.rejects(Service.identity.whenReadyToAuthenticate.promise,
"should reject due to 401");
- cluster = Service._clusterManager._findCluster();
+ cluster = yield Service._clusterManager._findCluster();
Assert.strictEqual(cluster, null);
_("_findCluster() works with correct tokenserver response.");
let endpoint = "http://example.com/something";
initializeIdentityWithTokenServerResponse({
status: 200,
headers: {"content-type": "application/json"},
body:
@@ -50,17 +48,17 @@ add_task(function test_findCluster() {
id: "id",
key: "key",
uid: "uid",
})
});
yield Service.identity.initializeWithCurrentIdentity();
yield Service.identity.whenReadyToAuthenticate.promise;
- cluster = Service._clusterManager._findCluster();
+ cluster = yield Service._clusterManager._findCluster();
// The cluster manager ensures a trailing "/"
Assert.strictEqual(cluster, endpoint + "/");
Svc.Prefs.resetBranch("");
});
function run_test() {
initTestLogging();
--- a/services/sync/tests/unit/test_fxa_startOver.js
+++ b/services/sync/tests/unit/test_fxa_startOver.js
@@ -26,30 +26,30 @@ add_task(function* test_startover() {
// we expect the "legacy" provider (but can't instanceof that, as BrowserIDManager
// extends it)
do_check_false(Service.identity instanceof BrowserIDManager);
Service.serverURL = "https://localhost/";
Service.clusterURL = Service.serverURL;
- Service.login();
+ yield Service.login();
// We should have a cluster URL
do_check_true(Service.clusterURL.length > 0);
// remember some stuff so we can reset it after.
let oldIdentity = Service.identity;
let oldClusterManager = Service._clusterManager;
let deferred = Promise.defer();
Services.obs.addObserver(function observeStartOverFinished() {
Services.obs.removeObserver(observeStartOverFinished, "weave:service:start-over:finish");
deferred.resolve();
}, "weave:service:start-over:finish", false);
- Service.startOver();
+ yield Service.startOver();
yield deferred.promise; // wait for the observer to fire.
// the xpcom service should indicate FxA is enabled.
do_check_true(xps.fxAccountsEnabled);
// should have swapped identities.
do_check_true(Service.identity instanceof BrowserIDManager);
// should have clobbered the cluster URL
do_check_eq(Service.clusterURL, "");
--- a/services/sync/tests/unit/test_history_engine.js
+++ b/services/sync/tests/unit/test_history_engine.js
@@ -7,28 +7,30 @@ Cu.import("resource://services-sync/engi
Cu.import("resource://services-sync/identity.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
Service.engineManager.clear();
-add_test(function test_setup() {
- PlacesTestUtils.clearHistory().then(run_next_test);
+add_task(function* test_setup() {
+ yield PlacesTestUtils.clearHistory();
});
-add_test(function test_processIncoming_mobile_history_batched() {
+add_task(function* test_processIncoming_mobile_history_batched() {
_("SyncEngine._processIncoming works on history engine.");
let FAKE_DOWNLOAD_LIMIT = 100;
Svc.Prefs.set("client.type", "mobile");
Service.engineManager.register(HistoryEngine);
+ Log.repository.getLogger("Sync.Engine.History").level = Log.Level.Trace;
+
// A collection that logs each GET
let collection = new ServerCollection();
collection.get_log = [];
collection._get = collection.get;
collection.get = function (options) {
this.get_log.push(options);
return this._get(options);
};
@@ -62,40 +64,40 @@ add_test(function test_processIncoming_m
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {history: {version: engine.version,
syncID: engine.syncID}};
try {
_("On a mobile client, we get new records from the server in batches of 50.");
- engine._syncStartup();
+ yield engine._syncStartup();
// Fake a lower limit.
engine.downloadLimit = FAKE_DOWNLOAD_LIMIT;
_("Last modified: " + engine.lastModified);
_("Processing...");
- engine._processIncoming();
+ yield engine._processIncoming();
_("Last modified: " + engine.lastModified);
- engine._syncFinish();
+ yield engine._syncFinish();
// Back to the normal limit.
_("Running again. Should fetch none, because of lastModified");
engine.downloadLimit = MAX_HISTORY_DOWNLOAD;
_("Processing...");
- engine._processIncoming();
+ yield engine._processIncoming();
_("Last modified: " + engine.lastModified);
_("Running again. Expecting to pull everything");
engine.lastModified = undefined;
engine.lastSync = 0;
_("Processing...");
- engine._processIncoming();
+ yield engine._processIncoming();
_("Last modified: " + engine.lastModified);
// Verify that the right number of GET requests with the right
// kind of parameters were made.
do_check_eq(collection.get_log.length,
// First try:
1 + // First 50...
@@ -127,21 +129,23 @@ add_test(function test_processIncoming_m
do_check_eq(collection.get_log[j].limit, undefined);
if (i < Math.floor((234 - 50) / MOBILE_BATCH_SIZE))
do_check_eq(collection.get_log[j].ids.length, MOBILE_BATCH_SIZE);
else
do_check_eq(collection.get_log[j].ids.length, 234 % MOBILE_BATCH_SIZE);
}
} finally {
- PlacesTestUtils.clearHistory().then(() => {
- server.stop(do_test_finished);
- Svc.Prefs.resetBranch("");
- Service.recordManager.clearCache();
- });
+ yield PlacesTestUtils.clearHistory();
+ yield promiseStopServer(server);
+ Svc.Prefs.resetBranch("");
+ Service.recordManager.clearCache();
}
});
function run_test() {
generateNewKeys(Service.collectionKeys);
+ initTestLogging("Trace");
+ Log.repository.getLogger("Sync.EngineManager").level = Log.Level.Trace;
+
run_next_test();
}
--- a/services/sync/tests/unit/test_history_store.js
+++ b/services/sync/tests/unit/test_history_store.js
@@ -29,214 +29,219 @@ function queryHistoryVisits(uri) {
let options = PlacesUtils.history.getNewQueryOptions();
options.queryType = Ci.nsINavHistoryQueryOptions.QUERY_TYPE_HISTORY;
options.resultType = Ci.nsINavHistoryQueryOptions.RESULTS_AS_VISIT;
options.sortingMode = Ci.nsINavHistoryQueryOptions.SORT_BY_DATE_ASCENDING;
return queryPlaces(uri, options);
}
function onNextTitleChanged(callback) {
- PlacesUtils.history.addObserver({
- onBeginUpdateBatch: function onBeginUpdateBatch() {},
- onEndUpdateBatch: function onEndUpdateBatch() {},
- onPageChanged: function onPageChanged() {},
- onTitleChanged: function onTitleChanged() {
- PlacesUtils.history.removeObserver(this);
- Utils.nextTick(callback);
- },
- onVisit: function onVisit() {},
- onDeleteVisits: function onDeleteVisits() {},
- onPageExpired: function onPageExpired() {},
- onDeleteURI: function onDeleteURI() {},
- onClearHistory: function onClearHistory() {},
- QueryInterface: XPCOMUtils.generateQI([
- Ci.nsINavHistoryObserver,
- Ci.nsINavHistoryObserver_MOZILLA_1_9_1_ADDITIONS,
- Ci.nsISupportsWeakReference
- ])
- }, true);
+ return new Promise((resolve, reject) => {
+ PlacesUtils.history.addObserver({
+ onBeginUpdateBatch: function onBeginUpdateBatch() {},
+ onEndUpdateBatch: function onEndUpdateBatch() {},
+ onPageChanged: function onPageChanged() {},
+ onTitleChanged: function onTitleChanged() {
+ PlacesUtils.history.removeObserver(this);
+ Utils.nextTick(function() {
+ Task.spawn(callback).then(resolve, reject);
+ });
+ },
+ onVisit: function onVisit() {},
+ onDeleteVisits: function onDeleteVisits() {},
+ onPageExpired: function onPageExpired() {},
+ onDeleteURI: function onDeleteURI() {},
+ onClearHistory: function onClearHistory() {},
+ QueryInterface: XPCOMUtils.generateQI([
+ Ci.nsINavHistoryObserver,
+ Ci.nsINavHistoryObserver_MOZILLA_1_9_1_ADDITIONS,
+ Ci.nsISupportsWeakReference
+ ])
+ }, true);
+
+ });
}
// Ensure exceptions from inside callbacks leads to test failures while
// we still clean up properly.
function ensureThrows(func) {
return function() {
try {
func.apply(this, arguments);
} catch (ex) {
PlacesTestUtils.clearHistory();
do_throw(ex);
}
};
}
var store = new HistoryEngine(Service)._store;
-function applyEnsureNoFailures(records) {
- do_check_eq(store.applyIncomingBatch(records).length, 0);
+function* applyEnsureNoFailures(records) {
+ do_check_eq((yield store.applyIncomingBatch(records)).length, 0);
}
var fxuri, fxguid, tburi, tbguid;
function run_test() {
initTestLogging("Trace");
run_next_test();
}
-add_test(function test_store() {
+add_task(function* test_store() {
_("Verify that we've got an empty store to work with.");
- do_check_empty(store.getAllIDs());
+ do_check_empty(yield store.getAllIDs());
_("Let's create an entry in the database.");
fxuri = Utils.makeURI("http://getfirefox.com/");
let place = {
uri: fxuri,
title: "Get Firefox!",
visits: [{
visitDate: TIMESTAMP1,
transitionType: Ci.nsINavHistoryService.TRANSITION_LINK
}]
};
- PlacesUtils.asyncHistory.updatePlaces(place, {
- handleError: function handleError() {
- do_throw("Unexpected error in adding visit.");
- },
- handleResult: function handleResult() {},
- handleCompletion: onVisitAdded
+ yield new Promise(resolve => {
+ PlacesUtils.asyncHistory.updatePlaces(place, {
+ handleError: function handleError() {
+ do_throw("Unexpected error in adding visit.");
+ },
+ handleResult: function handleResult() {},
+ handleCompletion: resolve
+ });
});
- function onVisitAdded() {
- _("Verify that the entry exists.");
- let ids = Object.keys(store.getAllIDs());
- do_check_eq(ids.length, 1);
- fxguid = ids[0];
- do_check_true(store.itemExists(fxguid));
+ _("Verify that the entry exists.");
+ let ids = Object.keys(yield store.getAllIDs());
+ do_check_eq(ids.length, 1);
+ fxguid = ids[0];
+ do_check_true(yield store.itemExists(fxguid));
- _("If we query a non-existent record, it's marked as deleted.");
- let record = store.createRecord("non-existent");
- do_check_true(record.deleted);
+ _("If we query a non-existent record, it's marked as deleted.");
+ let record = yield store.createRecord("non-existent");
+ do_check_true(record.deleted);
- _("Verify createRecord() returns a complete record.");
- record = store.createRecord(fxguid);
- do_check_eq(record.histUri, fxuri.spec);
- do_check_eq(record.title, "Get Firefox!");
- do_check_eq(record.visits.length, 1);
- do_check_eq(record.visits[0].date, TIMESTAMP1);
- do_check_eq(record.visits[0].type, Ci.nsINavHistoryService.TRANSITION_LINK);
+ _("Verify createRecord() returns a complete record.");
+ record = yield store.createRecord(fxguid);
+ do_check_eq(record.histUri, fxuri.spec);
+ do_check_eq(record.title, "Get Firefox!");
+ do_check_eq(record.visits.length, 1);
+ do_check_eq(record.visits[0].date, TIMESTAMP1);
+ do_check_eq(record.visits[0].type, Ci.nsINavHistoryService.TRANSITION_LINK);
- _("Let's modify the record and have the store update the database.");
- let secondvisit = {date: TIMESTAMP2,
- type: Ci.nsINavHistoryService.TRANSITION_TYPED};
- onNextTitleChanged(ensureThrows(function() {
- let queryres = queryHistoryVisits(fxuri);
- do_check_eq(queryres.length, 2);
- do_check_eq(queryres[0].time, TIMESTAMP1);
- do_check_eq(queryres[0].title, "Hol Dir Firefox!");
- do_check_eq(queryres[1].time, TIMESTAMP2);
- do_check_eq(queryres[1].title, "Hol Dir Firefox!");
- run_next_test();
- }));
- applyEnsureNoFailures([
- {id: fxguid,
- histUri: record.histUri,
- title: "Hol Dir Firefox!",
- visits: [record.visits[0], secondvisit]}
- ]);
- }
+ _("Let's modify the record and have the store update the database.");
+ let secondvisit = {date: TIMESTAMP2,
+ type: Ci.nsINavHistoryService.TRANSITION_TYPED};
+ let promiseDone = onNextTitleChanged(ensureThrows(function() {
+ let queryres = queryHistoryVisits(fxuri);
+ do_check_eq(queryres.length, 2);
+ do_check_eq(queryres[0].time, TIMESTAMP1);
+ do_check_eq(queryres[0].title, "Hol Dir Firefox!");
+ do_check_eq(queryres[1].time, TIMESTAMP2);
+ do_check_eq(queryres[1].title, "Hol Dir Firefox!");
+ }));
+
+ yield applyEnsureNoFailures([
+ {id: fxguid,
+ histUri: record.histUri,
+ title: "Hol Dir Firefox!",
+ visits: [record.visits[0], secondvisit]}
+ ]);
+ yield promiseDone;
});
-add_test(function test_store_create() {
+add_task(function* test_store_create() {
_("Create a brand new record through the store.");
tbguid = Utils.makeGUID();
tburi = Utils.makeURI("http://getthunderbird.com");
- onNextTitleChanged(ensureThrows(function() {
- do_check_attribute_count(store.getAllIDs(), 2);
+ let promiseDone = onNextTitleChanged(ensureThrows(function* () {
+ do_check_attribute_count((yield store.getAllIDs()), 2);
let queryres = queryHistoryVisits(tburi);
do_check_eq(queryres.length, 1);
do_check_eq(queryres[0].time, TIMESTAMP3);
do_check_eq(queryres[0].title, "The bird is the word!");
- run_next_test();
}));
- applyEnsureNoFailures([
+ yield applyEnsureNoFailures([
{id: tbguid,
histUri: tburi.spec,
title: "The bird is the word!",
visits: [{date: TIMESTAMP3,
type: Ci.nsINavHistoryService.TRANSITION_TYPED}]}
]);
+ yield promiseDone;
});
-add_test(function test_null_title() {
+add_task(function* test_null_title() {
_("Make sure we handle a null title gracefully (it can happen in some cases, e.g. for resource:// URLs)");
let resguid = Utils.makeGUID();
let resuri = Utils.makeURI("unknown://title");
- applyEnsureNoFailures([
+ yield applyEnsureNoFailures([
{id: resguid,
histUri: resuri.spec,
title: null,
visits: [{date: TIMESTAMP3,
type: Ci.nsINavHistoryService.TRANSITION_TYPED}]}
]);
- do_check_attribute_count(store.getAllIDs(), 3);
+ do_check_attribute_count((yield store.getAllIDs()), 3);
let queryres = queryHistoryVisits(resuri);
do_check_eq(queryres.length, 1);
do_check_eq(queryres[0].time, TIMESTAMP3);
- run_next_test();
});
-add_test(function test_invalid_records() {
+add_task(function* test_invalid_records() {
_("Make sure we handle invalid URLs in places databases gracefully.");
let connection = PlacesUtils.history
.QueryInterface(Ci.nsPIPlacesDatabase)
.DBConnection;
let stmt = connection.createAsyncStatement(
"INSERT INTO moz_places "
+ "(url, title, rev_host, visit_count, last_visit_date) "
+ "VALUES ('invalid-uri', 'Invalid URI', '.', 1, " + TIMESTAMP3 + ")"
);
- Async.querySpinningly(stmt);
+ yield Async.promiseQuery(stmt);
stmt.finalize();
// Add the corresponding visit to retain database coherence.
stmt = connection.createAsyncStatement(
"INSERT INTO moz_historyvisits "
+ "(place_id, visit_date, visit_type, session) "
+ "VALUES ((SELECT id FROM moz_places WHERE url = 'invalid-uri'), "
+ TIMESTAMP3 + ", " + Ci.nsINavHistoryService.TRANSITION_TYPED + ", 1)"
);
- Async.querySpinningly(stmt);
+ yield Async.promiseQuery(stmt);
stmt.finalize();
- do_check_attribute_count(store.getAllIDs(), 4);
+ do_check_attribute_count((yield store.getAllIDs()), 4);
_("Make sure we report records with invalid URIs.");
let invalid_uri_guid = Utils.makeGUID();
- let failed = store.applyIncomingBatch([{
+ let failed = yield store.applyIncomingBatch([{
id: invalid_uri_guid,
histUri: ":::::::::::::::",
title: "Doesn't have a valid URI",
visits: [{date: TIMESTAMP3,
type: Ci.nsINavHistoryService.TRANSITION_EMBED}]}
]);
do_check_eq(failed.length, 1);
do_check_eq(failed[0], invalid_uri_guid);
_("Make sure we handle records with invalid GUIDs gracefully (ignore).");
- applyEnsureNoFailures([
+ yield applyEnsureNoFailures([
{id: "invalid",
histUri: "http://invalid.guid/",
title: "Doesn't have a valid GUID",
visits: [{date: TIMESTAMP3,
type: Ci.nsINavHistoryService.TRANSITION_EMBED}]}
]);
_("Make sure we handle records with invalid visit codes or visit dates, gracefully ignoring those visits.");
let no_date_visit_guid = Utils.makeGUID();
let no_type_visit_guid = Utils.makeGUID();
let invalid_type_visit_guid = Utils.makeGUID();
let non_integer_visit_guid = Utils.makeGUID();
- failed = store.applyIncomingBatch([
+ failed = yield store.applyIncomingBatch([
{id: no_date_visit_guid,
histUri: "http://no.date.visit/",
title: "Visit has no date",
visits: [{type: Ci.nsINavHistoryService.TRANSITION_EMBED}]},
{id: no_type_visit_guid,
histUri: "http://no.type.visit/",
title: "Visit has no type",
visits: [{date: TIMESTAMP3}]},
@@ -249,49 +254,47 @@ add_test(function test_invalid_records()
histUri: "http://non.integer.visit/",
title: "Visit has non-integer date",
visits: [{date: 1234.567,
type: Ci.nsINavHistoryService.TRANSITION_EMBED}]}
]);
do_check_eq(failed.length, 0);
_("Make sure we handle records with javascript: URLs gracefully.");
- applyEnsureNoFailures([
+ yield applyEnsureNoFailures([
{id: Utils.makeGUID(),
histUri: "javascript:''",
title: "javascript:''",
visits: [{date: TIMESTAMP3,
type: Ci.nsINavHistoryService.TRANSITION_EMBED}]}
]);
_("Make sure we handle records without any visits gracefully.");
- applyEnsureNoFailures([
+ yield applyEnsureNoFailures([
{id: Utils.makeGUID(),
histUri: "http://getfirebug.com",
title: "Get Firebug!",
visits: []}
]);
- run_next_test();
});
-add_test(function test_remove() {
+add_task(function* test_remove() {
_("Remove an existent record and a non-existent from the store.");
- applyEnsureNoFailures([{id: fxguid, deleted: true},
- {id: Utils.makeGUID(), deleted: true}]);
- do_check_false(store.itemExists(fxguid));
+ yield applyEnsureNoFailures([{id: fxguid, deleted: true},
+ {id: Utils.makeGUID(), deleted: true}]);
+ do_check_false(yield store.itemExists(fxguid));
let queryres = queryHistoryVisits(fxuri);
do_check_eq(queryres.length, 0);
_("Make sure wipe works.");
- store.wipe();
- do_check_empty(store.getAllIDs());
+ yield store.wipe();
+ do_check_empty(yield store.getAllIDs());
queryres = queryHistoryVisits(fxuri);
do_check_eq(queryres.length, 0);
queryres = queryHistoryVisits(tburi);
do_check_eq(queryres.length, 0);
- run_next_test();
});
-add_test(function cleanup() {
+add_task(function* cleanup() {
_("Clean up.");
- PlacesTestUtils.clearHistory().then(run_next_test);
+ yield PlacesTestUtils.clearHistory();
});
--- a/services/sync/tests/unit/test_history_tracker.js
+++ b/services/sync/tests/unit/test_history_tracker.js
@@ -24,180 +24,192 @@ Service.engineManager.clear();
Service.engineManager.register(HistoryEngine);
var engine = Service.engineManager.get("history");
var tracker = engine._tracker;
// Don't write out by default.
tracker.persistChangedIDs = false;
var _counter = 0;
-function addVisit() {
+function* addVisit() {
let uriString = "http://getfirefox.com/" + _counter++;
let uri = Utils.makeURI(uriString);
_("Adding visit for URI " + uriString);
let place = {
uri: uri,
visits: [ {
visitDate: Date.now() * 1000,
transitionType: PlacesUtils.history.TRANSITION_LINK
} ]
};
- let cb = Async.makeSpinningCallback();
- PlacesUtils.asyncHistory.updatePlaces(place, {
- handleError: function () {
- _("Error adding visit for " + uriString);
- cb(new Error("Error adding history entry"));
- },
+ yield new Promise((resolve, reject) => {
+ PlacesUtils.asyncHistory.updatePlaces(place, {
+ handleError: function () {
+ _("Error adding visit for " + uriString);
+ reject(new Error("Error adding history entry"));
+ },
- handleResult: function () {
- },
+ handleResult: function () {
+ },
- handleCompletion: function () {
- _("Added visit for " + uriString);
- cb();
- }
+ handleCompletion: function () {
+ _("Added visit for " + uriString);
+ resolve();
+ }
+ });
});
+ return uri;
+}
- // Spin the event loop to embed this async call in a sync API.
- cb.wait();
- return uri;
+function nextTick() {
+ return new Promise(resolve => {
+ Utils.nextTick(resolve);
+ });
}
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Tracker.History").level = Log.Level.Trace;
run_next_test();
}
-add_test(function test_empty() {
+add_task(function* test_empty() {
_("Verify we've got an empty, disabled tracker to work with.");
do_check_empty(tracker.changedIDs);
do_check_eq(tracker.score, 0);
do_check_false(tracker._isTracking);
- run_next_test();
+});
+
+add_task(function* test_not_tracking() {
+ _("Create history item. Won't show because we haven't started tracking yet");
+ yield addVisit();
+ yield nextTick();
+ do_check_empty(tracker.changedIDs);
+ do_check_eq(tracker.score, 0);
});
-add_test(function test_not_tracking(next) {
- _("Create history item. Won't show because we haven't started tracking yet");
- addVisit();
- Utils.nextTick(function() {
- do_check_empty(tracker.changedIDs);
- do_check_eq(tracker.score, 0);
- run_next_test();
+add_task(function* test_start_tracking() {
+ _("Add hook for save completion.");
+ let promiseChangesSaved = new Promise(resolve => {
+ tracker.persistChangedIDs = true;
+ tracker.onSavedChangedIDs = function () {
+ _("changedIDs written to disk. Proceeding.");
+ // Turn this back off.
+ tracker.persistChangedIDs = false;
+ delete tracker.onSavedChangedIDs;
+ resolve();
+ };
});
-});
-add_test(function test_start_tracking() {
- _("Add hook for save completion.");
- tracker.persistChangedIDs = true;
- tracker.onSavedChangedIDs = function () {
- _("changedIDs written to disk. Proceeding.");
- // Turn this back off.
- tracker.persistChangedIDs = false;
- delete tracker.onSavedChangedIDs;
- run_next_test();
- };
-
+ let sawUpdated = false;
_("Tell the tracker to start tracking changes.");
onScoreUpdated(function() {
_("Score updated in test_start_tracking.");
do_check_attribute_count(tracker.changedIDs, 1);
do_check_eq(tracker.score, SCORE_INCREMENT_SMALL);
+ sawUpdated = true;
});
Svc.Obs.notify("weave:engine:start-tracking");
- addVisit();
+ yield addVisit();
+ yield promiseChangesSaved;
+ do_check_true(sawUpdated);
});
-add_test(function test_start_tracking_twice() {
+add_task(function* test_start_tracking_twice() {
_("Verifying preconditions from test_start_tracking.");
do_check_attribute_count(tracker.changedIDs, 1);
do_check_eq(tracker.score, SCORE_INCREMENT_SMALL);
_("Notifying twice won't do any harm.");
- onScoreUpdated(function() {
- _("Score updated in test_start_tracking_twice.");
- do_check_attribute_count(tracker.changedIDs, 2);
- do_check_eq(tracker.score, 2 * SCORE_INCREMENT_SMALL);
- run_next_test();
+ let promiseScoreUpdated = new Promise(resolve => {
+ onScoreUpdated(function() {
+ _("Score updated in test_start_tracking_twice.");
+ do_check_attribute_count(tracker.changedIDs, 2);
+ do_check_eq(tracker.score, 2 * SCORE_INCREMENT_SMALL);
+ resolve();
+ });
});
Svc.Obs.notify("weave:engine:start-tracking");
- addVisit();
+ yield addVisit();
+ yield promiseScoreUpdated;
});
-add_test(function test_track_delete() {
+add_task(function* test_track_delete() {
_("Deletions are tracked.");
// This isn't present because we weren't tracking when it was visited.
let uri = Utils.makeURI("http://getfirefox.com/0");
- let guid = engine._store.GUIDForUri(uri);
+ let guid = yield engine._store.GUIDForUri(uri);
do_check_false(guid in tracker.changedIDs);
- onScoreUpdated(function() {
- do_check_true(guid in tracker.changedIDs);
- do_check_attribute_count(tracker.changedIDs, 3);
- do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE + 2 * SCORE_INCREMENT_SMALL);
- run_next_test();
+ let promiseScoreUpdated = new Promise(resolve => {
+ onScoreUpdated(function() {
+ do_check_true(guid in tracker.changedIDs);
+ do_check_attribute_count(tracker.changedIDs, 3);
+ do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE + 2 * SCORE_INCREMENT_SMALL);
+ resolve();
+ });
});
do_check_eq(tracker.score, 2 * SCORE_INCREMENT_SMALL);
PlacesUtils.history.removePage(uri);
+ yield promiseScoreUpdated;
});
-add_test(function test_dont_track_expiration() {
+add_task(function* test_dont_track_expiration() {
_("Expirations are not tracked.");
- let uriToExpire = addVisit();
- let guidToExpire = engine._store.GUIDForUri(uriToExpire);
- let uriToRemove = addVisit();
- let guidToRemove = engine._store.GUIDForUri(uriToRemove);
+ let uriToExpire = yield addVisit();
+ let guidToExpire = yield engine._store.GUIDForUri(uriToExpire);
+ let uriToRemove = yield addVisit();
+ let guidToRemove = yield engine._store.GUIDForUri(uriToRemove);
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
do_check_false(guidToExpire in tracker.changedIDs);
do_check_false(guidToRemove in tracker.changedIDs);
- onScoreUpdated(function() {
- do_check_false(guidToExpire in tracker.changedIDs);
- do_check_true(guidToRemove in tracker.changedIDs);
- do_check_attribute_count(tracker.changedIDs, 1);
- run_next_test();
+ let promiseScoreUpdated = new Promise(resolve => {
+ onScoreUpdated(function() {
+ do_check_false(guidToExpire in tracker.changedIDs);
+ do_check_true(guidToRemove in tracker.changedIDs);
+ do_check_attribute_count(tracker.changedIDs, 1);
+ resolve();
+ });
});
// Observe expiration.
Services.obs.addObserver(function onExpiration(aSubject, aTopic, aData) {
Services.obs.removeObserver(onExpiration, aTopic);
// Remove the remaining page to update its score.
PlacesUtils.history.removePage(uriToRemove);
}, PlacesUtils.TOPIC_EXPIRATION_FINISHED, false);
// Force expiration of 1 entry.
Services.prefs.setIntPref("places.history.expiration.max_pages", 0);
Cc["@mozilla.org/places/expiration;1"]
.getService(Ci.nsIObserver)
.observe(null, "places-debug-start-expiration", 1);
+ yield promiseScoreUpdated;
});
-add_test(function test_stop_tracking() {
+add_task(function* test_stop_tracking() {
_("Let's stop tracking again.");
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
Svc.Obs.notify("weave:engine:stop-tracking");
- addVisit();
- Utils.nextTick(function() {
- do_check_empty(tracker.changedIDs);
- run_next_test();
- });
+ yield addVisit();
+ yield nextTick();
+ do_check_empty(tracker.changedIDs);
});
-add_test(function test_stop_tracking_twice() {
+add_task(function* test_stop_tracking_twice() {
_("Notifying twice won't do any harm.");
Svc.Obs.notify("weave:engine:stop-tracking");
- addVisit();
- Utils.nextTick(function() {
- do_check_empty(tracker.changedIDs);
- run_next_test();
- });
+ yield addVisit();
+ yield nextTick();
+ do_check_empty(tracker.changedIDs);
});
-add_test(function cleanup() {
+add_task(function* cleanup() {
_("Clean up.");
- PlacesTestUtils.clearHistory().then(run_next_test);
+ yield PlacesTestUtils.clearHistory();
});
--- a/services/sync/tests/unit/test_hmac_error.js
+++ b/services/sync/tests/unit/test_hmac_error.js
@@ -1,16 +1,17 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/rotaryengine.js");
Cu.import("resource://testing-common/services/sync/utils.js");
+Cu.import("resource://gre/modules/PromiseUtils.jsm");
// Track HMAC error counts.
var hmacErrorCount = 0;
(function () {
let hHE = Service.handleHMACEvent;
Service.handleHMACEvent = function () {
hmacErrorCount++;
return hHE.call(Service);
@@ -44,17 +45,17 @@ function shared_setup() {
let global = new ServerWBO("global", {engines: engines});
let keysWBO = new ServerWBO("keys");
let rotaryColl = new ServerCollection({}, true);
let clientsColl = new ServerCollection({}, true);
return [engine, rotaryColl, clientsColl, keysWBO, global];
}
-add_test(function hmac_error_during_404() {
+add_task(function* hmac_error_during_404() {
_("Attempt to replicate the HMAC error setup.");
let [engine, rotaryColl, clientsColl, keysWBO, global] = shared_setup();
// Hand out 404s for crypto/keys.
let keysHandler = keysWBO.handler();
let key404Counter = 0;
let keys404Handler = function (request, response) {
if (key404Counter > 0) {
@@ -78,35 +79,35 @@ add_test(function hmac_error_during_404(
"/1.1/foo/storage/rotary": upd("rotary", rotaryColl.handler())
};
let server = sync_httpd_setup(handlers);
Service.serverURL = server.baseURI;
try {
_("Syncing.");
- Service.sync();
+ yield Service.sync();
_("Partially resetting client, as if after a restart, and forcing redownload.");
Service.collectionKeys.clear();
engine.lastSync = 0; // So that we redownload records.
key404Counter = 1;
_("---------------------------");
- Service.sync();
+ yield Service.sync();
_("---------------------------");
// Two rotary items, one client record... no errors.
do_check_eq(hmacErrorCount, 0)
} finally {
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
});
-add_test(function hmac_error_during_node_reassignment() {
+add_task(function* hmac_error_during_node_reassignment() {
_("Attempt to replicate an HMAC error during node reassignment.");
let [engine, rotaryColl, clientsColl, keysWBO, global] = shared_setup();
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
// We'll provide a 401 mid-way through the sync. This function
// simulates shifting to a node which has no data.
@@ -181,30 +182,31 @@ add_test(function hmac_error_during_node
}
};
Svc.Obs.add("weave:service:sync:finish", obs);
Svc.Obs.add("weave:service:sync:error", obs);
// This kicks off the actual test. Split into a function here to allow this
// source file to broadly follow actual execution order.
- function onwards() {
+ function* onwards() {
_("== Invoking first sync.");
- Service.sync();
+ yield Service.sync();
_("We should not simultaneously have data but no keys on the server.");
let hasData = rotaryColl.wbo("flying") ||
rotaryColl.wbo("scotsman");
let hasKeys = keysWBO.modified;
_("We correctly handle 401s by aborting the sync and starting again.");
do_check_true(!hasData == !hasKeys);
_("Be prepared for the second (automatic) sync...");
}
+ let deferredFinished = PromiseUtils.defer();
_("Make sure that syncing again causes recovery.");
onSyncFinished = function() {
_("== First sync done.");
_("---------------------------");
onSyncFinished = function() {
_("== Second (automatic) sync done.");
hasData = rotaryColl.wbo("flying") ||
rotaryColl.wbo("scotsman");
@@ -224,24 +226,25 @@ add_test(function hmac_error_during_node
// Two rotary items, one client record... no errors.
do_check_eq(hmacErrorCount, 0)
Svc.Obs.remove("weave:service:sync:finish", obs);
Svc.Obs.remove("weave:service:sync:error", obs);
Svc.Prefs.resetBranch("");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ server.stop(deferredFinished.resolve);
};
Service.sync();
},
this);
};
};
- onwards();
+ yield onwards();
+ yield deferredFinished.promise;
});
function run_test() {
initTestLogging("Trace");
run_next_test();
}
--- a/services/sync/tests/unit/test_interval_triggers.js
+++ b/services/sync/tests/unit/test_interval_triggers.js
@@ -8,22 +8,16 @@ Cu.import("resource://services-sync/util
Cu.import("resource://testing-common/services/sync/utils.js");
Svc.DefaultPrefs.set("registerEngines", "");
Cu.import("resource://services-sync/service.js");
var scheduler = Service.scheduler;
var clientsEngine = Service.clientsEngine;
-function promiseStopServer(server) {
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- return deferred.promise;
-}
-
function sync_httpd_setup() {
let global = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {clients: {version: clientsEngine.version,
syncID: clientsEngine.syncID}}
});
let clientsColl = new ServerCollection({}, true);
@@ -36,17 +30,17 @@ function sync_httpd_setup() {
"/1.1/johndoe/storage/meta/global": upd("meta", global.handler()),
"/1.1/johndoe/info/collections": collectionsHelper.handler,
"/1.1/johndoe/storage/crypto/keys":
upd("crypto", (new ServerWBO("keys")).handler()),
"/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler())
});
}
-function setUp(server) {
+function* setUp(server) {
yield configureIdentity({username: "johndoe"});
Service.serverURL = server.baseURI + "/";
Service.clusterURL = server.baseURI + "/";
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
serverKeys.upload(Service.resource(Service.cryptoKeysURL));
}
@@ -76,91 +70,91 @@ add_identity_test(this, function test_su
do_check_false(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_false(scheduler.hasIncomingItems);
_("Test as long as numClients <= 1 our sync interval is SINGLE_USER.");
// idle == true && numClients <= 1 && hasIncomingItems == false
scheduler.idle = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 1);
do_check_true(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
// idle == false && numClients <= 1 && hasIncomingItems == false
scheduler.idle = false;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 2);
do_check_false(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
// idle == false && numClients <= 1 && hasIncomingItems == true
scheduler.hasIncomingItems = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 3);
do_check_false(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_true(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
// idle == true && numClients <= 1 && hasIncomingItems == true
scheduler.idle = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 4);
do_check_true(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_true(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
_("Test as long as idle && numClients > 1 our sync interval is idleInterval.");
// idle == true && numClients > 1 && hasIncomingItems == true
Service.clientsEngine._store.create({id: "foo", cleartext: "bar"});
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 5);
do_check_true(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_true(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.idleInterval);
// idle == true && numClients > 1 && hasIncomingItems == false
scheduler.hasIncomingItems = false;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 6);
do_check_true(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.idleInterval);
_("Test non-idle, numClients > 1, no incoming items => activeInterval.");
// idle == false && numClients > 1 && hasIncomingItems == false
scheduler.idle = false;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 7);
do_check_false(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
_("Test non-idle, numClients > 1, incoming items => immediateInterval.");
// idle == false && numClients > 1 && hasIncomingItems == true
scheduler.hasIncomingItems = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 8);
do_check_false(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems); //gets reset to false
do_check_eq(scheduler.syncInterval, scheduler.immediateInterval);
Svc.Obs.remove("weave:service:sync:finish", onSyncFinish);
- Service.startOver();
+ yield Service.startOver();
yield promiseStopServer(server);
});
add_identity_test(this, function test_unsuccessful_sync_adjustSyncInterval() {
_("Test unsuccessful sync calling adjustSyncInterval");
let syncFailures = 0;
function onSyncError() {
@@ -180,91 +174,91 @@ add_identity_test(this, function test_un
do_check_false(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_false(scheduler.hasIncomingItems);
_("Test as long as numClients <= 1 our sync interval is SINGLE_USER.");
// idle == true && numClients <= 1 && hasIncomingItems == false
scheduler.idle = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 1);
do_check_true(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
// idle == false && numClients <= 1 && hasIncomingItems == false
scheduler.idle = false;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 2);
do_check_false(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
// idle == false && numClients <= 1 && hasIncomingItems == true
scheduler.hasIncomingItems = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 3);
do_check_false(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_true(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
// idle == true && numClients <= 1 && hasIncomingItems == true
scheduler.idle = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 4);
do_check_true(scheduler.idle);
do_check_false(scheduler.numClients > 1);
do_check_true(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
_("Test as long as idle && numClients > 1 our sync interval is idleInterval.");
// idle == true && numClients > 1 && hasIncomingItems == true
Service.clientsEngine._store.create({id: "foo", cleartext: "bar"});
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 5);
do_check_true(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_true(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.idleInterval);
// idle == true && numClients > 1 && hasIncomingItems == false
scheduler.hasIncomingItems = false;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 6);
do_check_true(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.idleInterval);
_("Test non-idle, numClients > 1, no incoming items => activeInterval.");
// idle == false && numClients > 1 && hasIncomingItems == false
scheduler.idle = false;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 7);
do_check_false(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
_("Test non-idle, numClients > 1, incoming items => immediateInterval.");
// idle == false && numClients > 1 && hasIncomingItems == true
scheduler.hasIncomingItems = true;
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 8);
do_check_false(scheduler.idle);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.hasIncomingItems); //gets reset to false
do_check_eq(scheduler.syncInterval, scheduler.immediateInterval);
- Service.startOver();
+ yield Service.startOver();
Svc.Obs.remove("weave:service:sync:error", onSyncError);
yield promiseStopServer(server);
});
add_identity_test(this, function test_back_triggers_sync() {
let server = sync_httpd_setup();
yield setUp(server);
@@ -275,30 +269,30 @@ add_identity_test(this, function test_ba
// Multiple devices: sync is triggered.
clientsEngine._store.create({id: "foo", cleartext: "bar"});
scheduler.updateClientMode();
let deferred = Promise.defer();
Svc.Obs.add("weave:service:sync:finish", function onSyncFinish() {
Svc.Obs.remove("weave:service:sync:finish", onSyncFinish);
-
- Service.recordManager.clearCache();
- Svc.Prefs.resetBranch("");
- scheduler.setDefaults();
- clientsEngine.resetClient();
-
- Service.startOver();
- server.stop(deferred.resolve);
+ deferred.resolve();
});
scheduler.idle = true;
scheduler.observe(null, "active", Svc.Prefs.get("scheduler.idleTime"));
do_check_false(scheduler.idle);
yield deferred.promise;
+ Service.recordManager.clearCache();
+ Svc.Prefs.resetBranch("");
+ scheduler.setDefaults();
+ yield clientsEngine.resetClient();
+
+ yield Service.startOver();
+ yield promiseStopServer(server);
});
add_identity_test(this, function test_adjust_interval_on_sync_error() {
let server = sync_httpd_setup();
yield setUp(server);
let syncFailures = 0;
function onSyncError() {
@@ -311,24 +305,24 @@ add_identity_test(this, function test_ad
// Force a sync fail.
Svc.Prefs.set("firstSync", "notReady");
do_check_eq(syncFailures, 0);
do_check_false(scheduler.numClients > 1);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
clientsEngine._store.create({id: "foo", cleartext: "bar"});
- Service.sync();
+ yield Service.sync();
do_check_eq(syncFailures, 1);
do_check_true(scheduler.numClients > 1);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
Svc.Obs.remove("weave:service:sync:error", onSyncError);
- Service.startOver();
+ yield Service.startOver();
yield promiseStopServer(server);
});
add_identity_test(this, function test_bug671378_scenario() {
// Test scenario similar to bug 671378. This bug appeared when a score
// update occurred that wasn't large enough to trigger a sync so
// scheduleNextSync() was called without a time interval parameter,
// setting nextSync to a non-zero value and preventing the timer from
@@ -339,17 +333,17 @@ add_identity_test(this, function test_bu
let syncSuccesses = 0;
function onSyncFinish() {
_("Sync success.");
syncSuccesses++;
};
Svc.Obs.add("weave:service:sync:finish", onSyncFinish);
// After first sync call, syncInterval & syncTimer are singleDeviceInterval.
- Service.sync();
+ yield Service.sync();
do_check_eq(syncSuccesses, 1);
do_check_false(scheduler.numClients > 1);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_eq(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
let deferred = Promise.defer();
// Wrap scheduleNextSync so we are notified when it is finished.
scheduler._scheduleNextSync = scheduler.scheduleNextSync;
@@ -360,18 +354,17 @@ add_identity_test(this, function test_bu
// syncInterval and syncTimer values.
if (syncSuccesses == 2) {
do_check_neq(scheduler.nextSync, 0);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
do_check_true(scheduler.syncTimer.delay <= scheduler.activeInterval);
scheduler.scheduleNextSync = scheduler._scheduleNextSync;
Svc.Obs.remove("weave:service:sync:finish", onSyncFinish);
- Service.startOver();
- server.stop(deferred.resolve);
+ deferred.resolve();
}
};
// Set nextSync != 0
// syncInterval still hasn't been set by call to updateClientMode.
// Explicitly trying to invoke scheduleNextSync during a sync
// (to immitate a score update that isn't big enough to trigger a sync).
Svc.Obs.add("weave:service:sync:start", function onSyncStart() {
@@ -382,64 +375,64 @@ add_identity_test(this, function test_bu
scheduler.scheduleNextSync();
do_check_neq(scheduler.nextSync, 0);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_eq(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
});
});
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
- Service.sync();
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ yield Service.sync();
yield deferred.promise;
+ yield Service.startOver();
+ yield promiseStopServer(server);
});
-add_test(function test_adjust_timer_larger_syncInterval() {
+add_task(function test_adjust_timer_larger_syncInterval() {
_("Test syncInterval > current timout period && nextSync != 0, syncInterval is NOT used.");
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
scheduler.updateClientMode();
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
scheduler.scheduleNextSync();
// Ensure we have a small interval.
do_check_neq(scheduler.nextSync, 0);
do_check_eq(scheduler.syncTimer.delay, scheduler.activeInterval);
// Make interval large again
- clientsEngine._wipeClient();
+ yield clientsEngine._wipeClient();
scheduler.updateClientMode();
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
scheduler.scheduleNextSync();
// Ensure timer delay remains as the small interval.
do_check_neq(scheduler.nextSync, 0);
do_check_true(scheduler.syncTimer.delay <= scheduler.activeInterval);
//SyncSchedule.
- Service.startOver();
- run_next_test();
+ yield Service.startOver();
});
-add_test(function test_adjust_timer_smaller_syncInterval() {
+add_task(function test_adjust_timer_smaller_syncInterval() {
_("Test current timout > syncInterval period && nextSync != 0, syncInterval is used.");
scheduler.scheduleNextSync();
// Ensure we have a large interval.
do_check_neq(scheduler.nextSync, 0);
do_check_eq(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
// Make interval smaller
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
scheduler.updateClientMode();
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
scheduler.scheduleNextSync();
// Ensure smaller timer delay is used.
do_check_neq(scheduler.nextSync, 0);
do_check_true(scheduler.syncTimer.delay <= scheduler.activeInterval);
//SyncSchedule.
- Service.startOver();
- run_next_test();
+ yield Service.startOver();
});
--- a/services/sync/tests/unit/test_node_reassignment.js
+++ b/services/sync/tests/unit/test_node_reassignment.js
@@ -101,17 +101,17 @@ function getReassigned() {
}
/**
* Make a test request to `url`, then watch the result of two syncs
* to ensure that a node request was made.
* Runs `between` between the two. This can be used to undo deliberate failure
* setup, detach observers, etc.
*/
-function syncAndExpectNodeReassignment(server, firstNotification, between,
+function* syncAndExpectNodeReassignment(server, firstNotification, between,
secondNotification, url) {
let deferred = Promise.defer();
function onwards() {
let nodeFetched = false;
function onFirstSync() {
_("First sync completed.");
Svc.Obs.remove(firstNotification, onFirstSync);
Svc.Obs.add(secondNotification, onSecondSync);
@@ -137,31 +137,36 @@ function syncAndExpectNodeReassignment(s
Svc.Obs.remove(secondNotification, onSecondSync);
Service.scheduler.clearSyncTriggers();
// Make absolutely sure that any event listeners are done with their work
// before we proceed.
waitForZeroTimer(function () {
_("Second sync nextTick.");
do_check_true(nodeFetched);
- Service.startOver();
- server.stop(deferred.resolve);
+ Service.startOver().then(
+ () => server.promiseStop()
+ ).then(deferred.resolve);
});
}
Svc.Obs.add(firstNotification, onFirstSync);
- Service.sync();
+ return Service.sync();
}
+ let deferredRest = Promise.defer();
// Make sure that it works!
let request = new RESTRequest(url);
request.get(function () {
do_check_eq(request.response.status, 401);
- Utils.nextTick(onwards);
+ Utils.nextTick(deferredRest.resolve);
});
+
+ yield deferredRest.promise;
+ yield onwards();
yield deferred.promise;
}
add_task(function test_momentary_401_engine() {
_("Test a failure for engine URLs that's resolved by reassignment.");
let server = yield prepareServer();
let john = server.user("johndoe");
@@ -173,17 +178,17 @@ add_task(function test_momentary_401_eng
// through a sync.
let global = {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
rotary: {version: engine.version,
syncID: engine.syncID}}
john.createCollection("meta").insert("global", global);
_("First sync to prepare server contents.");
- Service.sync();
+ yield Service.sync();
_("Setting up Rotary collection to 401.");
let rotary = john.createCollection("rotary");
let oldHandler = rotary.collectionHandler;
rotary.collectionHandler = handleReassign.bind(this, undefined);
// We want to verify that the clusterURL pref has been cleared after a 401
// inside a sync. Flag the Rotary engine to need syncing.
@@ -212,17 +217,17 @@ add_task(function test_momentary_401_eng
});
// This test ends up being a failing fetch *after we're already logged in*.
add_task(function test_momentary_401_info_collections() {
_("Test a failure for info/collections that's resolved by reassignment.");
let server = yield prepareServer();
_("First sync to prepare server contents.");
- Service.sync();
+ yield Service.sync();
// Return a 401 for info requests, particularly info/collections.
let oldHandler = server.toplevelHandlers.info;
server.toplevelHandlers.info = handleReassign;
function undo() {
_("Undoing test changes.");
server.toplevelHandlers.info = oldHandler;
@@ -236,17 +241,17 @@ add_task(function test_momentary_401_inf
});
add_task(function test_momentary_401_storage_loggedin() {
_("Test a failure for any storage URL, not just engine parts. " +
"Resolved by reassignment.");
let server = yield prepareServer();
_("Performing initial sync to ensure we are logged in.")
- Service.sync();
+ yield Service.sync();
// Return a 401 for all storage requests.
let oldHandler = server.toplevelHandlers.storage;
server.toplevelHandlers.storage = handleReassign;
function undo() {
_("Undoing test changes.");
server.toplevelHandlers.storage = oldHandler;
@@ -365,25 +370,26 @@ add_task(function test_loop_avoidance_st
Service.scheduler.clearSyncTriggers();
// Make absolutely sure that any event listeners are done with their work
// before we proceed.
waitForZeroTimer(function () {
_("Third sync nextTick.");
do_check_false(getReassigned());
do_check_true(nodeFetched);
- Service.startOver();
- server.stop(deferred.resolve);
+ Service.startOver().then(
+ () => server.promiseStop()
+ ).then(deferred.resolve);
});
}
Svc.Obs.add(firstNotification, onFirstSync);
now = Date.now();
- Service.sync();
+ yield Service.sync();
yield deferred.promise;
});
add_task(function test_loop_avoidance_engine() {
_("Test that a repeated 401 in an engine doesn't result in a sync loop " +
"if node reassignment cannot resolve the failure.");
let server = yield prepareServer();
let john = server.user("johndoe");
@@ -397,17 +403,17 @@ add_task(function test_loop_avoidance_en
// through a sync.
let global = {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
rotary: {version: engine.version,
syncID: engine.syncID}}
john.createCollection("meta").insert("global", global);
_("First sync to prepare server contents.");
- Service.sync();
+ yield Service.sync();
_("Setting up Rotary collection to 401.");
let rotary = john.createCollection("rotary");
let oldHandler = rotary.collectionHandler;
rotary.collectionHandler = handleReassign.bind(this, undefined);
// Flag the Rotary engine to need syncing.
john.collection("rotary").timestamp += 1000;
@@ -420,18 +426,19 @@ add_task(function test_loop_avoidance_en
function beforeSuccessfulSync() {
_("Undoing test changes.");
rotary.collectionHandler = oldHandler;
}
function afterSuccessfulSync() {
Svc.Obs.remove("weave:service:login:start", onLoginStart);
- Service.startOver();
- server.stop(deferred.resolve);
+ Service.startOver().then(
+ () => server.promiseStop()
+ ).then(deferred.resolve);
}
let firstNotification = "weave:service:sync:finish";
let secondNotification = "weave:service:sync:finish";
let thirdNotification = "weave:service:sync:finish";
let nodeFetched = false;
@@ -512,11 +519,11 @@ add_task(function test_loop_avoidance_en
do_check_true(nodeFetched);
afterSuccessfulSync();
});
}
Svc.Obs.add(firstNotification, onFirstSync);
now = Date.now();
- Service.sync();
+ yield Service.sync();
yield deferred.promise;
});
new file mode 100644
--- /dev/null
+++ b/services/sync/tests/unit/test_password_mpenabled.js
@@ -0,0 +1,137 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/stages/enginesync.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-sync/engines/passwords.js");
+Cu.import("resource://services-sync/service.js");
+Cu.import("resource://testing-common/services/sync/utils.js");
+
+function run_test() {
+ initTestLogging("Trace");
+ run_next_test();
+}
+
+add_task(function test_simple() {
+ ensureLegacyIdentityManager();
+ // Stub fxAccountsEnabled
+ let xpcs = Cc["@mozilla.org/weave/service;1"]
+ .getService(Components.interfaces.nsISupports)
+ .wrappedJSObject;
+ let fxaEnabledGetter = xpcs.__lookupGetter__("fxAccountsEnabled");
+ xpcs.__defineGetter__("fxAccountsEnabled", () => true);
+
+ // Stub mpEnabled.
+ let mpEnabledF = Utils.mpEnabled;
+ let mpEnabled = false;
+ Utils.mpEnabled = function() mpEnabled;
+
+ let manager = Service.engineManager;
+
+ Service.engineManager.register(PasswordEngine);
+ let engine = Service.engineManager.get("passwords");
+ let wipeCount = 0;
+ let engineWipeServerF = engine.wipeServer;
+ engine.wipeServer = function() {
+ ++wipeCount;
+ return Promise.resolve();
+ };
+
+ // A server for the metadata.
+ let server = new SyncServer();
+ let johndoe = server.registerUser("johndoe", "password");
+ johndoe.createContents({
+ meta: {global: {engines: {passwords: {version: engine.version,
+ syncID: engine.syncID}}}},
+ crypto: {},
+ clients: {}
+ });
+ server.start();
+ setBasicCredentials("johndoe", "password", "abcdeabcdeabcdeabcdeabcdea");
+ Service.serverURL = server.baseURI;
+ Service.clusterURL = server.baseURI;
+
+ let engineSync = new EngineSynchronizer(Service);
+ engineSync._log.level = Log.Level.Trace;
+
+ function assertEnabled(expected, message) {
+ Assert.strictEqual(engine.enabled, expected, message);
+ // The preference *must* reflect the actual state.
+ Assert.strictEqual(Svc.Prefs.get("engine." + engine.prefName), expected,
+ message + " (pref should match enabled state)");
+ }
+
+ try {
+ assertEnabled(true, "password engine should be enabled by default");
+ let engineMeta = yield Service.recordManager.get(engine.metaURL);
+ // This engine should be in the meta/global
+ Assert.notStrictEqual(engineMeta.payload.engines[engine.name], undefined,
+ "The engine should appear in the metadata");
+ Assert.ok(!engineMeta.changed, "the metadata for the password engine hasn't changed");
+
+ // (pretend to) enable a master-password
+ mpEnabled = true;
+ // The password engine should be locally disabled...
+ assertEnabled(false, "if mp is locked the engine should be disabled");
+ // ...but not declined.
+ Assert.ok(!manager.isDeclined("passwords"), "password engine is not declined");
+ // Next time a sync would happen, we call _updateEnabledEngines(), which
+ // would remove the engine from the metadata - call that now.
+ yield engineSync._updateEnabledEngines();
+ // The global meta should no longer list the engine.
+ engineMeta = yield Service.recordManager.get(engine.metaURL);
+ Assert.strictEqual(engineMeta.payload.engines[engine.name], undefined,
+ "The engine should have vanished");
+ // And we should have wiped the server data.
+ Assert.strictEqual(wipeCount, 1, "wipeServer should have been called");
+
+ // Now simulate an incoming meta/global indicating the engine should be
+ // enabled. We should fail to actually enable it - the pref should remain
+ // false and we wipe the server for anything another device might have
+ // stored.
+ let meta = {
+ payload: {
+ engines: {
+ "passwords": {"version":1,"syncID":"yfBi2v7PpFO2"},
+ },
+ },
+ };
+ yield engineSync._updateEnabledFromMeta(meta, 3, manager);
+ Assert.strictEqual(wipeCount, 2, "wipeServer should have been called");
+ Assert.ok(!manager.isDeclined("passwords"), "password engine is not declined");
+ assertEnabled(false, "engine still not enabled locally");
+
+ // Let's turn the MP off - but *not* re-enable it locally.
+ mpEnabled = false;
+ // Just disabling the MP isn't enough to force it back to enabled.
+ assertEnabled(false, "engine still not enabled locally");
+ // Another incoming metadata record with the engine enabled should cause
+ // it to be enabled locally.
+ meta = {
+ payload: {
+ engines: {
+ "passwords": 1,
+ },
+ },
+ };
+ yield engineSync._updateEnabledFromMeta(meta, 3, manager);
+ Assert.strictEqual(wipeCount, 2, "wipeServer should *not* have been called again");
+ Assert.ok(!manager.isDeclined("passwords"), "password engine is not declined");
+ // It should be enabled locally.
+ assertEnabled(true, "engine now enabled locally");
+ // Next time a sync starts it should magically re-appear in our meta/global
+ yield engine._syncStartup();
+ engineMeta = yield Service.recordManager.get(engine.metaURL);
+ Assert.equal(engineMeta.payload.engines[engine.name].version, engine.version,
+ "The engine should re-appear in the metadata");
+ } finally {
+ // restore the damage we did above...
+ engine.wipeServer = engineWipeServerF;
+ yield engine._store.wipe();
+ // Un-stub mpEnabled and fxAccountsEnabled
+ Utils.mpEnabled = mpEnabledF;
+ xpcs.__defineGetter__("fxAccountsEnabled", fxaEnabledGetter);
+ yield server.promiseStop();
+ }
+});
--- a/services/sync/tests/unit/test_password_store.js
+++ b/services/sync/tests/unit/test_password_store.js
@@ -1,54 +1,54 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://services-sync/engines/passwords.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
-function checkRecord(name, record, expectedCount, timeCreated,
+function* checkRecord(name, record, expectedCount, timeCreated,
expectedTimeCreated, timePasswordChanged,
expectedTimePasswordChanged, recordIsUpdated) {
let engine = Service.engineManager.get("passwords");
let store = engine._store;
let count = {};
let logins = Services.logins.findLogins(count, record.hostname,
record.formSubmitURL, null);
_("Record" + name + ":" + JSON.stringify(logins));
_("Count" + name + ":" + count.value);
do_check_eq(count.value, expectedCount);
if (expectedCount > 0) {
- do_check_true(!!store.getAllIDs()[record.id]);
+ do_check_true(!!(yield store.getAllIDs())[record.id]);
let stored_record = logins[0].QueryInterface(Ci.nsILoginMetaInfo);
if (timeCreated !== undefined) {
do_check_eq(stored_record.timeCreated, expectedTimeCreated);
}
if (timePasswordChanged !== undefined) {
if (recordIsUpdated) {
do_check_true(stored_record.timePasswordChanged >= expectedTimePasswordChanged);
} else {
do_check_eq(stored_record.timePasswordChanged, expectedTimePasswordChanged);
}
return stored_record.timePasswordChanged;
}
} else {
- do_check_true(!store.getAllIDs()[record.id]);
+ do_check_true(!(yield store.getAllIDs())[record.id]);
}
}
-function changePassword(name, hostname, password, expectedCount, timeCreated,
+function* changePassword(name, hostname, password, expectedCount, timeCreated,
expectedTimeCreated, timePasswordChanged,
expectedTimePasswordChanged, insert, recordIsUpdated) {
const BOGUS_GUID = "zzzzzz" + hostname;
let record = {id: BOGUS_GUID,
hostname: hostname,
formSubmitURL: hostname,
@@ -65,87 +65,92 @@ function changePassword(name, hostname,
record.timePasswordChanged = timePasswordChanged;
}
let engine = Service.engineManager.get("passwords");
let store = engine._store;
if (insert) {
- do_check_eq(store.applyIncomingBatch([record]).length, 0);
+ do_check_eq((yield store.applyIncomingBatch([record])).length, 0);
}
- return checkRecord(name, record, expectedCount, timeCreated,
+ let result = yield checkRecord(name, record, expectedCount, timeCreated,
expectedTimeCreated, timePasswordChanged,
expectedTimePasswordChanged, recordIsUpdated);
-
+ return result;
}
function test_apply_records_with_times(hostname, timeCreated, timePasswordChanged) {
// The following record is going to be inserted in the store and it needs
// to be found there. Then its timestamps are going to be compared to
// the expected values.
changePassword(" ", hostname, "password", 1, timeCreated, timeCreated,
timePasswordChanged, timePasswordChanged, true);
}
-function test_apply_multiple_records_with_times() {
+function* test_apply_multiple_records_with_times() {
// The following records are going to be inserted in the store and they need
// to be found there. Then their timestamps are going to be compared to
// the expected values.
- changePassword("A", "http://foo.a.com", "password", 1, undefined, undefined,
- undefined, undefined, true);
- changePassword("B", "http://foo.b.com", "password", 1, 1000, 1000, undefined,
- undefined, true);
- changePassword("C", "http://foo.c.com", "password", 1, undefined, undefined,
- 1000, 1000, true);
- changePassword("D", "http://foo.d.com", "password", 1, 1000, 1000, 1000,
- 1000, true);
+ yield changePassword("A", "http://foo.a.com", "password", 1, undefined, undefined,
+ undefined, undefined, true);
+ yield changePassword("B", "http://foo.b.com", "password", 1, 1000, 1000, undefined,
+ undefined, true);
+ yield changePassword("C", "http://foo.c.com", "password", 1, undefined, undefined,
+ 1000, 1000, true);
+ yield changePassword("D", "http://foo.d.com", "password", 1, 1000, 1000, 1000,
+ 1000, true);
// The following records are not going to be inserted in the store and they
// are not going to be found there.
- changePassword("NotInStoreA", "http://foo.aaaa.com", "password", 0,
- undefined, undefined, undefined, undefined, false);
- changePassword("NotInStoreB", "http://foo.bbbb.com", "password", 0, 1000,
- 1000, undefined, undefined, false);
- changePassword("NotInStoreC", "http://foo.cccc.com", "password", 0,
- undefined, undefined, 1000, 1000, false);
- changePassword("NotInStoreD", "http://foo.dddd.com", "password", 0, 1000,
- 1000, 1000, 1000, false);
+ yield changePassword("NotInStoreA", "http://foo.aaaa.com", "password", 0,
+ undefined, undefined, undefined, undefined, false);
+ yield changePassword("NotInStoreB", "http://foo.bbbb.com", "password", 0, 1000,
+ 1000, undefined, undefined, false);
+ yield changePassword("NotInStoreC", "http://foo.cccc.com", "password", 0,
+ undefined, undefined, 1000, 1000, false);
+ yield changePassword("NotInStoreD", "http://foo.dddd.com", "password", 0, 1000,
+ 1000, 1000, 1000, false);
}
-function test_apply_same_record_with_different_times() {
+function* test_apply_same_record_with_different_times() {
// The following record is going to be inserted multiple times in the store
// and it needs to be found there. Then its timestamps are going to be
// compared to the expected values.
var timePasswordChanged = 100;
- timePasswordChanged = changePassword("A", "http://a.tn", "password", 1, 100,
- 100, 100, timePasswordChanged, true);
- timePasswordChanged = changePassword("A", "http://a.tn", "password", 1, 100,
- 100, 800, timePasswordChanged, true,
- true);
- timePasswordChanged = changePassword("A", "http://a.tn", "password", 1, 500,
- 100, 800, timePasswordChanged, true,
- true);
- timePasswordChanged = changePassword("A", "http://a.tn", "password2", 1, 500,
- 100, 1536213005222, timePasswordChanged,
- true, true);
- timePasswordChanged = changePassword("A", "http://a.tn", "password2", 1, 500,
- 100, 800, timePasswordChanged, true, true);
+ timePasswordChanged = yield changePassword("A", "http://a.tn", "password", 1, 100,
+ 100, 100, timePasswordChanged, true);
+ timePasswordChanged = yield changePassword("A", "http://a.tn", "password", 1, 100,
+ 100, 800, timePasswordChanged, true,
+ true);
+ timePasswordChanged = yield changePassword("A", "http://a.tn", "password", 1, 500,
+ 100, 800, timePasswordChanged, true,
+ true);
+ timePasswordChanged = yield changePassword("A", "http://a.tn", "password2", 1, 500,
+ 100, 1536213005222, timePasswordChanged,
+ true, true);
+ timePasswordChanged = yield changePassword("A", "http://a.tn", "password2", 1, 500,
+ 100, 800, timePasswordChanged, true, true);
}
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Engine.Passwords").level = Log.Level.Trace;
Log.repository.getLogger("Sync.Store.Passwords").level = Log.Level.Trace;
+ run_next_test();
+}
+
+add_task(function* () {
+
const BOGUS_GUID_A = "zzzzzzzzzzzz";
const BOGUS_GUID_B = "yyyyyyyyyyyy";
let recordA = {id: BOGUS_GUID_A,
hostname: "http://foo.bar.com",
formSubmitURL: "http://foo.bar.com/baz",
httpRealm: "secure",
username: "john",
password: "smith",
@@ -157,18 +162,22 @@ function run_test() {
username: "john",
password: "smith",
usernameField: "username",
passwordField: "password"};
let engine = Service.engineManager.get("passwords");
let store = engine._store;
+ function* applyEnsureNoFailures(records) {
+ do_check_eq((yield store.applyIncomingBatch(records)).length, 0);
+ }
+
try {
- do_check_eq(store.applyIncomingBatch([recordA, recordB]).length, 0);
+ yield applyEnsureNoFailures([recordA, recordB]);
// Only the good record makes it to Services.logins.
let badCount = {};
let goodCount = {};
let badLogins = Services.logins.findLogins(badCount, recordA.hostname,
recordA.formSubmitURL,
recordA.httpRealm);
let goodLogins = Services.logins.findLogins(goodCount, recordB.hostname,
@@ -176,24 +185,24 @@ function run_test() {
_("Bad: " + JSON.stringify(badLogins));
_("Good: " + JSON.stringify(goodLogins));
_("Count: " + badCount.value + ", " + goodCount.value);
do_check_eq(goodCount.value, 1);
do_check_eq(badCount.value, 0);
- do_check_true(!!store.getAllIDs()[BOGUS_GUID_B]);
- do_check_true(!store.getAllIDs()[BOGUS_GUID_A]);
+ do_check_true(!!(yield store.getAllIDs())[BOGUS_GUID_B]);
+ do_check_true(!(yield store.getAllIDs())[BOGUS_GUID_A]);
- test_apply_records_with_times("http://afoo.baz.com", undefined, undefined);
- test_apply_records_with_times("http://bfoo.baz.com", 1000, undefined);
- test_apply_records_with_times("http://cfoo.baz.com", undefined, 2000);
- test_apply_records_with_times("http://dfoo.baz.com", 1000, 2000);
+ yield test_apply_records_with_times("http://afoo.baz.com", undefined, undefined);
+ yield test_apply_records_with_times("http://bfoo.baz.com", 1000, undefined);
+ yield test_apply_records_with_times("http://cfoo.baz.com", undefined, 2000);
+ yield test_apply_records_with_times("http://dfoo.baz.com", 1000, 2000);
- test_apply_multiple_records_with_times();
+ yield test_apply_multiple_records_with_times();
- test_apply_same_record_with_different_times();
+ yield test_apply_same_record_with_different_times();
} finally {
- store.wipe();
+ yield store.wipe();
}
-}
\ No newline at end of file
+});
--- a/services/sync/tests/unit/test_places_guid_downgrade.js
+++ b/services/sync/tests/unit/test_places_guid_downgrade.js
@@ -82,17 +82,17 @@ add_test(function test_initial_state() {
// Check our schema version to make sure it is actually at 10.
do_check_eq(db.schemaVersion, 10);
db.close();
run_next_test();
});
-add_test(function test_history_guids() {
+add_task(function* test_history_guids() {
let engine = new HistoryEngine(Service);
let store = engine._store;
let places = [
{
uri: fxuri,
title: "Get Firefox!",
visits: [{
@@ -104,112 +104,111 @@ add_test(function test_history_guids() {
uri: tburi,
title: "Get Thunderbird!",
visits: [{
visitDate: Date.now() * 1000,
transitionType: Ci.nsINavHistoryService.TRANSITION_LINK
}]
}
];
- PlacesUtils.asyncHistory.updatePlaces(places, {
- handleError: function handleError() {
- do_throw("Unexpected error in adding visit.");
- },
- handleResult: function handleResult() {},
- handleCompletion: onVisitAdded
+ yield new Promise(resolve => {
+ PlacesUtils.asyncHistory.updatePlaces(places, {
+ handleError: function handleError() {
+ do_throw("Unexpected error in adding visit.");
+ },
+ handleResult: function handleResult() {},
+ handleCompletion: resolve,
+ });
});
- function onVisitAdded() {
- let fxguid = store.GUIDForUri(fxuri, true);
- let tbguid = store.GUIDForUri(tburi, true);
+ function* onVisitAdded() {
+ let fxguid = yield store.GUIDForUri(fxuri, true);
+ let tbguid = yield store.GUIDForUri(tburi, true);
dump("fxguid: " + fxguid + "\n");
dump("tbguid: " + tbguid + "\n");
_("History: Verify GUIDs are added to the guid column.");
let connection = PlacesUtils.history
.QueryInterface(Ci.nsPIPlacesDatabase)
.DBConnection;
let stmt = connection.createAsyncStatement(
"SELECT id FROM moz_places WHERE guid = :guid");
stmt.params.guid = fxguid;
- let result = Async.querySpinningly(stmt, ["id"]);
+ let result = yield Async.promiseQuery(stmt, ["id"]);
do_check_eq(result.length, 1);
stmt.params.guid = tbguid;
- result = Async.querySpinningly(stmt, ["id"]);
+ result = yield Async.promiseQuery(stmt, ["id"]);
do_check_eq(result.length, 1);
stmt.finalize();
_("History: Verify GUIDs weren't added to annotations.");
stmt = connection.createAsyncStatement(
"SELECT a.content AS guid FROM moz_annos a WHERE guid = :guid");
stmt.params.guid = fxguid;
- result = Async.querySpinningly(stmt, ["guid"]);
+ result = yield Async.promiseQuery(stmt, ["guid"]);
do_check_eq(result.length, 0);
stmt.params.guid = tbguid;
- result = Async.querySpinningly(stmt, ["guid"]);
+ result = yield Async.promiseQuery(stmt, ["guid"]);
do_check_eq(result.length, 0);
stmt.finalize();
-
- run_next_test();
}
+ yield onVisitAdded();
});
-add_test(function test_bookmark_guids() {
+add_task(function* test_bookmark_guids() {
let engine = new BookmarksEngine(Service);
let store = engine._store;
let fxid = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder,
fxuri,
PlacesUtils.bookmarks.DEFAULT_INDEX,
"Get Firefox!");
let tbid = PlacesUtils.bookmarks.insertBookmark(
PlacesUtils.bookmarks.toolbarFolder,
tburi,
PlacesUtils.bookmarks.DEFAULT_INDEX,
"Get Thunderbird!");
- let fxguid = store.GUIDForId(fxid);
- let tbguid = store.GUIDForId(tbid);
+ let fxguid = yield store.GUIDForId(fxid);
+ let tbguid = yield store.GUIDForId(tbid);
_("Bookmarks: Verify GUIDs are added to the guid column.");
let connection = PlacesUtils.history
.QueryInterface(Ci.nsPIPlacesDatabase)
.DBConnection;
let stmt = connection.createAsyncStatement(
"SELECT id FROM moz_bookmarks WHERE guid = :guid");
stmt.params.guid = fxguid;
- let result = Async.querySpinningly(stmt, ["id"]);
+ let result = yield Async.promiseQuery(stmt, ["id"]);
do_check_eq(result.length, 1);
do_check_eq(result[0].id, fxid);
stmt.params.guid = tbguid;
- result = Async.querySpinningly(stmt, ["id"]);
+ result = yield Async.promiseQuery(stmt, ["id"]);
do_check_eq(result.length, 1);
do_check_eq(result[0].id, tbid);
stmt.finalize();
_("Bookmarks: Verify GUIDs weren't added to annotations.");
stmt = connection.createAsyncStatement(
"SELECT a.content AS guid FROM moz_items_annos a WHERE guid = :guid");
stmt.params.guid = fxguid;
- result = Async.querySpinningly(stmt, ["guid"]);
+ result = yield Async.promiseQuery(stmt, ["guid"]);
do_check_eq(result.length, 0);
stmt.params.guid = tbguid;
- result = Async.querySpinningly(stmt, ["guid"]);
+ result = yield Async.promiseQuery(stmt, ["guid"]);
do_check_eq(result.length, 0);
stmt.finalize();
-
- run_next_test();
});
function run_test() {
setPlacesDatabase("places_v10_from_v11.sqlite");
run_next_test();
}
--- a/services/sync/tests/unit/test_prefs_store.js
+++ b/services/sync/tests/unit/test_prefs_store.js
@@ -18,16 +18,20 @@ function makePersona(id) {
return {
id: id || Math.random().toString(),
name: Math.random().toString(),
headerURL: "http://localhost:1234/a"
};
}
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
let store = Service.engineManager.get("prefs")._store;
let prefs = new Preferences();
try {
_("Test fixtures.");
Svc.Prefs.set("prefs.sync.testing.int", true);
Svc.Prefs.set("prefs.sync.testing.string", true);
Svc.Prefs.set("prefs.sync.testing.bool", true);
@@ -38,31 +42,31 @@ function run_test() {
prefs.set("testing.int", 123);
prefs.set("testing.string", "ohai");
prefs.set("testing.bool", true);
prefs.set("testing.dont.change", "Please don't change me.");
prefs.set("testing.turned.off", "I won't get synced.");
prefs.set("testing.not.turned.on", "I won't get synced either!");
_("The GUID corresponds to XUL App ID.");
- let allIDs = store.getAllIDs();
+ let allIDs = yield store.getAllIDs();
let ids = Object.keys(allIDs);
do_check_eq(ids.length, 1);
do_check_eq(ids[0], PREFS_GUID);
do_check_true(allIDs[PREFS_GUID], true);
- do_check_true(store.itemExists(PREFS_GUID));
- do_check_false(store.itemExists("random-gibberish"));
+ do_check_true(yield store.itemExists(PREFS_GUID));
+ do_check_false(yield store.itemExists("random-gibberish"));
_("Unknown prefs record is created as deleted.");
- let record = store.createRecord("random-gibberish", "prefs");
+ let record = yield store.createRecord("random-gibberish", "prefs");
do_check_true(record.deleted);
_("Prefs record contains only prefs that should be synced.");
- record = store.createRecord(PREFS_GUID, "prefs");
+ record = yield store.createRecord(PREFS_GUID, "prefs");
do_check_eq(record.value["testing.int"], 123);
do_check_eq(record.value["testing.string"], "ohai");
do_check_eq(record.value["testing.bool"], true);
do_check_eq(record.value["testing.nonexistent"], null);
do_check_false("testing.turned.off" in record.value);
do_check_false("testing.not.turned.on" in record.value);
_("Prefs record contains pref sync prefs too.");
@@ -79,17 +83,17 @@ function run_test() {
record.value = {
"testing.int": 42,
"testing.string": "im in ur prefs",
"testing.bool": false,
"testing.deleteme": null,
"testing.somepref": "im a new pref from other device",
"services.sync.prefs.sync.testing.somepref": true
};
- store.update(record);
+ yield store.update(record);
do_check_eq(prefs.get("testing.int"), 42);
do_check_eq(prefs.get("testing.string"), "im in ur prefs");
do_check_eq(prefs.get("testing.bool"), false);
do_check_eq(prefs.get("testing.deleteme"), undefined);
do_check_eq(prefs.get("testing.dont.change"), "Please don't change me.");
do_check_eq(prefs.get("testing.somepref"), "im a new pref from other device");
do_check_eq(Svc.Prefs.get("prefs.sync.testing.somepref"), true);
@@ -102,34 +106,34 @@ function run_test() {
let persona1 = makePersona();
let persona2 = makePersona();
let usedThemes = JSON.stringify([persona1, persona2]);
record.value = {
"lightweightThemes.selectedThemeID": persona1.id,
"lightweightThemes.usedThemes": usedThemes
};
- store.update(record);
+ yield store.update(record);
do_check_eq(prefs.get("lightweightThemes.selectedThemeID"), persona1.id);
do_check_true(Utils.deepEquals(LightweightThemeManager.currentTheme,
persona1));
_("Disable persona");
record.value = {
"lightweightThemes.selectedThemeID": null,
"lightweightThemes.usedThemes": usedThemes
};
- store.update(record);
+ yield store.update(record);
do_check_false(!!prefs.get("lightweightThemes.selectedThemeID"));
do_check_eq(LightweightThemeManager.currentTheme, null);
_("Only the current app's preferences are applied.");
record = new PrefRec("prefs", "some-fake-app");
record.value = {
"testing.int": 98
};
- store.update(record);
+ yield store.update(record);
do_check_eq(prefs.get("testing.int"), 42);
} finally {
prefs.resetBranch("");
}
-}
+});
--- a/services/sync/tests/unit/test_prefs_tracker.js
+++ b/services/sync/tests/unit/test_prefs_tracker.js
@@ -4,16 +4,20 @@
Cu.import("resource://gre/modules/Preferences.jsm");
Cu.import("resource://services-common/utils.js");
Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/engines/prefs.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
let engine = Service.engineManager.get("prefs");
let tracker = engine._tracker;
// Don't write out by default.
tracker.persistChangedIDs = false;
let prefs = new Preferences();
@@ -23,26 +27,26 @@ function run_test() {
do_check_eq(Svc.Prefs.get("engine.prefs.modified"), undefined);
do_check_false(tracker.modified);
tracker.modified = true;
do_check_eq(Svc.Prefs.get("engine.prefs.modified"), true);
do_check_true(tracker.modified);
_("Engine's getChangedID() just returns the one GUID we have.");
- let changedIDs = engine.getChangedIDs();
+ let changedIDs = yield engine.getChangedIDs();
let ids = Object.keys(changedIDs);
do_check_eq(ids.length, 1);
do_check_eq(ids[0], CommonUtils.encodeBase64URL(Services.appinfo.ID));
Svc.Prefs.set("engine.prefs.modified", false);
do_check_false(tracker.modified);
_("No modified state, so no changed IDs.");
- do_check_empty(engine.getChangedIDs());
+ do_check_empty(yield engine.getChangedIDs());
_("Initial score is 0");
do_check_eq(tracker.score, 0);
_("Test fixtures.");
Svc.Prefs.set("prefs.sync.testing.int", true);
_("Test fixtures haven't upped the tracker score yet because it hasn't started tracking yet.");
@@ -50,39 +54,39 @@ function run_test() {
_("Tell the tracker to start tracking changes.");
Svc.Obs.notify("weave:engine:start-tracking");
prefs.set("testing.int", 23);
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE);
do_check_eq(tracker.modified, true);
_("Clearing changed IDs reset modified status.");
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
do_check_eq(tracker.modified, false);
_("Resetting a pref ups the score, too.");
prefs.reset("testing.int");
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE * 2);
do_check_eq(tracker.modified, true);
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
_("So does changing a pref sync pref.");
Svc.Prefs.set("prefs.sync.testing.int", false);
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE * 3);
do_check_eq(tracker.modified, true);
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
_("Now that the pref sync pref has been flipped, changes to it won't be picked up.");
prefs.set("testing.int", 42);
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE * 3);
do_check_eq(tracker.modified, false);
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
_("Changing some other random pref won't do anything.");
prefs.set("testing.other", "blergh");
do_check_eq(tracker.score, SCORE_INCREMENT_XLARGE * 3);
do_check_eq(tracker.modified, false);
} finally {
Svc.Obs.notify("weave:engine:stop-tracking");
prefs.resetBranch("");
}
-}
+});
--- a/services/sync/tests/unit/test_records_wbo.js
+++ b/services/sync/tests/unit/test_records_wbo.js
@@ -4,17 +4,17 @@
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/identity.js");
Cu.import("resource://services-sync/resource.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
-function test_toJSON() {
+add_test(function test_toJSON() {
_("Create a record, for now without a TTL.");
let wbo = new WBORecord("coll", "a_record");
wbo.modified = 12345;
wbo.sortindex = 42;
wbo.payload = {};
_("Verify that the JSON representation contains the WBO properties, but not TTL.");
let json = JSON.parse(JSON.stringify(wbo));
@@ -22,65 +22,64 @@ function test_toJSON() {
do_check_eq(json.sortindex, 42);
do_check_eq(json.payload, "{}");
do_check_false("ttl" in json);
_("Set a TTL, make sure it's present in the JSON representation.");
wbo.ttl = 30*60;
json = JSON.parse(JSON.stringify(wbo));
do_check_eq(json.ttl, 30*60);
-}
+ run_next_test();
+});
-function test_fetch() {
+add_task(function* test_fetch() {
let record = {id: "asdf-1234-asdf-1234",
modified: 2454725.98283,
payload: JSON.stringify({cheese: "roquefort"})};
let record2 = {id: "record2",
modified: 2454725.98284,
payload: JSON.stringify({cheese: "gruyere"})};
let coll = [{id: "record2",
modified: 2454725.98284,
payload: JSON.stringify({cheese: "gruyere"})}];
_("Setting up server.");
let server = httpd_setup({
"/record": httpd_handler(200, "OK", JSON.stringify(record)),
"/record2": httpd_handler(200, "OK", JSON.stringify(record2)),
"/coll": httpd_handler(200, "OK", JSON.stringify(coll))
});
- do_test_pending();
try {
_("Fetching a WBO record");
let rec = new WBORecord("coll", "record");
- rec.fetch(Service.resource(server.baseURI + "/record"));
+ yield rec.fetch(Service.resource(server.baseURI + "/record"));
do_check_eq(rec.id, "asdf-1234-asdf-1234"); // NOT "record"!
do_check_eq(rec.modified, 2454725.98283);
do_check_eq(typeof(rec.payload), "object");
do_check_eq(rec.payload.cheese, "roquefort");
_("Fetching a WBO record using the record manager");
- let rec2 = Service.recordManager.get(server.baseURI + "/record2");
+ let rec2 = yield Service.recordManager.get(server.baseURI + "/record2");
do_check_eq(rec2.id, "record2");
do_check_eq(rec2.modified, 2454725.98284);
do_check_eq(typeof(rec2.payload), "object");
do_check_eq(rec2.payload.cheese, "gruyere");
do_check_eq(Service.recordManager.response.status, 200);
// Testing collection extraction.
_("Extracting collection.");
let rec3 = new WBORecord("tabs", "foo"); // Create through constructor.
do_check_eq(rec3.collection, "tabs");
} finally {
- server.stop(do_test_finished);
+ yield promiseStopServer(server);
}
-}
+});
function run_test() {
initTestLogging("Trace");
ensureLegacyIdentityManager();
- test_toJSON();
- test_fetch();
+ run_next_test();
}
--- a/services/sync/tests/unit/test_resource.js
+++ b/services/sync/tests/unit/test_resource.js
@@ -146,17 +146,20 @@ function server_headers(metadata, respon
let body = JSON.stringify(headers);
response.setStatusLine(metadata.httpVersion, 200, "OK");
response.bodyOutputStream.write(body, body.length);
}
function run_test() {
initTestLogging("Trace");
- do_test_pending();
+ run_next_test();
+}
+
+add_task(function* () {
let logger = Log.repository.getLogger('Test');
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
let server = httpd_setup({
"/open": server_open,
"/protected": server_protected,
"/404": server_404,
@@ -174,17 +177,17 @@ function run_test() {
Svc.Prefs.set("network.numRetries", 1); // speed up test
// This apparently has to come first in order for our PAC URL to be hit.
// Don't put any other HTTP requests earlier in the file!
_("Testing handling of proxy auth redirection.");
PACSystemSettings.PACURI = server.baseURI + "/pac1";
installFakePAC();
let proxiedRes = new Resource(server.baseURI + "/open");
- let content = proxiedRes.get();
+ let content = yield proxiedRes.get();
do_check_true(pacFetched);
do_check_true(fetched);
do_check_eq(content, "This path exists");
pacFetched = fetched = false;
uninstallFakePAC();
_("Resource object members");
let res = new Resource(server.baseURI + "/open");
@@ -193,17 +196,17 @@ function run_test() {
do_check_eq(res.spec, server.baseURI + "/open");
do_check_eq(typeof res.headers, "object");
do_check_eq(typeof res.authenticator, "object");
// Initially res.data is null since we haven't performed a GET or
// PUT/POST request yet.
do_check_eq(res.data, null);
_("GET a non-password-protected resource");
- content = res.get();
+ content = yield res.get();
do_check_eq(content, "This path exists");
do_check_eq(content.status, 200);
do_check_true(content.success);
// res.data has been updated with the result from the request
do_check_eq(res.data, content);
// Observe logging messages.
logger = res._log;
@@ -231,195 +234,195 @@ function run_test() {
_("Test that the BasicAuthenticator doesn't screw up header case.");
let res1 = new Resource(server.baseURI + "/foo");
res1.setHeader("Authorization", "Basic foobar");
do_check_eq(res1.headers["authorization"], "Basic foobar");
_("GET a password protected resource (test that it'll fail w/o pass, no throw)");
let res2 = new Resource(server.baseURI + "/protected");
- content = res2.get();
+ content = yield res2.get();
do_check_eq(content, "This path exists and is protected - failed");
do_check_eq(content.status, 401);
do_check_false(content.success);
_("GET a password protected resource");
let res3 = new Resource(server.baseURI + "/protected");
let identity = new IdentityManager();
let auth = identity.getBasicResourceAuthenticator("guest", "guest");
res3.authenticator = auth;
do_check_eq(res3.authenticator, auth);
- content = res3.get();
+ content = yield res3.get();
do_check_eq(content, "This path exists and is protected");
do_check_eq(content.status, 200);
do_check_true(content.success);
_("GET a non-existent resource (test that it'll fail, but not throw)");
let res4 = new Resource(server.baseURI + "/404");
- content = res4.get();
+ content = yield res4.get();
do_check_eq(content, "File not found");
do_check_eq(content.status, 404);
do_check_false(content.success);
// Check some headers of the 404 response
do_check_eq(content.headers.connection, "close");
do_check_eq(content.headers.server, "httpd.js");
do_check_eq(content.headers["content-length"], 14);
_("PUT to a resource (string)");
let res5 = new Resource(server.baseURI + "/upload");
- content = res5.put(JSON.stringify(sample_data));
+ content = yield res5.put(JSON.stringify(sample_data));
do_check_eq(content, "Valid data upload via PUT");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("PUT to a resource (object)");
- content = res5.put(sample_data);
+ content = yield res5.put(sample_data);
do_check_eq(content, "Valid data upload via PUT");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("PUT without data arg (uses resource.data) (string)");
res5.data = JSON.stringify(sample_data);
- content = res5.put();
+ content = yield res5.put();
do_check_eq(content, "Valid data upload via PUT");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("PUT without data arg (uses resource.data) (object)");
res5.data = sample_data;
- content = res5.put();
+ content = yield res5.put();
do_check_eq(content, "Valid data upload via PUT");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("POST to a resource (string)");
- content = res5.post(JSON.stringify(sample_data));
+ content = yield res5.post(JSON.stringify(sample_data));
do_check_eq(content, "Valid data upload via POST");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("POST to a resource (object)");
- content = res5.post(sample_data);
+ content = yield res5.post(sample_data);
do_check_eq(content, "Valid data upload via POST");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("POST without data arg (uses resource.data) (string)");
res5.data = JSON.stringify(sample_data);
- content = res5.post();
+ content = yield res5.post();
do_check_eq(content, "Valid data upload via POST");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("POST without data arg (uses resource.data) (object)");
res5.data = sample_data;
- content = res5.post();
+ content = yield res5.post();
do_check_eq(content, "Valid data upload via POST");
do_check_eq(content.status, 200);
do_check_eq(res5.data, content);
_("DELETE a resource");
let res6 = new Resource(server.baseURI + "/delete");
- content = res6.delete();
+ content = yield res6.delete();
do_check_eq(content, "This resource has been deleted")
do_check_eq(content.status, 200);
_("JSON conversion of response body");
let res7 = new Resource(server.baseURI + "/json");
- content = res7.get();
+ content = yield res7.get();
do_check_eq(content, JSON.stringify(sample_data));
do_check_eq(content.status, 200);
do_check_eq(JSON.stringify(content.obj), JSON.stringify(sample_data));
_("X-Weave-Timestamp header updates AsyncResource.serverTime");
// Before having received any response containing the
// X-Weave-Timestamp header, AsyncResource.serverTime is null.
do_check_eq(AsyncResource.serverTime, null);
let res8 = new Resource(server.baseURI + "/timestamp");
- content = res8.get();
+ content = yield res8.get();
do_check_eq(AsyncResource.serverTime, TIMESTAMP);
_("GET: no special request headers");
let res9 = new Resource(server.baseURI + "/headers");
- content = res9.get();
+ content = yield res9.get();
do_check_eq(content, '{}');
_("PUT: Content-Type defaults to text/plain");
- content = res9.put('data');
+ content = yield res9.put('data');
do_check_eq(content, JSON.stringify({"content-type": "text/plain"}));
_("POST: Content-Type defaults to text/plain");
- content = res9.post('data');
+ content = yield res9.post('data');
do_check_eq(content, JSON.stringify({"content-type": "text/plain"}));
_("setHeader(): setting simple header");
res9.setHeader('X-What-Is-Weave', 'awesome');
do_check_eq(res9.headers['x-what-is-weave'], 'awesome');
- content = res9.get();
+ content = yield res9.get();
do_check_eq(content, JSON.stringify({"x-what-is-weave": "awesome"}));
_("setHeader(): setting multiple headers, overwriting existing header");
res9.setHeader('X-WHAT-is-Weave', 'more awesomer');
res9.setHeader('X-Another-Header', 'hello world');
do_check_eq(res9.headers['x-what-is-weave'], 'more awesomer');
do_check_eq(res9.headers['x-another-header'], 'hello world');
- content = res9.get();
+ content = yield res9.get();
do_check_eq(content, JSON.stringify({"x-another-header": "hello world",
"x-what-is-weave": "more awesomer"}));
_("Setting headers object");
res9.headers = {};
- content = res9.get();
+ content = yield res9.get();
do_check_eq(content, "{}");
_("PUT/POST: override default Content-Type");
res9.setHeader('Content-Type', 'application/foobar');
do_check_eq(res9.headers['content-type'], 'application/foobar');
- content = res9.put('data');
+ content = yield res9.put('data');
do_check_eq(content, JSON.stringify({"content-type": "application/foobar"}));
- content = res9.post('data');
+ content = yield res9.post('data');
do_check_eq(content, JSON.stringify({"content-type": "application/foobar"}));
_("X-Weave-Backoff header notifies observer");
let backoffInterval;
function onBackoff(subject, data) {
backoffInterval = subject;
}
Observers.add("weave:service:backoff:interval", onBackoff);
let res10 = new Resource(server.baseURI + "/backoff");
- content = res10.get();
+ content = yield res10.get();
do_check_eq(backoffInterval, 600);
_("X-Weave-Quota-Remaining header notifies observer on successful requests.");
let quotaValue;
function onQuota(subject, data) {
quotaValue = subject;
}
Observers.add("weave:service:quota:remaining", onQuota);
res10 = new Resource(server.baseURI + "/quota-error");
- content = res10.get();
+ content = yield res10.get();
do_check_eq(content.status, 400);
do_check_eq(quotaValue, undefined); // HTTP 400, so no observer notification.
res10 = new Resource(server.baseURI + "/quota-notice");
- content = res10.get();
+ content = yield res10.get();
do_check_eq(content.status, 200);
do_check_eq(quotaValue, 1048576);
_("Error handling in _request() preserves exception information");
let error;
let res11 = new Resource("http://localhost:12345/does/not/exist");
try {
- content = res11.get();
+ content = yield res11.get();
} catch(ex) {
error = ex;
}
do_check_eq(error.result, Cr.NS_ERROR_CONNECTION_REFUSED);
do_check_eq(error.message, "NS_ERROR_CONNECTION_REFUSED");
do_check_eq(typeof error.stack, "string");
_("Checking handling of errors in onProgress.");
@@ -429,17 +432,17 @@ function run_test() {
Services.io.newURI("::::::::", null, null);
};
res18._onProgress = onProgress;
let oldWarn = res18._log.warn;
let warnings = [];
res18._log.warn = function(msg) { warnings.push(msg) };
error = undefined;
try {
- content = res18.get();
+ content = yield res18.get();
} catch (ex) {
error = ex;
}
// It throws and logs.
do_check_eq(error.result, Cr.NS_ERROR_MALFORMED_URI);
do_check_eq(error, "Error: NS_ERROR_MALFORMED_URI");
do_check_eq(warnings.pop(),
@@ -452,17 +455,17 @@ function run_test() {
throw "BOO!";
};
res18._onProgress = onProgress;
oldWarn = res18._log.warn;
warnings = [];
res18._log.warn = function(msg) { warnings.push(msg) };
error = undefined;
try {
- content = res18.get();
+ content = yield res18.get();
} catch (ex) {
error = ex;
}
// It throws and logs.
do_check_eq(error.result, Cr.NS_ERROR_XPC_JS_THREW_STRING);
do_check_eq(error, "Error: NS_ERROR_XPC_JS_THREW_STRING");
do_check_eq(warnings.pop(),
@@ -470,17 +473,17 @@ function run_test() {
server.baseURI + "/json");
_("Ensure channel timeouts are thrown appropriately.");
let res19 = new Resource(server.baseURI + "/json");
res19.ABORT_TIMEOUT = 0;
error = undefined;
try {
- content = res19.get();
+ content = yield res19.get();
} catch (ex) {
error = ex;
}
do_check_eq(error.result, Cr.NS_ERROR_NET_TIMEOUT);
_("Testing URI construction.");
let args = [];
args.push("newer=" + 1234);
@@ -490,10 +493,10 @@ function run_test() {
let query = "?" + args.join("&");
let uri1 = Utils.makeURI("http://foo/" + query)
.QueryInterface(Ci.nsIURL);
let uri2 = Utils.makeURI("http://foo/")
.QueryInterface(Ci.nsIURL);
uri2.query = query;
do_check_eq(uri1.query, uri2.query);
- server.stop(do_test_finished);
-}
+ yield promiseStopServer(server);
+});
--- a/services/sync/tests/unit/test_resource_header.js
+++ b/services/sync/tests/unit/test_resource_header.js
@@ -41,25 +41,25 @@ function triggerRedirect() {
"}";
let prefsService = Cc["@mozilla.org/preferences-service;1"].getService(Ci.nsIPrefService);
let prefs = prefsService.getBranch("network.proxy.");
prefs.setIntPref("type", 2);
prefs.setCharPref("autoconfig_url", "data:text/plain," + PROXY_FUNCTION);
}
-add_test(function test_headers_copied() {
+add_task(function* test_headers_copied() {
triggerRedirect();
_("Issuing request.");
let resource = new Resource(TEST_URL);
resource.setHeader("Authorization", "Basic foobar");
resource.setHeader("X-Foo", "foofoo");
- let result = resource.get(TEST_URL);
+ let result = yield resource.get(TEST_URL);
_("Result: " + result);
do_check_eq(result, BODY);
do_check_eq(auth, "Basic foobar");
do_check_eq(foo, "foofoo");
- httpServer.stop(run_next_test);
+ yield promiseStopServer(httpServer);
});
--- a/services/sync/tests/unit/test_resource_ua.js
+++ b/services/sync/tests/unit/test_resource_ua.js
@@ -39,23 +39,22 @@ function run_test() {
expectedUA = Services.appinfo.name + "/" + Services.appinfo.version +
" FxSync/" + WEAVE_VERSION + "." +
Services.appinfo.appBuildID;
run_next_test();
}
-add_test(function test_fetchInfo() {
+add_task(function* test_fetchInfo() {
_("Testing _fetchInfo.");
- Service._fetchInfo();
+ yield Service._fetchInfo();
_("User-Agent: " + ua);
do_check_eq(ua, expectedUA + ".desktop");
ua = "";
- run_next_test();
});
add_test(function test_desktop_post() {
_("Testing direct Resource POST.");
let r = new AsyncResource(server.baseURI + "/1.1/johndoe/storage/meta/global");
r.post("foo=bar", function (error, content) {
_("User-Agent: " + ua);
do_check_eq(ua, expectedUA + ".desktop");
--- a/services/sync/tests/unit/test_score_triggers.js
+++ b/services/sync/tests/unit/test_score_triggers.js
@@ -73,77 +73,76 @@ add_test(function test_tracker_score_upd
do_check_eq(scoreUpdated, 1);
} finally {
Svc.Obs.remove("weave:engine:score:updated", onScoreUpdated);
tracker.resetScore();
run_next_test();
}
});
-add_test(function test_sync_triggered() {
+add_task(function* test_sync_triggered() {
let server = sync_httpd_setup();
setUp(server);
- Service.login();
+ yield Service.login();
Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
- Svc.Obs.add("weave:service:sync:finish", function onSyncFinish() {
- Svc.Obs.remove("weave:service:sync:finish", onSyncFinish);
- _("Sync completed!");
- server.stop(run_next_test);
- });
+ let promiseFinished = promiseOneObserver("weave:service:sync:finish");
do_check_eq(Status.login, LOGIN_SUCCEEDED);
tracker.score += SCORE_INCREMENT_XLARGE;
+ yield promiseFinished;
+ yield promiseStopServer(server);
});
-add_test(function test_clients_engine_sync_triggered() {
+add_task(function* test_clients_engine_sync_triggered() {
_("Ensure that client engine score changes trigger a sync.");
// The clients engine is not registered like other engines. Therefore,
// it needs special treatment throughout the code. Here, we verify the
// global score tracker gives it that treatment. See bug 676042 for more.
let server = sync_httpd_setup();
setUp(server);
- Service.login();
+ yield Service.login();
const TOPIC = "weave:service:sync:finish";
- Svc.Obs.add(TOPIC, function onSyncFinish() {
- Svc.Obs.remove(TOPIC, onSyncFinish);
- _("Sync due to clients engine change completed.");
- server.stop(run_next_test);
- });
+ let promiseFinished = promiseOneObserver(TOPIC);
Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
do_check_eq(Status.login, LOGIN_SUCCEEDED);
Service.clientsEngine._tracker.score += SCORE_INCREMENT_XLARGE;
+ yield promiseFinished;
+ yield promiseStopServer(server);
});
-add_test(function test_incorrect_credentials_sync_not_triggered() {
+add_task(function* test_incorrect_credentials_sync_not_triggered() {
_("Ensure that score changes don't trigger a sync if Status.login != LOGIN_SUCCEEDED.");
let server = sync_httpd_setup();
setUp(server);
// Ensure we don't actually try to sync.
function onSyncStart() {
do_throw("Should not get here!");
}
Svc.Obs.add("weave:service:sync:start", onSyncStart);
// First wait >100ms (nsITimers can take up to that much time to fire, so
// we can account for the timer in delayedAutoconnect) and then one event
// loop tick (to account for a possible call to weave:service:sync:start).
- Utils.namedTimer(function() {
- Utils.nextTick(function() {
- Svc.Obs.remove("weave:service:sync:start", onSyncStart);
-
- do_check_eq(Status.login, LOGIN_FAILED_LOGIN_REJECTED);
-
- Service.startOver();
- server.stop(run_next_test);
- });
- }, 150, {}, "timer");
+ let promiseWaited = new Promise(resolve => {
+ Utils.namedTimer(function() {
+ Utils.nextTick(function() {
+ Svc.Obs.remove("weave:service:sync:start", onSyncStart);
+ resolve();
+ });
+ }, 150, {}, "timer");
+ });
// Faking incorrect credentials to prevent score update.
Status.login = LOGIN_FAILED_LOGIN_REJECTED;
tracker.score += SCORE_INCREMENT_XLARGE;
+ yield promiseWaited;
+ do_check_eq(Status.login, LOGIN_FAILED_LOGIN_REJECTED);
+
+ yield Service.startOver();
+ yield promiseStopServer(server);
});
--- a/services/sync/tests/unit/test_service_cluster.js
+++ b/services/sync/tests/unit/test_service_cluster.js
@@ -1,110 +1,110 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
-function do_check_throws(func) {
+function* do_check_rejects(func) {
var raised = false;
try {
- func();
+ yield func();
} catch (ex) {
raised = true;
}
do_check_true(raised);
}
-add_test(function test_findCluster() {
+add_task(function* test_findCluster() {
_("Test Service._findCluster()");
let server;
ensureLegacyIdentityManager();
try {
_("_findCluster() throws on network errors (e.g. connection refused).");
- do_check_throws(function() {
+ yield do_check_rejects(function*() {
Service.serverURL = "http://dummy:9000/";
Service.identity.account = "johndoe";
- Service._clusterManager._findCluster();
+ yield Service._clusterManager._findCluster();
});
server = httpd_setup({
"/user/1.0/johndoe/node/weave": httpd_handler(200, "OK", "http://weave.user.node/"),
"/user/1.0/jimdoe/node/weave": httpd_handler(200, "OK", "null"),
"/user/1.0/janedoe/node/weave": httpd_handler(404, "Not Found", "Not Found"),
"/user/1.0/juliadoe/node/weave": httpd_handler(400, "Bad Request", "Bad Request"),
"/user/1.0/joedoe/node/weave": httpd_handler(500, "Server Error", "Server Error")
});
Service.serverURL = server.baseURI;
Service.identity.account = "johndoe";
_("_findCluster() returns the user's cluster node");
- let cluster = Service._clusterManager._findCluster();
+ let cluster = yield Service._clusterManager._findCluster();
do_check_eq(cluster, "http://weave.user.node/");
_("A 'null' response is converted to null.");
Service.identity.account = "jimdoe";
- cluster = Service._clusterManager._findCluster();
+ cluster = yield Service._clusterManager._findCluster();
do_check_eq(cluster, null);
_("If a 404 is encountered, the server URL is taken as the cluster URL");
Service.identity.account = "janedoe";
- cluster = Service._clusterManager._findCluster();
+ cluster = yield Service._clusterManager._findCluster();
do_check_eq(cluster, Service.serverURL);
_("A 400 response will throw an error.");
Service.identity.account = "juliadoe";
- do_check_throws(function() {
- Service._clusterManager._findCluster();
+ yield do_check_rejects(function*() {
+ yield Service._clusterManager._findCluster();
});
_("Any other server response (e.g. 500) will throw an error.");
Service.identity.account = "joedoe";
- do_check_throws(function() {
- Service._clusterManager._findCluster();
+ yield do_check_rejects(function*() {
+ yield Service._clusterManager._findCluster();
});
} finally {
Svc.Prefs.resetBranch("");
if (server) {
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
}
});
-add_test(function test_setCluster() {
+add_task(function* test_setCluster() {
_("Test Service._setCluster()");
let server = httpd_setup({
"/user/1.0/johndoe/node/weave": httpd_handler(200, "OK", "http://weave.user.node/"),
"/user/1.0/jimdoe/node/weave": httpd_handler(200, "OK", "null")
});
try {
Service.serverURL = server.baseURI;
Service.identity.account = "johndoe";
_("Check initial state.");
do_check_eq(Service.clusterURL, "");
_("Set the cluster URL.");
- do_check_true(Service._clusterManager.setCluster());
+ do_check_true(yield Service._clusterManager.setCluster());
do_check_eq(Service.clusterURL, "http://weave.user.node/");
_("Setting it again won't make a difference if it's the same one.");
- do_check_false(Service._clusterManager.setCluster());
+ do_check_false(yield Service._clusterManager.setCluster());
do_check_eq(Service.clusterURL, "http://weave.user.node/");
_("A 'null' response won't make a difference either.");
Service.identity.account = "jimdoe";
- do_check_false(Service._clusterManager.setCluster());
+ do_check_false(yield Service._clusterManager.setCluster());
do_check_eq(Service.clusterURL, "http://weave.user.node/");
} finally {
Svc.Prefs.resetBranch("");
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
});
function run_test() {
initTestLogging();
run_next_test();
}
--- a/services/sync/tests/unit/test_service_detect_upgrade.js
+++ b/services/sync/tests/unit/test_service_detect_upgrade.js
@@ -8,17 +8,17 @@ Cu.import("resource://services-sync/engi
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
Service.engineManager.register(TabEngine);
-add_test(function v4_upgrade() {
+add_task(function* v4_upgrade() {
let passphrase = "abcdeabcdeabcdeabcdeabcdea";
let clients = new ServerCollection();
let meta_global = new ServerWBO('global');
// Tracking info/collections.
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
@@ -58,143 +58,143 @@ add_test(function v4_upgrade() {
image: "image"
}
}]}]};
delete Svc.Session;
Svc.Session = {
getBrowserState: () => JSON.stringify(myTabs)
};
- Service.status.resetSync();
+ yield Service.status.resetSync();
_("Logging in.");
Service.serverURL = server.baseURI;
- Service.login("johndoe", "ilovejane", passphrase);
+ yield Service.login("johndoe", "ilovejane", passphrase);
do_check_true(Service.isLoggedIn);
Service.verifyAndFetchSymmetricKeys();
- do_check_true(Service._remoteSetup());
+ do_check_true(yield Service._remoteSetup());
- function test_out_of_date() {
+ function* test_out_of_date() {
_("Old meta/global: " + JSON.stringify(meta_global));
meta_global.payload = JSON.stringify({"syncID": "foooooooooooooooooooooooooo",
"storageVersion": STORAGE_VERSION + 1});
collections.meta = Date.now() / 1000;
_("New meta/global: " + JSON.stringify(meta_global));
Service.recordManager.set(Service.metaURL, meta_global);
try {
- Service.sync();
+ yield Service.sync();
}
catch (ex) {
}
do_check_eq(Service.status.sync, VERSION_OUT_OF_DATE);
}
// See what happens when we bump the storage version.
_("Syncing after server has been upgraded.");
- test_out_of_date();
+ yield test_out_of_date();
// Same should happen after a wipe.
_("Syncing after server has been upgraded and wiped.");
- Service.wipeServer();
- test_out_of_date();
+ yield Service.wipeServer();
+ yield test_out_of_date();
// Now's a great time to test what happens when keys get replaced.
_("Syncing afresh...");
- Service.logout();
+ yield Service.logout();
Service.collectionKeys.clear();
Service.serverURL = server.baseURI;
meta_global.payload = JSON.stringify({"syncID": "foooooooooooooobbbbbbbbbbbb",
"storageVersion": STORAGE_VERSION});
collections.meta = Date.now() / 1000;
Service.recordManager.set(Service.metaURL, meta_global);
- Service.login("johndoe", "ilovejane", passphrase);
+ yield Service.login("johndoe", "ilovejane", passphrase);
do_check_true(Service.isLoggedIn);
- Service.sync();
+ yield Service.sync();
do_check_true(Service.isLoggedIn);
let serverDecrypted;
let serverKeys;
let serverResp;
- function retrieve_server_default() {
+ function* retrieve_server_default() {
serverKeys = serverResp = serverDecrypted = null;
serverKeys = new CryptoWrapper("crypto", "keys");
- serverResp = serverKeys.fetch(Service.resource(Service.cryptoKeysURL)).response;
+ serverResp = (yield serverKeys.fetch(Service.resource(Service.cryptoKeysURL))).response;
do_check_true(serverResp.success);
serverDecrypted = serverKeys.decrypt(Service.identity.syncKeyBundle);
_("Retrieved WBO: " + JSON.stringify(serverDecrypted));
_("serverKeys: " + JSON.stringify(serverKeys));
return serverDecrypted.default;
}
- function retrieve_and_compare_default(should_succeed) {
- let serverDefault = retrieve_server_default();
+ function* retrieve_and_compare_default(should_succeed) {
+ let serverDefault = yield retrieve_server_default();
let localDefault = Service.collectionKeys.keyForCollection().keyPairB64;
_("Retrieved keyBundle: " + JSON.stringify(serverDefault));
_("Local keyBundle: " + JSON.stringify(localDefault));
if (should_succeed)
do_check_eq(JSON.stringify(serverDefault), JSON.stringify(localDefault));
else
do_check_neq(JSON.stringify(serverDefault), JSON.stringify(localDefault));
}
// Uses the objects set above.
- function set_server_keys(pair) {
+ function* set_server_keys(pair) {
serverDecrypted.default = pair;
serverKeys.cleartext = serverDecrypted;
serverKeys.encrypt(Service.identity.syncKeyBundle);
- serverKeys.upload(Service.resource(Service.cryptoKeysURL));
+ yield serverKeys.upload(Service.resource(Service.cryptoKeysURL));
}
_("Checking we have the latest keys.");
- retrieve_and_compare_default(true);
+ yield retrieve_and_compare_default(true);
_("Update keys on server.");
- set_server_keys(["KaaaaaaaaaaaHAtfmuRY0XEJ7LXfFuqvF7opFdBD/MY=",
- "aaaaaaaaaaaapxMO6TEWtLIOv9dj6kBAJdzhWDkkkis="]);
+ yield set_server_keys(["KaaaaaaaaaaaHAtfmuRY0XEJ7LXfFuqvF7opFdBD/MY=",
+ "aaaaaaaaaaaapxMO6TEWtLIOv9dj6kBAJdzhWDkkkis="]);
_("Checking that we no longer have the latest keys.");
- retrieve_and_compare_default(false);
+ yield retrieve_and_compare_default(false);
_("Indeed, they're what we set them to...");
do_check_eq("KaaaaaaaaaaaHAtfmuRY0XEJ7LXfFuqvF7opFdBD/MY=",
- retrieve_server_default()[0]);
+ (yield retrieve_server_default())[0]);
_("Sync. Should download changed keys automatically.");
let oldClientsModified = collections.clients;
let oldTabsModified = collections.tabs;
- Service.login("johndoe", "ilovejane", passphrase);
- Service.sync();
+ yield Service.login("johndoe", "ilovejane", passphrase);
+ yield Service.sync();
_("New key should have forced upload of data.");
_("Tabs: " + oldTabsModified + " < " + collections.tabs);
_("Clients: " + oldClientsModified + " < " + collections.clients);
do_check_true(collections.clients > oldClientsModified);
do_check_true(collections.tabs > oldTabsModified);
_("... and keys will now match.");
- retrieve_and_compare_default(true);
+ yield retrieve_and_compare_default(true);
// Clean up.
- Service.startOver();
+ yield Service.startOver();
} finally {
Svc.Prefs.resetBranch("");
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
});
-add_test(function v5_upgrade() {
+add_task(function* v5_upgrade() {
let passphrase = "abcdeabcdeabcdeabcdeabcdea";
// Tracking info/collections.
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let collections = collectionsHelper.collections;
let keysWBO = new ServerWBO("keys");
@@ -227,70 +227,70 @@ add_test(function v5_upgrade() {
image: "image"
}
}]}]};
delete Svc.Session;
Svc.Session = {
getBrowserState: () => JSON.stringify(myTabs)
};
- Service.status.resetSync();
+ yield Service.status.resetSync();
setBasicCredentials("johndoe", "ilovejane", passphrase);
Service.serverURL = server.baseURI + "/";
Service.clusterURL = server.baseURI + "/";
// Test an upgrade where the contents of the server would cause us to error
// -- keys decrypted with a different sync key, for example.
_("Testing v4 -> v5 (or similar) upgrade.");
- function update_server_keys(syncKeyBundle, wboName, collWBO) {
+ function* update_server_keys(syncKeyBundle, wboName, collWBO) {
generateNewKeys(Service.collectionKeys);
serverKeys = Service.collectionKeys.asWBO("crypto", wboName);
serverKeys.encrypt(syncKeyBundle);
let res = Service.resource(Service.storageURL + collWBO);
- do_check_true(serverKeys.upload(res).success);
+ do_check_true((yield serverKeys.upload(res)).success);
}
_("Bumping version.");
// Bump version on the server.
let m = new WBORecord("meta", "global");
m.payload = {"syncID": "foooooooooooooooooooooooooo",
"storageVersion": STORAGE_VERSION + 1};
m.upload(Service.resource(Service.metaURL));
_("New meta/global: " + JSON.stringify(meta_global));
// Fill the keys with bad data.
let badKeys = new SyncKeyBundle("foobar", "aaaaaaaaaaaaaaaaaaaaaaaaaa");
- update_server_keys(badKeys, "keys", "crypto/keys"); // v4
- update_server_keys(badKeys, "bulk", "crypto/bulk"); // v5
+ yield update_server_keys(badKeys, "keys", "crypto/keys"); // v4
+ yield update_server_keys(badKeys, "bulk", "crypto/bulk"); // v5
_("Generating new keys.");
generateNewKeys(Service.collectionKeys);
// Now sync and see what happens. It should be a version fail, not a crypto
// fail.
_("Logging in.");
try {
- Service.login("johndoe", "ilovejane", passphrase);
+ yield Service.login("johndoe", "ilovejane", passphrase);
}
catch (e) {
_("Exception: " + e);
}
_("Status: " + Service.status);
do_check_false(Service.isLoggedIn);
do_check_eq(VERSION_OUT_OF_DATE, Service.status.sync);
// Clean up.
- Service.startOver();
+ yield Service.startOver();
} finally {
Svc.Prefs.resetBranch("");
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
});
function run_test() {
let logger = Log.repository.rootLogger;
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
run_next_test();
--- a/services/sync/tests/unit/test_service_login.js
+++ b/services/sync/tests/unit/test_service_login.js
@@ -24,26 +24,25 @@ function login_handling(handler) {
function run_test() {
let logger = Log.repository.rootLogger;
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
run_next_test();
}
-add_test(function test_offline() {
+add_task(function* test_offline() {
try {
_("The right bits are set when we're offline.");
Services.io.offline = true;
- do_check_false(!!Service.login());
+ do_check_false(!!(yield Service.login()));
do_check_eq(Service.status.login, LOGIN_FAILED_NETWORK_ERROR);
Services.io.offline = false;
} finally {
Svc.Prefs.resetBranch("");
- run_next_test();
}
});
function setup() {
let janeHelper = track_collections_helper();
let janeU = janeHelper.with_updated_collection;
let janeColls = janeHelper.collections;
let johnHelper = track_collections_helper();
@@ -62,101 +61,101 @@ function setup() {
"/1.1/janedoe/storage/crypto/keys": janeU("crypto", new ServerWBO("keys").handler()),
"/1.1/janedoe/storage/meta/global": janeU("meta", new ServerWBO("global").handler())
});
Service.serverURL = server.baseURI;
return server;
}
-add_test(function test_login_logout() {
+add_task(function* test_login_logout() {
let server = setup();
try {
_("Force the initial state.");
ensureLegacyIdentityManager();
Service.status.service = STATUS_OK;
do_check_eq(Service.status.service, STATUS_OK);
_("Try logging in. It won't work because we're not configured yet.");
- Service.login();
+ yield Service.login();
do_check_eq(Service.status.service, CLIENT_NOT_CONFIGURED);
do_check_eq(Service.status.login, LOGIN_FAILED_NO_USERNAME);
do_check_false(Service.isLoggedIn);
_("Try again with username and password set.");
Service.identity.account = "johndoe";
Service.identity.basicPassword = "ilovejane";
- Service.login();
+ yield Service.login();
do_check_eq(Service.status.service, CLIENT_NOT_CONFIGURED);
do_check_eq(Service.status.login, LOGIN_FAILED_NO_PASSPHRASE);
do_check_false(Service.isLoggedIn);
_("Success if passphrase is set.");
Service.identity.syncKey = "foo";
- Service.login();
+ yield Service.login();
do_check_eq(Service.status.service, STATUS_OK);
do_check_eq(Service.status.login, LOGIN_SUCCEEDED);
do_check_true(Service.isLoggedIn);
_("We can also pass username, password and passphrase to login().");
- Service.login("janedoe", "incorrectpassword", "bar");
+ yield Service.login("janedoe", "incorrectpassword", "bar");
setBasicCredentials("janedoe", "incorrectpassword", "bar");
do_check_eq(Service.status.service, LOGIN_FAILED);
do_check_eq(Service.status.login, LOGIN_FAILED_LOGIN_REJECTED);
do_check_false(Service.isLoggedIn);
_("Try again with correct password.");
- Service.login("janedoe", "ilovejohn");
+ yield Service.login("janedoe", "ilovejohn");
do_check_eq(Service.status.service, STATUS_OK);
do_check_eq(Service.status.login, LOGIN_SUCCEEDED);
do_check_true(Service.isLoggedIn);
_("Calling login() with parameters when the client is unconfigured sends notification.");
let notified = false;
Svc.Obs.add("weave:service:setup-complete", function() {
notified = true;
});
setBasicCredentials(null, null, null);
- Service.login("janedoe", "ilovejohn", "bar");
+ yield Service.login("janedoe", "ilovejohn", "bar");
do_check_true(notified);
do_check_eq(Service.status.service, STATUS_OK);
do_check_eq(Service.status.login, LOGIN_SUCCEEDED);
do_check_true(Service.isLoggedIn);
_("Logout.");
- Service.logout();
+ yield Service.logout();
do_check_false(Service.isLoggedIn);
_("Logging out again won't do any harm.");
- Service.logout();
+ yield Service.logout();
do_check_false(Service.isLoggedIn);
} finally {
Svc.Prefs.resetBranch("");
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
});
-add_test(function test_login_on_sync() {
+add_task(function* test_login_on_sync() {
let server = setup();
setBasicCredentials("johndoe", "ilovejane", "bar");
try {
_("Sync calls login.");
let oldLogin = Service.login;
let loginCalled = false;
Service.login = function() {
loginCalled = true;
Service.status.login = LOGIN_SUCCEEDED;
this._loggedIn = false; // So that sync aborts.
return true;
};
- Service.sync();
+ yield Service.sync();
do_check_true(loginCalled);
Service.login = oldLogin;
// Stub mpLocked.
let mpLockedF = Utils.mpLocked;
let mpLocked = true;
Utils.mpLocked = () => mpLocked;
@@ -211,35 +210,35 @@ add_test(function test_login_on_sync() {
let oldClearSyncTriggers = Service.scheduler.clearSyncTriggers;
let oldLockedSync = Service._lockedSync;
let cSTCalled = false;
let lockedSyncCalled = false;
Service.scheduler.clearSyncTriggers = function() { cSTCalled = true; };
- Service._lockedSync = function() { lockedSyncCalled = true; };
+ Service._lockedSync = function() { lockedSyncCalled = true; return Promise.resolve();};
_("If master password is canceled, login fails and we report lockage.");
- do_check_false(!!Service.login());
+ do_check_false(!!(yield Service.login()));
do_check_eq(Service.status.login, MASTER_PASSWORD_LOCKED);
do_check_eq(Service.status.service, LOGIN_FAILED);
_("Locked? " + Utils.mpLocked());
_("checkSync reports the correct term.");
do_check_eq(Service._checkSync(), kSyncMasterPasswordLocked);
_("Sync doesn't proceed and clears triggers if MP is still locked.");
- Service.sync();
+ yield Service.sync();
do_check_true(cSTCalled);
do_check_false(lockedSyncCalled);
Service.identity.__defineGetter__("syncKey", oldGetter);
Service.identity.__defineSetter__("syncKey", oldSetter);
// N.B., a bunch of methods are stubbed at this point. Be careful putting
// new tests after this point!
} finally {
Svc.Prefs.resetBranch("");
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
});
--- a/services/sync/tests/unit/test_service_passwordUTF8.js
+++ b/services/sync/tests/unit/test_service_passwordUTF8.js
@@ -49,47 +49,50 @@ function change_password(request, respon
}
response.setStatusLine(request.httpVersion, statusCode, status);
response.setHeader("WWW-Authenticate", 'Basic realm="secret"', false);
response.bodyOutputStream.write(body, body.length);
}
function run_test() {
initTestLogging("Trace");
+ run_next_test();
+}
+
+add_task(function* () {
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let collections = collectionsHelper.collections;
ensureLegacyIdentityManager();
- do_test_pending();
let server = httpd_setup({
"/1.1/johndoe/info/collections": login_handling(collectionsHelper.handler),
"/1.1/johndoe/storage/meta/global": upd("meta", new ServerWBO("global").handler()),
"/1.1/johndoe/storage/crypto/keys": upd("crypto", new ServerWBO("keys").handler()),
"/user/1.0/johndoe/password": change_password
});
setBasicCredentials("johndoe", JAPANESE, "irrelevant");
Service.serverURL = server.baseURI;
try {
_("Try to log in with the password.");
server_password = "foobar";
- do_check_false(Service.verifyLogin());
+ do_check_false(yield Service.verifyLogin());
do_check_eq(server_password, "foobar");
_("Make the server password the low byte version of our password.");
server_password = LOWBYTES;
- do_check_false(Service.verifyLogin());
+ do_check_false(yield Service.verifyLogin());
do_check_eq(server_password, LOWBYTES);
_("Can't use a password that has the same low bytes as ours.");
server_password = Utils.encodeUTF8(JAPANESE);
Service.identity.basicPassword = APPLES;
- do_check_false(Service.verifyLogin());
+ do_check_false(yield Service.verifyLogin());
do_check_eq(server_password, Utils.encodeUTF8(JAPANESE));
} finally {
- server.stop(do_test_finished);
+ yield promiseStopServer(server);
Svc.Prefs.resetBranch("");
}
-}
+});
--- a/services/sync/tests/unit/test_service_startOver.js
+++ b/services/sync/tests/unit/test_service_startOver.js
@@ -23,17 +23,17 @@ BlaEngine.prototype = {
Service.engineManager.register(BlaEngine);
function run_test() {
initTestLogging("Trace");
run_next_test();
}
-add_identity_test(this, function test_resetLocalData() {
+add_identity_test(this, function* test_resetLocalData() {
yield configureIdentity();
Service.status.enforceBackoff = true;
Service.status.backoffInterval = 42;
Service.status.minimumNextSync = 23;
Service.persistLogin();
// Verify set up.
do_check_eq(Service.status.checkSetup(), STATUS_OK);
@@ -42,60 +42,56 @@ add_identity_test(this, function test_re
let observerCalled = false;
Svc.Obs.add("weave:service:start-over", function onStartOver() {
Svc.Obs.remove("weave:service:start-over", onStartOver);
observerCalled = true;
do_check_eq(Service.status.service, CLIENT_NOT_CONFIGURED);
});
- Service.startOver();
+ yield Service.startOver();
do_check_true(observerCalled);
// Verify the site was nuked from orbit.
do_check_eq(Svc.Prefs.get("username"), undefined);
do_check_eq(Service.identity.basicPassword, null);
do_check_eq(Service.identity.syncKey, null);
do_check_eq(Service.status.service, CLIENT_NOT_CONFIGURED);
do_check_false(Service.status.enforceBackoff);
do_check_eq(Service.status.backoffInterval, 0);
do_check_eq(Service.status.minimumNextSync, 0);
});
-add_test(function test_removeClientData() {
+add_task(function* test_removeClientData() {
let engine = Service.engineManager.get("bla");
// No cluster URL = no removal.
do_check_false(engine.removed);
- Service.startOver();
+ yield Service.startOver();
do_check_false(engine.removed);
Service.serverURL = "https://localhost/";
Service.clusterURL = Service.serverURL;
do_check_false(engine.removed);
- Service.startOver();
+ yield Service.startOver();
do_check_true(engine.removed);
-
- run_next_test();
});
-add_test(function test_reset_SyncScheduler() {
+add_task(function* test_reset_SyncScheduler() {
// Some non-default values for SyncScheduler's attributes.
Service.scheduler.idle = true;
Service.scheduler.hasIncomingItems = true;
Service.scheduler.numClients = 42;
Service.scheduler.nextSync = Date.now();
Service.scheduler.syncThreshold = MULTI_DEVICE_THRESHOLD;
Service.scheduler.syncInterval = Service.scheduler.activeInterval;
- Service.startOver();
+ yield Service.startOver();
do_check_false(Service.scheduler.idle);
do_check_false(Service.scheduler.hasIncomingItems);
do_check_eq(Service.scheduler.numClients, 0);
do_check_eq(Service.scheduler.nextSync, 0);
do_check_eq(Service.scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
do_check_eq(Service.scheduler.syncInterval, Service.scheduler.singleDeviceInterval);
-
- run_next_test();
});
--- a/services/sync/tests/unit/test_service_sync_401.js
+++ b/services/sync/tests/unit/test_service_sync_401.js
@@ -17,22 +17,24 @@ function login_handling(handler) {
response.bodyOutputStream.write(body, body.length);
}
};
}
function run_test() {
let logger = Log.repository.rootLogger;
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+ run_next_test();
+}
+add_task(function* () {
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let collections = collectionsHelper.collections;
- do_test_pending();
let server = httpd_setup({
"/1.1/johndoe/storage/crypto/keys": upd("crypto", new ServerWBO("keys").handler()),
"/1.1/johndoe/storage/meta/global": upd("meta", new ServerWBO("global").handler()),
"/1.1/johndoe/info/collections": login_handling(collectionsHelper.handler)
});
const GLOBAL_SCORE = 42;
@@ -44,41 +45,41 @@ function run_test() {
Svc.Prefs.set("lastPing", Math.floor(Date.now() / 1000));
let threw = false;
Svc.Obs.add("weave:service:sync:error", function (subject, data) {
threw = true;
});
_("Initial state: We're successfully logged in.");
- Service.login();
+ yield Service.login();
do_check_true(Service.isLoggedIn);
do_check_eq(Service.status.login, LOGIN_SUCCEEDED);
_("Simulate having changed the password somewhere else.");
Service.identity.basicPassword = "ilovejosephine";
_("Let's try to sync.");
- Service.sync();
+ yield Service.sync();
_("Verify that sync() threw an exception.");
do_check_true(threw);
_("We're no longer logged in.");
do_check_false(Service.isLoggedIn);
_("Sync status won't have changed yet, because we haven't tried again.");
_("globalScore is reset upon starting a sync.");
do_check_eq(Service.scheduler.globalScore, 0);
_("Our next sync will fail appropriately.");
try {
- Service.sync();
+ yield Service.sync();
} catch (ex) {
}
do_check_eq(Service.status.login, LOGIN_FAILED_LOGIN_REJECTED);
} finally {
Svc.Prefs.resetBranch("");
- server.stop(do_test_finished);
+ yield promiseStopServer(server);
}
-}
+});
--- a/services/sync/tests/unit/test_service_sync_locked.js
+++ b/services/sync/tests/unit/test_service_sync_locked.js
@@ -1,15 +1,19 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
let debug = [];
let info = [];
function augmentLogger(old) {
let d = old.debug;
let i = old.info;
old.debug = function(m) { debug.push(m); d.call(old, m); }
old.info = function(m) { info.push(m); i.call(old, m); }
@@ -20,17 +24,17 @@ function run_test() {
augmentLogger(Service._log);
// Avoid daily ping
Svc.Prefs.set("lastPing", Math.floor(Date.now() / 1000));
_("Check that sync will log appropriately if already in 'progress'.");
Service._locked = true;
- Service.sync();
+ yield Service.sync();
Service._locked = false;
do_check_eq(debug[debug.length - 2],
"Exception: Could not acquire lock. Label: \"service.js: login\". No traceback available");
do_check_eq(info[info.length - 1],
"Cannot start sync: already syncing?");
-}
+});
--- a/services/sync/tests/unit/test_service_sync_remoteSetup.js
+++ b/services/sync/tests/unit/test_service_sync_remoteSetup.js
@@ -7,17 +7,20 @@ Cu.import("resource://services-sync/keys
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/fakeservices.js");
Cu.import("resource://testing-common/services/sync/utils.js");
function run_test() {
let logger = Log.repository.rootLogger;
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+ run_next_test();
+}
+add_task(function* () {
let guidSvc = new FakeGUIDService();
let clients = new ServerCollection();
let meta_global = new ServerWBO('global');
let collectionsHelper = track_collections_helper();
let upd = collectionsHelper.with_updated_collection;
let collections = collectionsHelper.collections;
@@ -27,17 +29,16 @@ function run_test() {
wbo.wasCalled = true;
handler.apply(this, arguments);
};
}
let keysWBO = new ServerWBO("keys");
let cryptoColl = new ServerCollection({keys: keysWBO});
let metaColl = new ServerCollection({global: meta_global});
- do_test_pending();
/**
* Handle the bulk DELETE request sent by wipeServer.
*/
function storageHandler(request, response) {
do_check_eq("DELETE", request.method);
do_check_true(request.hasHeader("X-Confirm-Delete"));
@@ -64,77 +65,77 @@ function run_test() {
});
try {
_("Log in.");
ensureLegacyIdentityManager();
Service.serverURL = server.baseURI;
_("Checking Status.sync with no credentials.");
- Service.verifyAndFetchSymmetricKeys();
+ yield Service.verifyAndFetchSymmetricKeys();
do_check_eq(Service.status.sync, CREDENTIALS_CHANGED);
do_check_eq(Service.status.login, LOGIN_FAILED_NO_PASSPHRASE);
_("Log in with an old secret phrase, is upgraded to Sync Key.");
- Service.login("johndoe", "ilovejane", "my old secret phrase!!1!");
+ yield Service.login("johndoe", "ilovejane", "my old secret phrase!!1!");
_("End of login");
do_check_true(Service.isLoggedIn);
do_check_true(Utils.isPassphrase(Service.identity.syncKey));
let syncKey = Service.identity.syncKey;
- Service.startOver();
+ yield Service.startOver();
Service.serverURL = server.baseURI;
Service.login("johndoe", "ilovejane", syncKey);
do_check_true(Service.isLoggedIn);
_("Checking that remoteSetup returns true when credentials have changed.");
Service.recordManager.get(Service.metaURL).payload.syncID = "foobar";
- do_check_true(Service._remoteSetup());
+ do_check_true(yield Service._remoteSetup());
_("Do an initial sync.");
let beforeSync = Date.now()/1000;
- Service.sync();
+ yield Service.sync();
_("Checking that remoteSetup returns true.");
- do_check_true(Service._remoteSetup());
+ do_check_true(yield Service._remoteSetup());
_("Verify that the meta record was uploaded.");
do_check_eq(meta_global.data.syncID, Service.syncID);
do_check_eq(meta_global.data.storageVersion, STORAGE_VERSION);
do_check_eq(meta_global.data.engines.clients.version, Service.clientsEngine.version);
do_check_eq(meta_global.data.engines.clients.syncID, Service.clientsEngine.syncID);
_("Set the collection info hash so that sync() will remember the modified times for future runs.");
collections.meta = Service.clientsEngine.lastSync;
collections.clients = Service.clientsEngine.lastSync;
- Service.sync();
+ yield Service.sync();
_("Sync again and verify that meta/global wasn't downloaded again");
meta_global.wasCalled = false;
- Service.sync();
+ yield Service.sync();
do_check_false(meta_global.wasCalled);
_("Fake modified records. This will cause a redownload, but not reupload since it hasn't changed.");
collections.meta += 42;
meta_global.wasCalled = false;
let metaModified = meta_global.modified;
- Service.sync();
+ yield Service.sync();
do_check_true(meta_global.wasCalled);
do_check_eq(metaModified, meta_global.modified);
_("Checking bad passphrases.");
let pp = Service.identity.syncKey;
Service.identity.syncKey = "notvalid";
- do_check_false(Service.verifyAndFetchSymmetricKeys());
+ do_check_false(yield Service.verifyAndFetchSymmetricKeys());
do_check_eq(Service.status.sync, CREDENTIALS_CHANGED);
do_check_eq(Service.status.login, LOGIN_FAILED_INVALID_PASSPHRASE);
Service.identity.syncKey = pp;
- do_check_true(Service.verifyAndFetchSymmetricKeys());
+ do_check_true(yield Service.verifyAndFetchSymmetricKeys());
// changePassphrase wipes our keys, and they're regenerated on next sync.
_("Checking changed passphrase.");
let existingDefault = Service.collectionKeys.keyForCollection();
let existingKeysPayload = keysWBO.payload;
let newPassphrase = "bbbbbabcdeabcdeabcdeabcdea";
Service.changePassphrase(newPassphrase);
@@ -151,21 +152,21 @@ function run_test() {
// Re-encrypt keys with a new random keybundle, and upload them to the
// server, just as might happen with a second client.
_("Attempting to screw up HMAC by re-encrypting keys.");
let keys = Service.collectionKeys.asWBO();
let b = new BulkKeyBundle("hmacerror");
b.generateRandom();
collections.crypto = keys.modified = 100 + (Date.now()/1000); // Future modification time.
keys.encrypt(b);
- keys.upload(Service.resource(Service.cryptoKeysURL));
+ yield keys.upload(Service.resource(Service.cryptoKeysURL));
- do_check_false(Service.verifyAndFetchSymmetricKeys());
+ do_check_false(yield Service.verifyAndFetchSymmetricKeys());
do_check_eq(Service.status.login, LOGIN_FAILED_INVALID_PASSPHRASE);
let hmacErrors = sumHistogram("WEAVE_HMAC_ERRORS");
do_check_eq(hmacErrors, 1);
} finally {
Svc.Prefs.resetBranch("");
- server.stop(do_test_finished);
+ yield server.promiseStop();
}
-}
+});
--- a/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
+++ b/services/sync/tests/unit/test_service_sync_updateEnabledEngines.js
@@ -24,18 +24,18 @@ QuietStore.prototype = {
function SteamEngine() {
SyncEngine.call(this, "Steam", Service);
}
SteamEngine.prototype = {
__proto__: SyncEngine.prototype,
// We're not interested in engine sync but what the service does.
_storeObj: QuietStore,
- _sync: function _sync() {
- this._syncStartup();
+ _sync: function* _sync() {
+ yield this._syncStartup();
}
};
Service.engineManager.register(SteamEngine);
function StirlingEngine() {
SyncEngine.call(this, "Stirling", Service);
}
StirlingEngine.prototype = {
@@ -85,116 +85,116 @@ const PAYLOAD = 42;
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Service").level = Log.Level.Trace;
Log.repository.getLogger("Sync.ErrorHandler").level = Log.Level.Trace;
run_next_test();
}
-add_test(function test_newAccount() {
+add_task(function* test_newAccount() {
_("Test: New account does not disable locally enabled engines.");
let engine = Service.engineManager.get("steam");
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": new ServerWBO("global", {}).handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler()
});
- setUp(server);
+ yield setUp(server);
try {
_("Engine is enabled from the beginning.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Engine continues to be enabled.");
do_check_true(engine.enabled);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
-add_test(function test_enabledLocally() {
+add_task(function* test_enabledLocally() {
_("Test: Engine is disabled on remote clients and enabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler()
});
- setUp(server);
+ yield setUp(server);
try {
_("Enable engine locally.");
engine.enabled = true;
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Meta record now contains the new engine.");
do_check_true(!!metaWBO.data.engines.steam);
_("Engine continues to be enabled.");
do_check_true(engine.enabled);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
-add_test(function test_disabledLocally() {
+add_task(function* test_disabledLocally() {
_("Test: Engine is enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: engine.syncID,
version: engine.version}}
});
let steamCollection = new ServerWBO("steam", PAYLOAD);
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": steamCollection.handler()
});
- setUp(server);
+ yield setUp(server);
try {
_("Disable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
engine.enabled = false;
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Meta record no longer contains engine.");
do_check_false(!!metaWBO.data.engines.steam);
_("Server records are wiped.");
do_check_eq(steamCollection.payload, undefined);
_("Engine continues to be disabled.");
do_check_false(engine.enabled);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
-add_test(function test_disabledLocally_wipe503() {
+add_task(function* test_disabledLocally_wipe503() {
_("Test: Engine is enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: engine.syncID,
version: engine.version}}
@@ -207,192 +207,190 @@ add_test(function test_disabledLocally_w
response.setHeader("Retry-After", "23");
response.bodyOutputStream.write(body, body.length);
}
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": service_unavailable
});
- setUp(server);
+ yield setUp(server);
_("Disable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
engine.enabled = false;
- Svc.Obs.add("weave:ui:sync:error", function onSyncError() {
- Svc.Obs.remove("weave:ui:sync:error", onSyncError);
-
- do_check_eq(Service.status.sync, SERVER_MAINTENANCE);
+ let promiseUIError = promiseOneObserver("weave:ui:sync:error");
+ _("Sync.");
+ yield Service.errorHandler.syncAndReportErrors();
+ yield promiseUIError;
- Service.startOver();
- server.stop(run_next_test);
- });
+ do_check_eq(Service.status.sync, SERVER_MAINTENANCE);
- _("Sync.");
- Service.errorHandler.syncAndReportErrors();
+ yield Service.startOver();
+ yield promiseStopServer(server);
});
-add_test(function test_enabledRemotely() {
+add_task(function* test_enabledRemotely() {
_("Test: Engine is disabled locally and enabled on a remote client");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: engine.syncID,
version: engine.version}}
});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global":
upd("meta", metaWBO.handler()),
"/1.1/johndoe/storage/steam":
upd("steam", new ServerWBO("steam", {}).handler())
});
- setUp(server);
+ yield setUp(server);
// We need to be very careful how we do this, so that we don't trigger a
// fresh start!
try {
_("Upload some keys to avoid a fresh start.");
let wbo = Service.collectionKeys.generateNewKeysWBO();
wbo.encrypt(Service.identity.syncKeyBundle);
- do_check_eq(200, wbo.upload(Service.resource(Service.cryptoKeysURL)).status);
+ do_check_eq(200, (yield wbo.upload(Service.resource(Service.cryptoKeysURL))).status);
_("Engine is disabled.");
do_check_false(engine.enabled);
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Engine is enabled.");
do_check_true(engine.enabled);
_("Meta record still present.");
do_check_eq(metaWBO.data.engines.steam.syncID, engine.syncID);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
-add_test(function test_disabledRemotelyTwoClients() {
+add_task(function* test_disabledRemotelyTwoClients() {
_("Test: Engine is enabled locally and disabled on a remote client... with two clients.");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global":
upd("meta", metaWBO.handler()),
"/1.1/johndoe/storage/steam":
upd("steam", new ServerWBO("steam", {}).handler())
});
- setUp(server);
+ yield setUp(server);
try {
_("Enable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Disable engine by deleting from meta/global.");
let d = metaWBO.data;
delete d.engines["steam"];
metaWBO.payload = JSON.stringify(d);
metaWBO.modified = Date.now() / 1000;
_("Add a second client and verify that the local pref is changed.");
Service.clientsEngine._store._remoteClients["foobar"] = {name: "foobar", type: "desktop"};
- Service.sync();
+ yield Service.sync();
_("Engine is disabled.");
do_check_false(engine.enabled);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
-add_test(function test_disabledRemotely() {
+add_task(function* test_disabledRemotely() {
_("Test: Engine is enabled locally and disabled on a remote client");
Service.syncID = "abcdefghij";
let engine = Service.engineManager.get("steam");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler()
});
- setUp(server);
+ yield setUp(server);
try {
_("Enable engine locally.");
Service._ignorePrefObserver = true;
engine.enabled = true;
Service._ignorePrefObserver = false;
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Engine is not disabled: only one client.");
do_check_true(engine.enabled);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
-add_test(function test_dependentEnginesEnabledLocally() {
+add_task(function* test_dependentEnginesEnabledLocally() {
_("Test: Engine is disabled on remote clients and enabled locally");
Service.syncID = "abcdefghij";
let steamEngine = Service.engineManager.get("steam");
let stirlingEngine = Service.engineManager.get("stirling");
let metaWBO = new ServerWBO("global", {syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {}});
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": new ServerWBO("steam", {}).handler(),
"/1.1/johndoe/storage/stirling": new ServerWBO("stirling", {}).handler()
});
- setUp(server);
+ yield setUp(server);
try {
_("Enable engine locally. Doing it on one is enough.");
steamEngine.enabled = true;
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Meta record now contains the new engines.");
do_check_true(!!metaWBO.data.engines.steam);
do_check_true(!!metaWBO.data.engines.stirling);
_("Engines continue to be enabled.");
do_check_true(steamEngine.enabled);
do_check_true(stirlingEngine.enabled);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
-add_test(function test_dependentEnginesDisabledLocally() {
+add_task(function* test_dependentEnginesDisabledLocally() {
_("Test: Two dependent engines are enabled on remote clients and disabled locally");
Service.syncID = "abcdefghij";
let steamEngine = Service.engineManager.get("steam");
let stirlingEngine = Service.engineManager.get("stirling");
let metaWBO = new ServerWBO("global", {
syncID: Service.syncID,
storageVersion: STORAGE_VERSION,
engines: {steam: {syncID: steamEngine.syncID,
@@ -404,38 +402,38 @@ add_test(function test_dependentEnginesD
let steamCollection = new ServerWBO("steam", PAYLOAD);
let stirlingCollection = new ServerWBO("stirling", PAYLOAD);
let server = sync_httpd_setup({
"/1.1/johndoe/storage/meta/global": metaWBO.handler(),
"/1.1/johndoe/storage/steam": steamCollection.handler(),
"/1.1/johndoe/storage/stirling": stirlingCollection.handler()
});
- setUp(server);
+ yield setUp(server);
try {
_("Disable engines locally. Doing it on one is enough.");
Service._ignorePrefObserver = true;
steamEngine.enabled = true;
do_check_true(stirlingEngine.enabled);
Service._ignorePrefObserver = false;
steamEngine.enabled = false;
do_check_false(stirlingEngine.enabled);
_("Sync.");
- Service.sync();
+ yield Service.sync();
_("Meta record no longer contains engines.");
do_check_false(!!metaWBO.data.engines.steam);
do_check_false(!!metaWBO.data.engines.stirling);
_("Server records are wiped.");
do_check_eq(steamCollection.payload, undefined);
do_check_eq(stirlingCollection.payload, undefined);
_("Engines continue to be disabled.");
do_check_false(steamEngine.enabled);
do_check_false(stirlingEngine.enabled);
} finally {
- Service.startOver();
- server.stop(run_next_test);
+ yield Service.startOver();
+ yield promiseStopServer(server);
}
});
--- a/services/sync/tests/unit/test_service_verifyLogin.js
+++ b/services/sync/tests/unit/test_service_verifyLogin.js
@@ -24,26 +24,27 @@ function service_unavailable(request, re
response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
response.setHeader("Retry-After", "42");
response.bodyOutputStream.write(body, body.length);
}
function run_test() {
let logger = Log.repository.rootLogger;
Log.repository.rootLogger.addAppender(new Log.DumpAppender());
+  run_next_test();
+}
+add_task(function* () {
ensureLegacyIdentityManager();
// This test expects a clean slate -- no saved passphrase.
Services.logins.removeAllLogins();
let johnHelper = track_collections_helper();
let johnU = johnHelper.with_updated_collection;
let johnColls = johnHelper.collections;
- do_test_pending();
-
let server;
function weaveHandler (request, response) {
response.setStatusLine(request.httpVersion, 200, "OK");
let body = server.baseURI + "/api/";
response.bodyOutputStream.write(body, body.length);
}
server = httpd_setup({
@@ -58,65 +58,65 @@ function run_test() {
try {
Service.serverURL = server.baseURI;
_("Force the initial state.");
Service.status.service = STATUS_OK;
do_check_eq(Service.status.service, STATUS_OK);
_("Credentials won't check out because we're not configured yet.");
- Service.status.resetSync();
- do_check_false(Service.verifyLogin());
+ yield Service.status.resetSync();
+ do_check_false(yield Service.verifyLogin());
do_check_eq(Service.status.service, CLIENT_NOT_CONFIGURED);
do_check_eq(Service.status.login, LOGIN_FAILED_NO_USERNAME);
_("Try again with username and password set.");
- Service.status.resetSync();
+ yield Service.status.resetSync();
setBasicCredentials("johndoe", "ilovejane", null);
- do_check_false(Service.verifyLogin());
+ do_check_false(yield Service.verifyLogin());
do_check_eq(Service.status.service, CLIENT_NOT_CONFIGURED);
do_check_eq(Service.status.login, LOGIN_FAILED_NO_PASSPHRASE);
_("verifyLogin() has found out the user's cluster URL, though.");
do_check_eq(Service.clusterURL, server.baseURI + "/api/");
_("Success if passphrase is set.");
- Service.status.resetSync();
+ yield Service.status.resetSync();
Service.identity.syncKey = "foo";
- do_check_true(Service.verifyLogin());
+ do_check_true(yield Service.verifyLogin());
do_check_eq(Service.status.service, STATUS_OK);
do_check_eq(Service.status.login, LOGIN_SUCCEEDED);
_("If verifyLogin() encounters a server error, it flips on the backoff flag and notifies observers on a 503 with Retry-After.");
- Service.status.resetSync();
+ yield Service.status.resetSync();
Service.identity.account = "janedoe";
Service._updateCachedURLs();
do_check_false(Service.status.enforceBackoff);
let backoffInterval;
Svc.Obs.add("weave:service:backoff:interval", function observe(subject, data) {
Svc.Obs.remove("weave:service:backoff:interval", observe);
backoffInterval = subject;
});
- do_check_false(Service.verifyLogin());
+ do_check_false(yield Service.verifyLogin());
do_check_true(Service.status.enforceBackoff);
do_check_eq(backoffInterval, 42);
do_check_eq(Service.status.service, LOGIN_FAILED);
do_check_eq(Service.status.login, SERVER_MAINTENANCE);
_("Ensure a network error when finding the cluster sets the right Status bits.");
- Service.status.resetSync();
+ yield Service.status.resetSync();
Service.serverURL = "http://localhost:12345/";
do_check_false(Service.verifyLogin());
do_check_eq(Service.status.service, LOGIN_FAILED);
do_check_eq(Service.status.login, LOGIN_FAILED_NETWORK_ERROR);
_("Ensure a network error when getting the collection info sets the right Status bits.");
- Service.status.resetSync();
+ yield Service.status.resetSync();
Service.clusterURL = "http://localhost:12345/";
- do_check_false(Service.verifyLogin());
+ do_check_false(yield Service.verifyLogin());
do_check_eq(Service.status.service, LOGIN_FAILED);
do_check_eq(Service.status.login, LOGIN_FAILED_NETWORK_ERROR);
} finally {
Svc.Prefs.resetBranch("");
- server.stop(do_test_finished);
+ yield server.promiseStop();
}
-}
+});
--- a/services/sync/tests/unit/test_service_wipeClient.js
+++ b/services/sync/tests/unit/test_service_wipeClient.js
@@ -43,70 +43,64 @@ CannotDecryptEngine.prototype = {
wasWiped: false,
wipeClient: function wipeClient() {
this.wasWiped = true;
}
};
Service.engineManager.register(CannotDecryptEngine);
-add_test(function test_withEngineList() {
+add_task(function* test_withEngineList() {
try {
_("Ensure initial scenario.");
do_check_false(Service.engineManager.get("candecrypt").wasWiped);
do_check_false(Service.engineManager.get("cannotdecrypt").wasWiped);
_("Wipe local engine data.");
- Service.wipeClient(["candecrypt", "cannotdecrypt"]);
+ yield Service.wipeClient(["candecrypt", "cannotdecrypt"]);
_("Ensure only the engine that can decrypt was wiped.");
do_check_true(Service.engineManager.get("candecrypt").wasWiped);
do_check_false(Service.engineManager.get("cannotdecrypt").wasWiped);
} finally {
Service.engineManager.get("candecrypt").wasWiped = false;
Service.engineManager.get("cannotdecrypt").wasWiped = false;
- Service.startOver();
+ yield Service.startOver();
}
-
- run_next_test();
});
-add_test(function test_startOver_clears_keys() {
+add_task(function* test_startOver_clears_keys() {
generateNewKeys(Service.collectionKeys);
do_check_true(!!Service.collectionKeys.keyForCollection());
- Service.startOver();
+ yield Service.startOver();
do_check_false(!!Service.collectionKeys.keyForCollection());
-
- run_next_test();
});
-add_test(function test_credentials_preserved() {
+add_task(function* test_credentials_preserved() {
_("Ensure that credentials are preserved if client is wiped.");
// Required for wipeClient().
ensureLegacyIdentityManager();
Service.identity.account = "testaccount";
Service.identity.basicPassword = "testpassword";
Service.clusterURL = "http://dummy:9000/";
let key = Utils.generatePassphrase();
Service.identity.syncKey = key;
Service.identity.persistCredentials();
// Simulate passwords engine wipe without all the overhead. To do this
// properly would require extra test infrastructure.
Services.logins.removeAllLogins();
- Service.wipeClient();
+ yield Service.wipeClient();
let id = new IdentityManager();
do_check_eq(id.account, "testaccount");
do_check_eq(id.basicPassword, "testpassword");
do_check_eq(id.syncKey, key);
- Service.startOver();
-
- run_next_test();
+ yield Service.startOver();
});
function run_test() {
initTestLogging();
run_next_test();
}
--- a/services/sync/tests/unit/test_service_wipeServer.js
+++ b/services/sync/tests/unit/test_service_wipeServer.js
@@ -41,22 +41,16 @@ function setUpTestFixtures(server) {
}
function run_test() {
initTestLogging("Trace");
run_next_test();
}
-function promiseStopServer(server) {
- let deferred = Promise.defer();
- server.stop(deferred.resolve);
- return deferred.promise;
-}
-
add_identity_test(this, function test_wipeServer_list_success() {
_("Service.wipeServer() deletes collections given as argument.");
let steam_coll = new FakeCollection();
let diesel_coll = new FakeCollection();
let server = httpd_setup({
"/1.1/johndoe/storage/steam": steam_coll.handler(),
@@ -68,17 +62,17 @@ add_identity_test(this, function test_wi
yield setUpTestFixtures(server);
new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
_("Confirm initial environment.");
do_check_false(steam_coll.deleted);
do_check_false(diesel_coll.deleted);
_("wipeServer() will happily ignore the non-existent collection and use the timestamp of the last DELETE that was successful.");
- let timestamp = Service.wipeServer(["steam", "diesel", "petrol"]);
+ let timestamp = yield Service.wipeServer(["steam", "diesel", "petrol"]);
do_check_eq(timestamp, diesel_coll.timestamp);
_("wipeServer stopped deleting after encountering an error with the 'petrol' collection, thus only 'steam' has been deleted.");
do_check_true(steam_coll.deleted);
do_check_true(diesel_coll.deleted);
} finally {
yield promiseStopServer(server);
@@ -104,17 +98,17 @@ add_identity_test(this, function test_wi
_("Confirm initial environment.");
do_check_false(steam_coll.deleted);
do_check_false(diesel_coll.deleted);
_("wipeServer() will happily ignore the non-existent collection, delete the 'steam' collection and abort after an receiving an error on the 'petrol' collection.");
let error;
try {
- Service.wipeServer(["non-existent", "steam", "petrol", "diesel"]);
+ yield Service.wipeServer(["non-existent", "steam", "petrol", "diesel"]);
do_throw("Should have thrown!");
} catch(ex) {
error = ex;
}
_("wipeServer() threw this exception: " + error);
do_check_eq(error.status, 503);
_("wipeServer stopped deleting after encountering an error with the 'petrol' collection, thus only 'steam' has been deleted.");
@@ -144,17 +138,17 @@ add_identity_test(this, function test_wi
let server = httpd_setup({
"/1.1/johndoe/storage": storageHandler
});
yield setUpTestFixtures(server);
_("Try deletion.");
new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
- let returnedTimestamp = Service.wipeServer();
+ let returnedTimestamp = yield Service.wipeServer();
do_check_true(deleted);
do_check_eq(returnedTimestamp, serverTimestamp);
yield promiseStopServer(server);
Svc.Prefs.resetBranch("");
});
add_identity_test(this, function test_wipeServer_all_404() {
@@ -176,17 +170,17 @@ add_identity_test(this, function test_wi
let server = httpd_setup({
"/1.1/johndoe/storage": storageHandler
});
yield setUpTestFixtures(server);
_("Try deletion.");
new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
- let returnedTimestamp = Service.wipeServer();
+ let returnedTimestamp = yield Service.wipeServer();
do_check_true(deleted);
do_check_eq(returnedTimestamp, serverTimestamp);
yield promiseStopServer(server);
Svc.Prefs.resetBranch("");
});
add_identity_test(this, function test_wipeServer_all_503() {
@@ -205,17 +199,17 @@ add_identity_test(this, function test_wi
"/1.1/johndoe/storage": storageHandler
});
yield setUpTestFixtures(server);
_("Try deletion.");
let error;
try {
new SyncTestingInfrastructure(server, "johndoe", "irrelevant", "irrelevant");
- Service.wipeServer();
+ yield Service.wipeServer();
do_throw("Should have thrown!");
} catch (ex) {
error = ex;
}
do_check_eq(error.status, 503);
yield promiseStopServer(server);
Svc.Prefs.resetBranch("");
@@ -226,17 +220,17 @@ add_identity_test(this, function test_wi
let server = httpd_setup({});
yield setUpTestFixtures(server);
Service.serverURL = "http://localhost:4352/";
Service.clusterURL = "http://localhost:4352/";
_("Try deletion.");
try {
- Service.wipeServer();
+ yield Service.wipeServer();
do_throw("Should have thrown!");
} catch (ex) {
do_check_eq(ex.result, Cr.NS_ERROR_CONNECTION_REFUSED);
}
Svc.Prefs.resetBranch("");
yield promiseStopServer(server);
});
--- a/services/sync/tests/unit/test_syncengine.js
+++ b/services/sync/tests/unit/test_syncengine.js
@@ -1,60 +1,62 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/utils.js");
-function makeSteamEngine() {
- return new SyncEngine('Steam', Service);
+function* makeSteamEngine() {
+ let engine = new SyncEngine('Steam', Service);
+ yield engine.whenInitialized;
+ return engine;
}
var server;
-function test_url_attributes() {
+add_task(function* test_url_attributes() {
_("SyncEngine url attributes");
let syncTesting = new SyncTestingInfrastructure(server);
Service.clusterURL = "https://cluster/";
- let engine = makeSteamEngine();
+ let engine = yield makeSteamEngine();
try {
do_check_eq(engine.storageURL, "https://cluster/1.1/foo/storage/");
do_check_eq(engine.engineURL, "https://cluster/1.1/foo/storage/steam");
do_check_eq(engine.metaURL, "https://cluster/1.1/foo/storage/meta/global");
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_syncID() {
+add_task(function* test_syncID() {
_("SyncEngine.syncID corresponds to preference");
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeSteamEngine();
+ let engine = yield makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(Svc.Prefs.get("steam.syncID"), undefined);
// Performing the first get on the attribute will generate a new GUID.
do_check_eq(engine.syncID, "fake-guid-0");
do_check_eq(Svc.Prefs.get("steam.syncID"), "fake-guid-0");
Svc.Prefs.set("steam.syncID", Utils.makeGUID());
do_check_eq(Svc.Prefs.get("steam.syncID"), "fake-guid-1");
do_check_eq(engine.syncID, "fake-guid-1");
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_lastSync() {
+add_task(function* test_lastSync() {
_("SyncEngine.lastSync and SyncEngine.lastSyncLocal correspond to preferences");
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeSteamEngine();
+ let engine = yield makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(Svc.Prefs.get("steam.lastSync"), undefined);
do_check_eq(engine.lastSync, 0);
do_check_eq(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
do_check_eq(engine.lastSyncLocal, 0);
// Floats are properly stored as floats and synced with the preference
@@ -69,136 +71,127 @@ function test_lastSync() {
// resetLastSync() resets the value (and preference) to 0
engine.resetLastSync();
do_check_eq(engine.lastSync, 0);
do_check_eq(Svc.Prefs.get("steam.lastSync"), "0");
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_toFetch() {
+add_task(function* test_toFetch() {
_("SyncEngine.toFetch corresponds to file on disk");
let syncTesting = new SyncTestingInfrastructure(server);
const filename = "weave/toFetch/steam.json";
- let engine = makeSteamEngine();
+ let engine = yield makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(engine.toFetch.length, 0);
// Write file to disk
let toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
engine.toFetch = toFetch;
do_check_eq(engine.toFetch, toFetch);
// toFetch is written asynchronously
- engine._store._sleep(0);
+ yield engine._store._sleep(0);
let fakefile = syncTesting.fakeFilesystem.fakeContents[filename];
do_check_eq(fakefile, JSON.stringify(toFetch));
// Read file from disk
toFetch = [Utils.makeGUID(), Utils.makeGUID()];
syncTesting.fakeFilesystem.fakeContents[filename] = JSON.stringify(toFetch);
- engine.loadToFetch();
+ yield engine.loadToFetch();
do_check_eq(engine.toFetch.length, 2);
do_check_eq(engine.toFetch[0], toFetch[0]);
do_check_eq(engine.toFetch[1], toFetch[1]);
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_previousFailed() {
+add_task(function* test_previousFailed() {
_("SyncEngine.previousFailed corresponds to file on disk");
let syncTesting = new SyncTestingInfrastructure(server);
const filename = "weave/failed/steam.json";
- let engine = makeSteamEngine();
+ let engine = yield makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(engine.previousFailed.length, 0);
// Write file to disk
let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
engine.previousFailed = previousFailed;
do_check_eq(engine.previousFailed, previousFailed);
// previousFailed is written asynchronously
- engine._store._sleep(0);
+ yield engine._store._sleep(0);
let fakefile = syncTesting.fakeFilesystem.fakeContents[filename];
do_check_eq(fakefile, JSON.stringify(previousFailed));
// Read file from disk
previousFailed = [Utils.makeGUID(), Utils.makeGUID()];
syncTesting.fakeFilesystem.fakeContents[filename] = JSON.stringify(previousFailed);
- engine.loadPreviousFailed();
+ yield engine.loadPreviousFailed();
do_check_eq(engine.previousFailed.length, 2);
do_check_eq(engine.previousFailed[0], previousFailed[0]);
do_check_eq(engine.previousFailed[1], previousFailed[1]);
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_resetClient() {
+add_task(function* test_resetClient() {
_("SyncEngine.resetClient resets lastSync and toFetch");
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeSteamEngine();
+ let engine = yield makeSteamEngine();
try {
// Ensure pristine environment
do_check_eq(Svc.Prefs.get("steam.lastSync"), undefined);
do_check_eq(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
do_check_eq(engine.toFetch.length, 0);
engine.lastSync = 123.45;
engine.lastSyncLocal = 67890;
engine.toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
engine.previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
- engine.resetClient();
+ yield engine.resetClient();
do_check_eq(engine.lastSync, 0);
do_check_eq(engine.lastSyncLocal, 0);
do_check_eq(engine.toFetch.length, 0);
do_check_eq(engine.previousFailed.length, 0);
} finally {
Svc.Prefs.resetBranch("");
}
-}
+});
-function test_wipeServer() {
+add_task(function* test_wipeServer() {
_("SyncEngine.wipeServer deletes server data and resets the client.");
- let engine = makeSteamEngine();
+ let engine = yield makeSteamEngine();
const PAYLOAD = 42;
let steamCollection = new ServerWBO("steam", PAYLOAD);
let server = httpd_setup({
"/1.1/foo/storage/steam": steamCollection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- do_test_pending();
try {
// Some data to reset.
engine.lastSync = 123.45;
engine.toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
_("Wipe server data and reset client.");
- engine.wipeServer();
+ yield engine.wipeServer();
do_check_eq(steamCollection.payload, undefined);
do_check_eq(engine.lastSync, 0);
do_check_eq(engine.toFetch.length, 0);
} finally {
- server.stop(do_test_finished);
+ yield promiseStopServer(server);
Svc.Prefs.resetBranch("");
}
-}
+});
function run_test() {
server = httpd_setup({});
- test_url_attributes();
- test_syncID();
- test_lastSync();
- test_toFetch();
- test_previousFailed();
- test_resetClient();
- test_wipeServer();
-
- server.stop(run_next_test);
+ run_next_test();
}
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -6,36 +6,40 @@ Cu.import("resource://services-sync/engi
Cu.import("resource://services-sync/policies.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/resource.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/rotaryengine.js");
Cu.import("resource://testing-common/services/sync/utils.js");
-function makeRotaryEngine() {
- return new RotaryEngine(Service);
+function* makeRotaryEngine() {
+ let engine = new RotaryEngine(Service);
+ yield engine.whenInitialized;
+ engine._tracker.persistChangedIDs = false;
+ return engine;
}
-function cleanAndGo(server) {
+function* cleanAndGo(server) {
Svc.Prefs.resetBranch("");
Svc.Prefs.set("log.logger.engine.rotary", "Trace");
Service.recordManager.clearCache();
- server.stop(run_next_test);
+ yield promiseStopServer(server);
}
function configureService(server, username, password) {
Service.clusterURL = server.baseURI;
Service.identity.account = username || "foo";
Service.identity.basicPassword = password || "password";
}
function createServerAndConfigureClient() {
let engine = new RotaryEngine(Service);
+ engine._tracker.persistChangedIDs = false;
let contents = {
meta: {global: {engines: {rotary: {version: engine.version,
syncID: engine.syncID}}}},
crypto: {},
rotary: {}
};
@@ -50,16 +54,18 @@ function createServerAndConfigureClient(
Service.identity.username = USER;
Service._updateCachedURLs();
return [engine, server, USER];
}
function run_test() {
generateNewKeys(Service.collectionKeys);
+ initTestLogging("Trace");
+ Log.repository.getLogger("Sync.EngineManager").level = Log.Level.Trace;
Svc.Prefs.set("log.logger.engine.rotary", "Trace");
run_next_test();
}
/*
* Tests
*
* SyncEngine._sync() is divided into four rather independent steps:
@@ -68,17 +74,17 @@ function run_test() {
* - _processIncoming()
* - _uploadOutgoing()
* - _syncFinish()
*
* In the spirit of unit testing, these are tested individually for
* different scenarios below.
*/
-add_test(function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
+add_task(function* test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
_("SyncEngine._syncStartup resets sync and wipes server data if there's no or an outdated global record");
// Some server side data that's going to be wiped
let collection = new ServerCollection();
collection.insert('flying',
encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
collection.insert('scotsman',
@@ -87,140 +93,140 @@ add_test(function test_syncStartup_empty
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
Service.identity.username = "foo";
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine._store.items = {rekolok: "Rekonstruktionslokomotive"};
try {
// Confirm initial environment
do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
- let metaGlobal = Service.recordManager.get(engine.metaURL);
+ let metaGlobal = yield Service.recordManager.get(engine.metaURL);
do_check_eq(metaGlobal.payload.engines, undefined);
do_check_true(!!collection.payload("flying"));
do_check_true(!!collection.payload("scotsman"));
engine.lastSync = Date.now() / 1000;
engine.lastSyncLocal = Date.now();
// Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
// so it has nothing to check.
- engine._syncStartup();
+ yield engine._syncStartup();
// The meta/global WBO has been filled with data about the engine
let engineData = metaGlobal.payload.engines["rotary"];
do_check_eq(engineData.version, engine.version);
do_check_eq(engineData.syncID, engine.syncID);
// Sync was reset and server data was wiped
do_check_eq(engine.lastSync, 0);
do_check_eq(collection.payload("flying"), undefined);
do_check_eq(collection.payload("scotsman"), undefined);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_syncStartup_serverHasNewerVersion() {
+add_task(function* test_syncStartup_serverHasNewerVersion() {
_("SyncEngine._syncStartup ");
let global = new ServerWBO('global', {engines: {rotary: {version: 23456}}});
let server = httpd_setup({
"/1.1/foo/storage/meta/global": global.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
Service.identity.username = "foo";
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
try {
// The server has a newer version of the data and our engine can
// handle. That should give us an exception.
let error;
try {
- engine._syncStartup();
+ yield engine._syncStartup();
} catch (ex) {
error = ex;
}
do_check_eq(error.failureCode, VERSION_OUT_OF_DATE);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_syncStartup_syncIDMismatchResetsClient() {
+add_task(function* test_syncStartup_syncIDMismatchResetsClient() {
_("SyncEngine._syncStartup resets sync if syncIDs don't match");
let server = sync_httpd_setup({});
let syncTesting = new SyncTestingInfrastructure(server);
Service.identity.username = "foo";
// global record with a different syncID than our engine has
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
let global = new ServerWBO('global',
{engines: {rotary: {version: engine.version,
syncID: 'foobar'}}});
server.registerPathHandler("/1.1/foo/storage/meta/global", global.handler());
try {
// Confirm initial environment
do_check_eq(engine.syncID, 'fake-guid-0');
do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
engine.lastSync = Date.now() / 1000;
engine.lastSyncLocal = Date.now();
- engine._syncStartup();
+ yield engine._syncStartup();
// The engine has assumed the server's syncID
do_check_eq(engine.syncID, 'foobar');
// Sync was reset
do_check_eq(engine.lastSync, 0);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_emptyServer() {
+add_task(function* test_processIncoming_emptyServer() {
_("SyncEngine._processIncoming working with an empty server backend");
let collection = new ServerCollection();
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
Service.identity.username = "foo";
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
try {
// Merely ensure that this code path is run without any errors
- engine._processIncoming();
+ yield engine._processIncoming();
do_check_eq(engine.lastSync, 0);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_createFromServer() {
+add_task(function* test_processIncoming_createFromServer() {
_("SyncEngine._processIncoming creates new records from server data");
// Some server records that will be downloaded
let collection = new ServerCollection();
collection.insert('flying',
encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
collection.insert('scotsman',
@@ -238,50 +244,50 @@ add_test(function test_processIncoming_c
"/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
Service.identity.username = "foo";
generateNewKeys(Service.collectionKeys);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
do_check_eq(engine.lastSync, 0);
do_check_eq(engine.lastModified, null);
do_check_eq(engine._store.items.flying, undefined);
do_check_eq(engine._store.items.scotsman, undefined);
do_check_eq(engine._store.items['../pathological'], undefined);
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Timestamps of last sync and last server modification are set.
do_check_true(engine.lastSync > 0);
do_check_true(engine.lastModified > 0);
// Local records have been created from the server data.
do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
do_check_eq(engine._store.items['../pathological'], "Pathological Case");
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_reconcile() {
+add_task(function* test_processIncoming_reconcile() {
_("SyncEngine._processIncoming updates local records");
let collection = new ServerCollection();
// This server record is newer than the corresponding client one,
// so it'll update its data.
collection.insert('newrecord',
encryptPayload({id: 'newrecord',
@@ -321,45 +327,45 @@ add_test(function test_processIncoming_r
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
Service.identity.username = "foo";
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine._store.items = {newerserver: "New data, but not as new as server!",
olderidentical: "Older but identical",
updateclient: "Got data?",
original: "Original Entry",
long_original: "Long Original Entry",
nukeme: "Nuke me!"};
// Make this record 1 min old, thus older than the one on the server
- engine._tracker.addChangedID('newerserver', Date.now()/1000 - 60);
+ yield engine._tracker.addChangedID('newerserver', Date.now()/1000 - 60);
// This record has been changed 2 mins later than the one on the server
- engine._tracker.addChangedID('olderidentical', Date.now()/1000);
+ yield engine._tracker.addChangedID('olderidentical', Date.now()/1000);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
do_check_eq(engine._store.items.newrecord, undefined);
do_check_eq(engine._store.items.newerserver, "New data, but not as new as server!");
do_check_eq(engine._store.items.olderidentical, "Older but identical");
do_check_eq(engine._store.items.updateclient, "Got data?");
do_check_eq(engine._store.items.nukeme, "Nuke me!");
do_check_true(engine._tracker.changedIDs['olderidentical'] > 0);
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Timestamps of last sync and last server modification are set.
do_check_true(engine.lastSync > 0);
do_check_true(engine.lastModified > 0);
// The new record is created.
do_check_eq(engine._store.items.newrecord, "New stuff...");
@@ -377,21 +383,21 @@ add_test(function test_processIncoming_r
// The incoming ID is preferred.
do_check_eq(engine._store.items.original, undefined);
do_check_eq(engine._store.items.duplication, "Original Entry");
do_check_neq(engine._delete.ids.indexOf("original"), -1);
// The 'nukeme' record marked as deleted is removed.
do_check_eq(engine._store.items.nukeme, undefined);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_reconcile_local_deleted() {
+add_task(function* test_processIncoming_reconcile_local_deleted() {
_("Ensure local, duplicate ID is deleted on server.");
// When a duplicate is resolved, the local ID (which is never taken) should
// be deleted on the server.
let [engine, server, user] = createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
@@ -400,56 +406,56 @@ add_test(function test_processIncoming_r
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
record = encryptPayload({id: "DUPE_LOCAL", denomination: "local"});
wbo = new ServerWBO("DUPE_LOCAL", record, now - 1);
server.insertWBO(user, "rotary", wbo);
- engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
- do_check_true(engine._store.itemExists("DUPE_LOCAL"));
- do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+ yield engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
+ do_check_true(yield engine._store.itemExists("DUPE_LOCAL"));
+ do_check_eq("DUPE_LOCAL", yield engine._findDupe({id: "DUPE_INCOMING"}));
- engine._sync();
+ yield engine._sync();
do_check_attribute_count(engine._store.items, 1);
do_check_true("DUPE_INCOMING" in engine._store.items);
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
do_check_neq(undefined, collection.wbo("DUPE_INCOMING"));
- cleanAndGo(server);
+ yield cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_equivalent() {
+add_task(function* test_processIncoming_reconcile_equivalent() {
_("Ensure proper handling of incoming records that match local.");
let [engine, server, user] = createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
let record = encryptPayload({id: "entry", denomination: "denomination"});
let wbo = new ServerWBO("entry", record, now + 2);
server.insertWBO(user, "rotary", wbo);
engine._store.items = {entry: "denomination"};
- do_check_true(engine._store.itemExists("entry"));
+ do_check_true(yield engine._store.itemExists("entry"));
- engine._sync();
+ yield engine._sync();
do_check_attribute_count(engine._store.items, 1);
cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_locally_deleted_dupe_new() {
+add_task(function* test_processIncoming_reconcile_locally_deleted_dupe_new() {
_("Ensure locally deleted duplicate record newer than incoming is handled.");
// This is a somewhat complicated test. It ensures that if a client receives
// a modified record for an item that is deleted locally but with a different
// ID that the incoming record is ignored. This is a corner case for record
// handling, but it needs to be supported.
let [engine, server, user] = createServerAndConfigureClient();
@@ -458,37 +464,37 @@ add_test(function test_processIncoming_r
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
// Simulate a locally-deleted item.
engine._store.items = {};
- engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
- do_check_false(engine._store.itemExists("DUPE_LOCAL"));
- do_check_false(engine._store.itemExists("DUPE_INCOMING"));
- do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+ yield engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
+ do_check_false(yield engine._store.itemExists("DUPE_LOCAL"));
+ do_check_false(yield engine._store.itemExists("DUPE_INCOMING"));
+ do_check_eq("DUPE_LOCAL", yield engine._findDupe({id: "DUPE_INCOMING"}));
- engine._sync();
+ yield engine._sync();
// After the sync, the server's payload for the original ID should be marked
// as deleted.
do_check_empty(engine._store.items);
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
do_check_neq(null, wbo);
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_true(payload.deleted);
- cleanAndGo(server);
+ yield cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_locally_deleted_dupe_old() {
+add_task(function* test_processIncoming_reconcile_locally_deleted_dupe_old() {
_("Ensure locally deleted duplicate record older than incoming is restored.");
// This is similar to the above test except it tests the condition where the
// incoming record is newer than the local deletion, therefore overriding it.
let [engine, server, user] = createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
@@ -496,112 +502,112 @@ add_test(function test_processIncoming_r
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
// Simulate a locally-deleted item.
engine._store.items = {};
- engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
- do_check_false(engine._store.itemExists("DUPE_LOCAL"));
- do_check_false(engine._store.itemExists("DUPE_INCOMING"));
- do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+ yield engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
+ do_check_false(yield engine._store.itemExists("DUPE_LOCAL"));
+ do_check_false(yield engine._store.itemExists("DUPE_INCOMING"));
+ do_check_eq("DUPE_LOCAL", yield engine._findDupe({id: "DUPE_INCOMING"}));
- engine._sync();
+ yield engine._sync();
// Since the remote change is newer, the incoming item should exist locally.
do_check_attribute_count(engine._store.items, 1);
do_check_true("DUPE_INCOMING" in engine._store.items);
do_check_eq("incoming", engine._store.items.DUPE_INCOMING);
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_eq("incoming", payload.denomination);
- cleanAndGo(server);
+ yield cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_changed_dupe() {
+add_task(function* test_processIncoming_reconcile_changed_dupe() {
_("Ensure that locally changed duplicate record is handled properly.");
let [engine, server, user] = createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
// The local record is newer than the incoming one, so it should be retained.
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
- engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
- do_check_true(engine._store.itemExists("DUPE_LOCAL"));
- do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+ yield engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
+ do_check_true(yield engine._store.itemExists("DUPE_LOCAL"));
+ do_check_eq("DUPE_LOCAL", yield engine._findDupe({id: "DUPE_INCOMING"}));
- engine._sync();
+ yield engine._sync();
// The ID should have been changed to incoming.
do_check_attribute_count(engine._store.items, 1);
do_check_true("DUPE_INCOMING" in engine._store.items);
// On the server, the local ID should be deleted and the incoming ID should
// have its payload set to what was in the local record.
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
do_check_neq(undefined, wbo);
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_eq("local", payload.denomination);
- cleanAndGo(server);
+ yield cleanAndGo(server);
});
-add_test(function test_processIncoming_reconcile_changed_dupe_new() {
+add_task(function* test_processIncoming_reconcile_changed_dupe_new() {
_("Ensure locally changed duplicate record older than incoming is ignored.");
// This test is similar to the above except the incoming record is younger
// than the local record. The incoming record should be authoritative.
let [engine, server, user] = createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
server.insertWBO(user, "rotary", wbo);
engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
- engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
- do_check_true(engine._store.itemExists("DUPE_LOCAL"));
- do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+ yield engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
+ do_check_true(yield engine._store.itemExists("DUPE_LOCAL"));
+ do_check_eq("DUPE_LOCAL", yield engine._findDupe({id: "DUPE_INCOMING"}));
- engine._sync();
+ yield engine._sync();
// The ID should have been changed to incoming.
do_check_attribute_count(engine._store.items, 1);
do_check_true("DUPE_INCOMING" in engine._store.items);
// On the server, the local ID should be deleted and the incoming ID should
// have its payload retained.
let collection = server.getCollection(user, "rotary");
do_check_eq(1, collection.count());
wbo = collection.wbo("DUPE_INCOMING");
do_check_neq(undefined, wbo);
let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
do_check_eq("incoming", payload.denomination);
- cleanAndGo(server);
+ yield cleanAndGo(server);
});
-add_test(function test_processIncoming_mobile_batchSize() {
+add_task(function* test_processIncoming_mobile_batchSize() {
_("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");
Svc.Prefs.set("client.type", "mobile");
Service.identity.username = "foo";
// A collection that logs each GET
let collection = new ServerCollection();
collection.get_log = [];
@@ -622,27 +628,27 @@ add_test(function test_processIncoming_m
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
_("On a mobile client, we get new records from the server in batches of 50.");
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
do_check_attribute_count(engine._store.items, 234);
do_check_true('record-no-0' in engine._store.items);
do_check_true('record-no-49' in engine._store.items);
do_check_true('record-no-50' in engine._store.items);
do_check_true('record-no-233' in engine._store.items);
// Verify that the right number of GET requests with the right
// kind of parameters were made.
@@ -657,22 +663,22 @@ add_test(function test_processIncoming_m
do_check_eq(collection.get_log[i+1].limit, undefined);
if (i < Math.floor(234 / MOBILE_BATCH_SIZE))
do_check_eq(collection.get_log[i+1].ids.length, MOBILE_BATCH_SIZE);
else
do_check_eq(collection.get_log[i+1].ids.length, 234 % MOBILE_BATCH_SIZE);
}
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_store_toFetch() {
+add_task(function* test_processIncoming_store_toFetch() {
_("If processIncoming fails in the middle of a batch on mobile, state is saved in toFetch and lastSync.");
Service.identity.username = "foo";
Svc.Prefs.set("client.type", "mobile");
// A collection that throws at the fourth get.
let collection = new ServerCollection();
collection._get_calls = 0;
collection._get = collection.get;
@@ -688,17 +694,17 @@ add_test(function test_processIncoming_s
for (var i = 0; i < MOBILE_BATCH_SIZE * 3; i++) {
let id = 'record-no-' + i;
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
let wbo = new ServerWBO(id, payload);
wbo.modified = Date.now()/1000 + 60 * (i - MOBILE_BATCH_SIZE * 3);
collection.insertWBO(wbo);
}
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.enabled = true;
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
@@ -709,38 +715,38 @@ add_test(function test_processIncoming_s
try {
// Confirm initial environment
do_check_eq(engine.lastSync, 0);
do_check_empty(engine._store.items);
let error;
try {
- engine.sync();
+ yield engine.sync();
} catch (ex) {
error = ex;
}
do_check_true(!!error);
// Only the first two batches have been applied.
do_check_eq(Object.keys(engine._store.items).length,
MOBILE_BATCH_SIZE * 2);
// The third batch is stuck in toFetch. lastSync has been moved forward to
// the last successful item's timestamp.
do_check_eq(engine.toFetch.length, MOBILE_BATCH_SIZE);
do_check_eq(engine.lastSync, collection.wbo("record-no-99").modified);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_resume_toFetch() {
+add_task(function* test_processIncoming_resume_toFetch() {
_("toFetch and previousFailed items left over from previous syncs are fetched on the next sync, along with new items.");
Service.identity.username = "foo";
const LASTSYNC = Date.now() / 1000;
// Server records that will be downloaded
let collection = new ServerCollection();
collection.insert('flying',
@@ -760,17 +766,17 @@ add_test(function test_processIncoming_r
collection.insertWBO(wbo);
}
collection.wbo("flying").modified =
collection.wbo("scotsman").modified = LASTSYNC - 10;
collection._wbos.rekolok.modified = LASTSYNC + 10;
// Time travel 10 seconds into the future but still download the above WBOs.
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.lastSync = LASTSYNC;
engine.toFetch = ["flying", "scotsman"];
engine.previousFailed = ["failed0", "failed1", "failed2"];
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
@@ -782,48 +788,49 @@ add_test(function test_processIncoming_r
syncID: engine.syncID}};
try {
// Confirm initial environment
do_check_eq(engine._store.items.flying, undefined);
do_check_eq(engine._store.items.scotsman, undefined);
do_check_eq(engine._store.items.rekolok, undefined);
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Local records have been created from the server data.
do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
do_check_eq(engine._store.items.rekolok, "Rekonstruktionslokomotive");
do_check_eq(engine._store.items.failed0, "Record No. 0");
do_check_eq(engine._store.items.failed1, "Record No. 1");
do_check_eq(engine._store.items.failed2, "Record No. 2");
do_check_eq(engine.previousFailed.length, 0);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_applyIncomingBatchSize_smaller() {
+add_task(function* test_processIncoming_applyIncomingBatchSize_smaller() {
_("Ensure that a number of incoming items less than applyIncomingBatchSize is still applied.");
Service.identity.username = "foo";
// Engine that doesn't like the first and last record it's given.
const APPLY_BATCH_SIZE = 10;
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
+
engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
- engine._store.applyIncomingBatch = function (records) {
+ engine._store.applyIncomingBatch = Task.async(function* (records) {
let failed1 = records.shift();
let failed2 = records.pop();
- this._applyIncomingBatch(records);
+ yield this._applyIncomingBatch(records);
return [failed1.id, failed2.id];
- };
+ });
// Let's create less than a batch worth of server side records.
let collection = new ServerCollection();
for (let i = 0; i < APPLY_BATCH_SIZE - 1; i++) {
let id = 'record-no-' + i;
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
@@ -838,49 +845,49 @@ add_test(function test_processIncoming_a
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
do_check_empty(engine._store.items);
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Records have been applied and the expected failures have failed.
do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE - 1 - 2);
do_check_eq(engine.toFetch.length, 0);
do_check_eq(engine.previousFailed.length, 2);
do_check_eq(engine.previousFailed[0], "record-no-0");
do_check_eq(engine.previousFailed[1], "record-no-8");
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_NEW_FAILURES", { key: "rotary" }), 2);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_applyIncomingBatchSize_multiple() {
+add_task(function* test_processIncoming_applyIncomingBatchSize_multiple() {
_("Ensure that incoming items are applied according to applyIncomingBatchSize.");
Service.identity.username = "foo";
const APPLY_BATCH_SIZE = 10;
// Engine that applies records in batches.
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
let batchCalls = 0;
engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
- engine._store.applyIncomingBatch = function (records) {
+ engine._store.applyIncomingBatch = Task.async(function* (records) {
batchCalls += 1;
do_check_eq(records.length, APPLY_BATCH_SIZE);
- this._applyIncomingBatch.apply(this, arguments);
- };
+ yield this._applyIncomingBatch.apply(this, arguments);
+ });
// Let's create three batches worth of server side records.
let collection = new ServerCollection();
for (let i = 0; i < APPLY_BATCH_SIZE * 3; i++) {
let id = 'record-no-' + i;
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
@@ -895,62 +902,62 @@ add_test(function test_processIncoming_a
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
do_check_empty(engine._store.items);
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Records have been applied in 3 batches.
do_check_eq(batchCalls, 3);
do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE * 3);
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_NEW_FAILURES", { key: "rotary" }), 3);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_notify_count() {
+add_task(function* test_processIncoming_notify_count() {
_("Ensure that failed records are reported only once.");
Service.identity.username = "foo";
const APPLY_BATCH_SIZE = 5;
const NUMBER_OF_RECORDS = 15;
// Engine that fails the first record.
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
- engine._store.applyIncomingBatch = function (records) {
- engine._store._applyIncomingBatch(records.slice(1));
+ engine._store.applyIncomingBatch = Task.async(function* (records) {
+ yield engine._store._applyIncomingBatch(records.slice(1));
return [records[0].id];
- };
+ });
// Create a batch of server side records.
let collection = new ServerCollection();
for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
let id = 'record-no-' + i;
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- let meta_global = Service.recordManager.set(engine.metaURL,
- new WBORecord(engine.metaURL));
+ let meta_global = yield Service.recordManager.set(engine.metaURL,
+ new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment.
do_check_eq(engine.lastSync, 0);
do_check_eq(engine.toFetch.length, 0);
do_check_eq(engine.previousFailed.length, 0);
do_check_empty(engine._store.items);
@@ -960,18 +967,18 @@ add_test(function test_processIncoming_n
function onApplied(count) {
_("Called with " + JSON.stringify(counts));
counts = count;
called++;
}
Svc.Obs.add("weave:engine:sync:applied", onApplied);
// Do sync.
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Confirm failures.
do_check_attribute_count(engine._store.items, 12);
do_check_eq(engine.previousFailed.length, 3);
do_check_eq(engine.previousFailed[0], "record-no-0");
do_check_eq(engine.previousFailed[1], "record-no-5");
do_check_eq(engine.previousFailed[2], "record-no-10");
@@ -981,54 +988,54 @@ add_test(function test_processIncoming_n
do_check_eq(counts.applied, 15);
do_check_eq(counts.newFailed, 3);
do_check_eq(counts.succeeded, 12);
// Make sure we recorded telemetry for the failed records.
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_NEW_FAILURES", { key: "rotary" }), 3);
// Sync again, 1 of the failed items are the same, the rest didn't fail.
- engine._processIncoming();
+ yield engine._processIncoming();
// Confirming removed failures.
do_check_attribute_count(engine._store.items, 14);
do_check_eq(engine.previousFailed.length, 1);
do_check_eq(engine.previousFailed[0], "record-no-0");
do_check_eq(called, 2);
do_check_eq(counts.failed, 1);
do_check_eq(counts.applied, 3);
do_check_eq(counts.newFailed, 0);
do_check_eq(counts.succeeded, 2);
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_NEW_FAILURES", { key: "rotary" }), 0);
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_previousFailed() {
+add_task(function* test_processIncoming_previousFailed() {
_("Ensure that failed records are retried.");
Service.identity.username = "foo";
Svc.Prefs.set("client.type", "mobile");
const APPLY_BATCH_SIZE = 4;
const NUMBER_OF_RECORDS = 14;
// Engine that fails the first 2 records.
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.mobileGUIDFetchBatchSize = engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
- engine._store.applyIncomingBatch = function (records) {
- engine._store._applyIncomingBatch(records.slice(2));
+ engine._store.applyIncomingBatch = Task.async(function* (records) {
+ yield engine._store._applyIncomingBatch(records.slice(2));
return [records[0].id, records[1].id];
- };
+ });
// Create a batch of server side records.
let collection = new ServerCollection();
for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
let id = 'record-no-' + i;
let payload = encryptPayload({id: id, denomination: "Record No. " + i});
collection.insert(id, payload);
}
@@ -1051,33 +1058,33 @@ add_test(function test_processIncoming_p
do_check_empty(engine._store.items);
// Initial failed items in previousFailed to be reset.
let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
engine.previousFailed = previousFailed;
do_check_eq(engine.previousFailed, previousFailed);
// Do sync.
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Expected result: 4 sync batches with 2 failures each => 8 failures
do_check_attribute_count(engine._store.items, 6);
do_check_eq(engine.previousFailed.length, 8);
do_check_eq(engine.previousFailed[0], "record-no-0");
do_check_eq(engine.previousFailed[1], "record-no-1");
do_check_eq(engine.previousFailed[2], "record-no-4");
do_check_eq(engine.previousFailed[3], "record-no-5");
do_check_eq(engine.previousFailed[4], "record-no-8");
do_check_eq(engine.previousFailed[5], "record-no-9");
do_check_eq(engine.previousFailed[6], "record-no-12");
do_check_eq(engine.previousFailed[7], "record-no-13");
// Sync again with the same failed items (records 0, 1, 8, 9).
- engine._processIncoming();
+ yield engine._processIncoming();
// A second sync with the same failed items should not add the same items again.
// Items that did not fail a second time should no longer be in previousFailed.
do_check_attribute_count(engine._store.items, 10);
do_check_eq(engine.previousFailed.length, 4);
do_check_eq(engine.previousFailed[0], "record-no-0");
do_check_eq(engine.previousFailed[1], "record-no-1");
do_check_eq(engine.previousFailed[2], "record-no-8");
@@ -1085,22 +1092,22 @@ add_test(function test_processIncoming_p
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_NEW_FAILURES", { key: "rotary" }), 8);
// Refetched items that didn't fail the second time are in engine._store.items.
do_check_eq(engine._store.items['record-no-4'], "Record No. 4");
do_check_eq(engine._store.items['record-no-5'], "Record No. 5");
do_check_eq(engine._store.items['record-no-12'], "Record No. 12");
do_check_eq(engine._store.items['record-no-13'], "Record No. 13");
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_failed_records() {
+add_task(function* test_processIncoming_failed_records() {
_("Ensure that failed records from _reconcile and applyIncomingBatch are refetched.");
Service.identity.username = "foo";
// Let's create three and a bit batches worth of server side records.
let collection = new ServerCollection();
const NUMBER_OF_RECORDS = MOBILE_BATCH_SIZE * 3 + 5;
for (let i = 0; i < NUMBER_OF_RECORDS; i++) {
let id = 'record-no-' + i;
@@ -1116,33 +1123,33 @@ add_test(function test_processIncoming_f
const BOGUS_RECORDS = ["record-no-" + 42,
"record-no-" + 23,
"record-no-" + (42 + MOBILE_BATCH_SIZE),
"record-no-" + (23 + MOBILE_BATCH_SIZE),
"record-no-" + (42 + MOBILE_BATCH_SIZE * 2),
"record-no-" + (23 + MOBILE_BATCH_SIZE * 2),
"record-no-" + (2 + MOBILE_BATCH_SIZE * 3),
"record-no-" + (1 + MOBILE_BATCH_SIZE * 3)];
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.applyIncomingBatchSize = MOBILE_BATCH_SIZE;
engine.__reconcile = engine._reconcile;
- engine._reconcile = function _reconcile(record) {
+ engine._reconcile = Task.async(function* _reconcile(record) {
if (BOGUS_RECORDS.indexOf(record.id) % 2 == 0) {
throw "I don't like this record! Baaaaaah!";
}
- return this.__reconcile.apply(this, arguments);
- };
+ return (yield this.__reconcile.apply(this, arguments));
+ });
engine._store._applyIncoming = engine._store.applyIncoming;
- engine._store.applyIncoming = function (record) {
+ engine._store.applyIncoming = Task.async(function* (record) {
if (BOGUS_RECORDS.indexOf(record.id) % 2 == 1) {
throw "I don't like this record! Baaaaaah!";
}
- return this._applyIncoming.apply(this, arguments);
- };
+ return (yield this._applyIncoming.apply(this, arguments));
+ });
// Keep track of requests made of a collection.
let count = 0;
let uris = [];
function recording_handler(collection) {
let h = collection.handler();
return function(req, res) {
++count;
@@ -1172,18 +1179,18 @@ add_test(function test_processIncoming_f
let observerSubject;
let observerData;
Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
observerSubject = subject;
observerData = data;
});
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
// Ensure that all records but the bogus 4 have been applied.
do_check_attribute_count(engine._store.items,
NUMBER_OF_RECORDS - BOGUS_RECORDS.length);
// Ensure that the bogus records will be fetched again on the next sync.
do_check_eq(engine.previousFailed.length, BOGUS_RECORDS.length);
engine.previousFailed.sort();
@@ -1196,46 +1203,46 @@ add_test(function test_processIncoming_f
do_check_eq(observerData, engine.name);
do_check_eq(observerSubject.failed, BOGUS_RECORDS.length);
do_check_eq(observerSubject.newFailed, BOGUS_RECORDS.length);
do_check_eq(sumHistogram("WEAVE_ENGINE_APPLY_NEW_FAILURES", { key: "rotary" }), BOGUS_RECORDS.length);
// Testing batching of failed item fetches.
// Try to sync again. Ensure that we split the request into chunks to avoid
// URI length limitations.
- function batchDownload(batchSize) {
+ function* batchDownload(batchSize) {
count = 0;
uris = [];
engine.guidFetchBatchSize = batchSize;
- engine._processIncoming();
+ yield engine._processIncoming();
_("Tried again. Requests: " + count + "; URIs: " + JSON.stringify(uris));
return count;
}
// There are 8 bad records, so this needs 3 fetches.
_("Test batching with ID batch size 3, normal mobile batch size.");
- do_check_eq(batchDownload(3), 3);
+ do_check_eq((yield batchDownload(3)), 3);
// Now see with a more realistic limit.
_("Test batching with sufficient ID batch size.");
- do_check_eq(batchDownload(BOGUS_RECORDS.length), 1);
+ do_check_eq((yield batchDownload(BOGUS_RECORDS.length)), 1);
// If we're on mobile, that limit is used by default.
_("Test batching with tiny mobile batch size.");
Svc.Prefs.set("client.type", "mobile");
engine.mobileGUIDFetchBatchSize = 2;
- do_check_eq(batchDownload(BOGUS_RECORDS.length), 4);
+ do_check_eq((yield batchDownload(BOGUS_RECORDS.length)), 4);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_processIncoming_decrypt_failed() {
+add_task(function* test_processIncoming_decrypt_failed() {
_("Ensure that records failing to decrypt are either replaced or refetched.");
Service.identity.username = "foo";
// Some good and some bogus records. One doesn't contain valid JSON,
// the other will throw during decrypt.
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO(
@@ -1254,17 +1261,17 @@ add_test(function test_processIncoming_d
Svc.Crypto.decrypt = function (ciphertext) {
if (ciphertext == "Decrypt this!") {
throw "Derp! Cipher finalized failed. Im ur crypto destroyin ur recordz.";
}
return this._decrypt.apply(this, arguments);
};
// Some broken records also exist locally.
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.enabled = true;
engine._store.items = {nojson: "Valid JSON",
nodecrypt: "Valid ciphertext"};
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
@@ -1284,178 +1291,178 @@ add_test(function test_processIncoming_d
let observerData;
Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
observerSubject = subject;
observerData = data;
});
engine.lastSync = collection.wbo("nojson").modified - 1;
- engine.sync();
+ yield engine.sync();
do_check_eq(engine.previousFailed.length, 4);
do_check_eq(engine.previousFailed[0], "nojson");
do_check_eq(engine.previousFailed[1], "nojson2");
do_check_eq(engine.previousFailed[2], "nodecrypt");
do_check_eq(engine.previousFailed[3], "nodecrypt2");
// Ensure the observer was notified
do_check_eq(observerData, engine.name);
do_check_eq(observerSubject.applied, 2);
do_check_eq(observerSubject.failed, 4);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_uploadOutgoing_toEmptyServer() {
+add_task(function* test_uploadOutgoing_toEmptyServer() {
_("SyncEngine._uploadOutgoing uploads new records to server");
Service.identity.username = "foo";
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO('flying');
collection._wbos.scotsman = new ServerWBO('scotsman');
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler(),
"/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
"/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman"};
// Mark one of these records as changed
- engine._tracker.addChangedID('scotsman', 0);
+ yield engine._tracker.addChangedID('scotsman', 0);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
do_check_eq(engine.lastSyncLocal, 0);
do_check_eq(collection.payload("flying"), undefined);
do_check_eq(collection.payload("scotsman"), undefined);
- engine._syncStartup();
- engine._uploadOutgoing();
+ yield engine._syncStartup();
+ yield engine._uploadOutgoing();
// Local timestamp has been set.
do_check_true(engine.lastSyncLocal > 0);
// Ensure the marked record ('scotsman') has been uploaded and is
// no longer marked.
do_check_eq(collection.payload("flying"), undefined);
do_check_true(!!collection.payload("scotsman"));
do_check_eq(JSON.parse(collection.wbo("scotsman").data.ciphertext).id,
"scotsman");
do_check_eq(engine._tracker.changedIDs["scotsman"], undefined);
// The 'flying' record wasn't marked so it wasn't uploaded
do_check_eq(collection.payload("flying"), undefined);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_uploadOutgoing_failed() {
+add_task(function* test_uploadOutgoing_failed() {
_("SyncEngine._uploadOutgoing doesn't clear the tracker of objects that failed to upload.");
Service.identity.username = "foo";
let collection = new ServerCollection();
// We only define the "flying" WBO on the server, not the "scotsman"
// and "peppercorn" ones.
collection._wbos.flying = new ServerWBO('flying');
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine._store.items = {flying: "LNER Class A3 4472",
scotsman: "Flying Scotsman",
peppercorn: "Peppercorn Class"};
// Mark these records as changed
const FLYING_CHANGED = 12345;
const SCOTSMAN_CHANGED = 23456;
const PEPPERCORN_CHANGED = 34567;
- engine._tracker.addChangedID('flying', FLYING_CHANGED);
- engine._tracker.addChangedID('scotsman', SCOTSMAN_CHANGED);
- engine._tracker.addChangedID('peppercorn', PEPPERCORN_CHANGED);
+ yield engine._tracker.addChangedID('flying', FLYING_CHANGED);
+ yield engine._tracker.addChangedID('scotsman', SCOTSMAN_CHANGED);
+ yield engine._tracker.addChangedID('peppercorn', PEPPERCORN_CHANGED);
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
// Confirm initial environment
do_check_eq(engine.lastSyncLocal, 0);
do_check_eq(collection.payload("flying"), undefined);
do_check_eq(engine._tracker.changedIDs['flying'], FLYING_CHANGED);
do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
engine.enabled = true;
- engine.sync();
+ yield engine.sync();
// Local timestamp has been set.
do_check_true(engine.lastSyncLocal > 0);
// Ensure the 'flying' record has been uploaded and is no longer marked.
do_check_true(!!collection.payload("flying"));
do_check_eq(engine._tracker.changedIDs['flying'], undefined);
// The 'scotsman' and 'peppercorn' records couldn't be uploaded so
// they weren't cleared from the tracker.
do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
+add_task(function* test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
_("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");
Service.identity.username = "foo";
let collection = new ServerCollection();
// Let's count how many times the client posts to the server
var noOfUploads = 0;
collection.post = (function(orig) {
return function() {
noOfUploads++;
return orig.apply(this, arguments);
};
}(collection.post));
// Create a bunch of records (and server side handlers)
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
for (var i = 0; i < 234; i++) {
let id = 'record-no-' + i;
engine._store.items[id] = "Record No. " + i;
- engine._tracker.addChangedID(id, 0);
+ yield engine._tracker.addChangedID(id, 0);
collection.insert(id);
}
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
@@ -1465,51 +1472,51 @@ add_test(function test_uploadOutgoing_MA
let syncTesting = new SyncTestingInfrastructure(server);
try {
// Confirm initial environment.
do_check_eq(noOfUploads, 0);
- engine._syncStartup();
- engine._uploadOutgoing();
+ yield engine._syncStartup();
+ yield engine._uploadOutgoing();
// Ensure all records have been uploaded.
for (i = 0; i < 234; i++) {
do_check_true(!!collection.payload('record-no-' + i));
}
// Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS.
do_check_eq(noOfUploads, Math.ceil(234/MAX_UPLOAD_RECORDS));
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_syncFinish_noDelete() {
+add_task(function* test_syncFinish_noDelete() {
_("SyncEngine._syncFinish resets tracker's score");
let server = httpd_setup({});
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine._delete = {}; // Nothing to delete
engine._tracker.score = 100;
// _syncFinish() will reset the engine's score.
- engine._syncFinish();
+ yield engine._syncFinish();
do_check_eq(engine.score, 0);
- server.stop(run_next_test);
+ yield promiseStopServer(server);
});
-add_test(function test_syncFinish_deleteByIds() {
+add_task(function* test_syncFinish_deleteByIds() {
_("SyncEngine._syncFinish deletes server records slated for deletion (list of record IDs).");
Service.identity.username = "foo";
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO(
'flying', encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
collection._wbos.scotsman = new ServerWBO(
@@ -1519,37 +1526,37 @@ add_test(function test_syncFinish_delete
'rekolok', encryptPayload({id: 'rekolok',
denomination: "Rekonstruktionslokomotive"}));
let server = httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
try {
engine._delete = {ids: ['flying', 'rekolok']};
- engine._syncFinish();
+ yield engine._syncFinish();
// The 'flying' and 'rekolok' records were deleted while the
// 'scotsman' one wasn't.
do_check_eq(collection.payload("flying"), undefined);
do_check_true(!!collection.payload("scotsman"));
do_check_eq(collection.payload("rekolok"), undefined);
// The deletion todo list has been reset.
do_check_eq(engine._delete.ids, undefined);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_syncFinish_deleteLotsInBatches() {
+add_task(function* test_syncFinish_deleteLotsInBatches() {
_("SyncEngine._syncFinish deletes server records in batches of 100 (list of record IDs).");
Service.identity.username = "foo";
let collection = new ServerCollection();
// Let's count how many times the client does a DELETE request to the server
var noOfUploads = 0;
collection.delete = (function(orig) {
@@ -1570,32 +1577,32 @@ add_test(function test_syncFinish_delete
}
let server = httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
try {
// Confirm initial environment
do_check_eq(noOfUploads, 0);
// Declare what we want to have deleted: all records no. 100 and
// up and all records that are less than 200 mins old (which are
// records 0 thru 90).
engine._delete = {ids: [],
newer: now / 1000 - 60 * 200.5};
for (i = 100; i < 234; i++) {
engine._delete.ids.push('record-no-' + i);
}
- engine._syncFinish();
+ yield engine._syncFinish();
// Ensure that the appropriate server data has been wiped while
// preserving records 90 thru 200.
for (i = 0; i < 234; i++) {
let id = 'record-no-' + i;
if (i <= 90 || i >= 100) {
do_check_eq(collection.payload(id), undefined);
} else {
@@ -1605,34 +1612,34 @@ add_test(function test_syncFinish_delete
// The deletion was done in batches
do_check_eq(noOfUploads, 2 + 1);
// The deletion todo list has been reset.
do_check_eq(engine._delete.ids, undefined);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_sync_partialUpload() {
+add_task(function* test_sync_partialUpload() {
_("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");
Service.identity.username = "foo";
let collection = new ServerCollection();
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
generateNewKeys(Service.collectionKeys);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
engine.lastSyncLocal = 456;
// Let the third upload fail completely
var noOfUploads = 0;
collection.post = (function(orig) {
return function() {
if (noOfUploads == 2)
@@ -1641,34 +1648,34 @@ add_test(function test_sync_partialUploa
return orig.apply(this, arguments);
};
}(collection.post));
// Create a bunch of records (and server side handlers)
for (let i = 0; i < 234; i++) {
let id = 'record-no-' + i;
engine._store.items[id] = "Record No. " + i;
- engine._tracker.addChangedID(id, i);
+ yield engine._tracker.addChangedID(id, i);
// Let two items in the first upload batch fail.
if ((i != 23) && (i != 42)) {
collection.insert(id);
}
}
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {rotary: {version: engine.version,
syncID: engine.syncID}};
try {
engine.enabled = true;
let error;
try {
- engine.sync();
+ yield engine.sync();
} catch (ex) {
error = ex;
}
do_check_true(!!error);
// The timestamp has been updated.
do_check_true(engine.lastSyncLocal > 456);
@@ -1680,80 +1687,80 @@ add_test(function test_sync_partialUploa
// hard on the 3rd upload.
if ((i == 23) || (i == 42) || (i >= 200))
do_check_eq(engine._tracker.changedIDs[id], i);
else
do_check_false(id in engine._tracker.changedIDs);
}
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_canDecrypt_noCryptoKeys() {
+add_task(function* test_canDecrypt_noCryptoKeys() {
_("SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection.");
Service.identity.username = "foo";
// Wipe collection keys so we can test the desired scenario.
Service.collectionKeys.clear();
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO(
'flying', encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
try {
- do_check_false(engine.canDecrypt());
+ do_check_false(yield engine.canDecrypt());
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_canDecrypt_true() {
+add_task(function* test_canDecrypt_true() {
_("SyncEngine.canDecrypt returns true if the engine can decrypt the items on the server.");
Service.identity.username = "foo";
generateNewKeys(Service.collectionKeys);
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO(
'flying', encryptPayload({id: 'flying',
denomination: "LNER Class A3 4472"}));
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler()
});
let syncTesting = new SyncTestingInfrastructure(server);
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
try {
- do_check_true(engine.canDecrypt());
+ do_check_true(yield engine.canDecrypt());
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
}
});
-add_test(function test_syncapplied_observer() {
+add_task(function* test_syncapplied_observer() {
Service.identity.username = "foo";
const NUMBER_OF_RECORDS = 10;
- let engine = makeRotaryEngine();
+ let engine = yield makeRotaryEngine();
// Create a batch of server side records.
let collection = new ServerCollection();
for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
let id = 'record-no-' + i;
let payload = encryptPayload({id: id, denomination: "Record No. " + id});
collection.insert(id, payload);
}
@@ -1779,24 +1786,24 @@ add_test(function test_syncapplied_obser
}
Svc.Obs.add("weave:engine:sync:applied", onApplied);
try {
Service.scheduler.hasIncomingItems = false;
// Do sync.
- engine._syncStartup();
- engine._processIncoming();
+ yield engine._syncStartup();
+ yield engine._processIncoming();
do_check_attribute_count(engine._store.items, 10);
do_check_eq(numApplyCalls, 1);
do_check_eq(engine_name, "rotary");
do_check_eq(count.applied, 10);
do_check_true(Service.scheduler.hasIncomingItems);
} finally {
- cleanAndGo(server);
+ yield cleanAndGo(server);
Service.scheduler.hasIncomingItems = false;
Svc.Obs.remove("weave:engine:sync:applied", onApplied);
}
});
--- a/services/sync/tests/unit/test_syncscheduler.js
+++ b/services/sync/tests/unit/test_syncscheduler.js
@@ -15,17 +15,17 @@ Service.engineManager.clear();
function CatapultEngine() {
SyncEngine.call(this, "Catapult", Service);
}
CatapultEngine.prototype = {
__proto__: SyncEngine.prototype,
exception: null, // tests fill this in
_sync: function _sync() {
- throw this.exception;
+ return Promise.reject(this.exception);
}
};
Service.engineManager.register(CatapultEngine);
var scheduler = new SyncScheduler(Service);
var clientsEngine = Service.clientsEngine;
@@ -49,37 +49,39 @@ function sync_httpd_setup() {
upd("crypto", (new ServerWBO("keys")).handler()),
"/1.1/johndoe/storage/clients": upd("clients", clientsColl.handler()),
"/user/1.0/johndoe/node/weave": httpd_handler(200, "OK", "null")
});
}
function setUp(server) {
let deferred = Promise.defer();
- configureIdentity({username: "johndoe"}).then(() => {
+ return configureIdentity({username: "johndoe"}).then(() => {
Service.clusterURL = server.baseURI + "/";
generateNewKeys(Service.collectionKeys);
let serverKeys = Service.collectionKeys.asWBO("crypto", "keys");
serverKeys.encrypt(Service.identity.syncKeyBundle);
- let result = serverKeys.upload(Service.resource(Service.cryptoKeysURL)).success;
- deferred.resolve(result);
- });
- return deferred.promise;
+ return serverKeys.upload(Service.resource(Service.cryptoKeysURL));
+ }).then(result => result.success
+ );
}
function cleanUpAndGo(server) {
let deferred = Promise.defer();
Utils.nextTick(function () {
- Service.startOver();
- if (server) {
- server.stop(deferred.resolve);
- } else {
- deferred.resolve();
- }
+ Service.startOver().then(
+ () => {
+ if (server) {
+ server.stop(deferred.resolve);
+ } else {
+ deferred.resolve();
+ }
+ }
+ );
});
return deferred.promise;
}
function run_test() {
initTestLogging("Trace");
Log.repository.getLogger("Sync.Service").level = Log.Level.Trace;
@@ -159,26 +161,26 @@ add_test(function test_prefAttributes()
add_identity_test(this, function test_updateClientMode() {
_("Test updateClientMode adjusts scheduling attributes based on # of clients appropriately");
do_check_eq(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_false(scheduler.numClients > 1);
do_check_false(scheduler.idle);
// Trigger a change in interval & threshold by adding a client.
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
scheduler.updateClientMode();
do_check_eq(scheduler.syncThreshold, MULTI_DEVICE_THRESHOLD);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.idle);
// Resets the number of clients to 0.
- clientsEngine.resetClient();
+ yield clientsEngine.resetClient();
scheduler.updateClientMode();
// Goes back to single user if # clients is 1.
do_check_eq(scheduler.numClients, 1);
do_check_eq(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_false(scheduler.numClients > 1);
do_check_false(scheduler.idle);
@@ -200,23 +202,23 @@ add_identity_test(this, function test_ma
SyncScheduler.prototype.scheduleAtInterval = function (interval) {
rescheduleInterval = true;
do_check_eq(interval, MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
};
let oldVerifyLogin = Service.verifyLogin;
Service.verifyLogin = function () {
Status.login = MASTER_PASSWORD_LOCKED;
- return false;
+ return Promise.resolve(false);
};
let server = sync_httpd_setup();
yield setUp(server);
- Service.sync();
+ yield Service.sync();
do_check_true(loginFailed);
do_check_eq(Status.login, MASTER_PASSWORD_LOCKED);
do_check_true(rescheduleInterval);
Service.verifyLogin = oldVerifyLogin;
SyncScheduler.prototype.scheduleAtInterval = oldScheduleAtInterval;
@@ -370,48 +372,48 @@ add_identity_test(this, function test_ha
do_check_eq(scheduler._syncErrors, 0);
do_check_false(Status.enforceBackoff);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_eq(Status.backoffInterval, 0);
// Trigger sync with an error several times & observe
// functionality of handleSyncError()
_("Test first error calls scheduleNextSync on default interval");
- Service.sync();
+ yield Service.sync();
do_check_true(scheduler.nextSync <= Date.now() + scheduler.singleDeviceInterval);
do_check_eq(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
do_check_eq(scheduler._syncErrors, 1);
do_check_false(Status.enforceBackoff);
scheduler.syncTimer.clear();
_("Test second error still calls scheduleNextSync on default interval");
- Service.sync();
+ yield Service.sync();
do_check_true(scheduler.nextSync <= Date.now() + scheduler.singleDeviceInterval);
do_check_eq(scheduler.syncTimer.delay, scheduler.singleDeviceInterval);
do_check_eq(scheduler._syncErrors, 2);
do_check_false(Status.enforceBackoff);
scheduler.syncTimer.clear();
_("Test third error sets Status.enforceBackoff and calls scheduleAtInterval");
- Service.sync();
+ yield Service.sync();
let maxInterval = scheduler._syncErrors * (2 * MINIMUM_BACKOFF_INTERVAL);
do_check_eq(Status.backoffInterval, 0);
do_check_true(scheduler.nextSync <= (Date.now() + maxInterval));
do_check_true(scheduler.syncTimer.delay <= maxInterval);
do_check_eq(scheduler._syncErrors, 3);
do_check_true(Status.enforceBackoff);
// Status.enforceBackoff is false but there are still errors.
Status.resetBackoff();
do_check_false(Status.enforceBackoff);
do_check_eq(scheduler._syncErrors, 3);
scheduler.syncTimer.clear();
_("Test fourth error still calls scheduleAtInterval even if enforceBackoff was reset");
- Service.sync();
+ yield Service.sync();
maxInterval = scheduler._syncErrors * (2 * MINIMUM_BACKOFF_INTERVAL);
do_check_true(scheduler.nextSync <= Date.now() + maxInterval);
do_check_true(scheduler.syncTimer.delay <= maxInterval);
do_check_eq(scheduler._syncErrors, 4);
do_check_true(Status.enforceBackoff);
scheduler.syncTimer.clear();
_("Arrange for a successful sync to reset the scheduler error count");
@@ -430,29 +432,29 @@ add_identity_test(this, function test_cl
yield setUp(server);
// Confirm defaults.
do_check_eq(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_false(scheduler.idle);
// Trigger a change in interval & threshold by adding a client.
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
do_check_false(scheduler.numClients > 1);
scheduler.updateClientMode();
- Service.sync();
+ yield Service.sync();
do_check_eq(scheduler.syncThreshold, MULTI_DEVICE_THRESHOLD);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
do_check_true(scheduler.numClients > 1);
do_check_false(scheduler.idle);
// Resets the number of clients to 0.
- clientsEngine.resetClient();
- Service.sync();
+ yield clientsEngine.resetClient();
+ yield Service.sync();
// Goes back to single user if # clients is 1.
do_check_eq(scheduler.numClients, 1);
do_check_eq(scheduler.syncThreshold, SINGLE_USER_THRESHOLD);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
do_check_false(scheduler.numClients > 1);
do_check_false(scheduler.idle);
@@ -613,17 +615,17 @@ add_identity_test(this, function test_id
// Single device: nothing changes.
scheduler.observe(null, "idle", Svc.Prefs.get("scheduler.idleTime"));
do_check_eq(scheduler.idle, true);
do_check_eq(scheduler.syncInterval, scheduler.singleDeviceInterval);
// Multiple devices: switch to idle interval.
scheduler.idle = false;
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
scheduler.updateClientMode();
scheduler.observe(null, "idle", Svc.Prefs.get("scheduler.idleTime"));
do_check_eq(scheduler.idle, true);
do_check_eq(scheduler.syncInterval, scheduler.idleInterval);
yield cleanUpAndGo();
});
@@ -712,17 +714,17 @@ add_identity_test(this, function test_ba
add_identity_test(this, function test_no_sync_node() {
// Test when Status.sync == NO_SYNC_NODE_FOUND
// it is not overwritten on sync:finish
let server = sync_httpd_setup();
yield setUp(server);
Service.serverURL = server.baseURI + "/";
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.sync, NO_SYNC_NODE_FOUND);
do_check_eq(scheduler.syncTimer.delay, NO_SYNC_NODE_INTERVAL);
yield cleanUpAndGo(server);
});
add_identity_test(this, function test_sync_failed_partial_500s() {
_("Test a 5xx status calls handleSyncError.");
@@ -732,17 +734,17 @@ add_identity_test(this, function test_sy
let engine = Service.engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 500};
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(yield setUp(server));
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
let maxInterval = scheduler._syncErrors * (2 * MINIMUM_BACKOFF_INTERVAL);
do_check_eq(Status.backoffInterval, 0);
do_check_true(Status.enforceBackoff);
do_check_eq(scheduler._syncErrors, 4);
do_check_true(scheduler.nextSync <= (Date.now() + maxInterval));
@@ -756,23 +758,23 @@ add_identity_test(this, function test_sy
scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
let server = sync_httpd_setup();
let engine = Service.engineManager.get("catapult");
engine.enabled = true;
engine.exception = {status: 400};
// Have multiple devices for an active interval.
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
do_check_eq(Status.sync, SYNC_SUCCEEDED);
do_check_true(yield setUp(server));
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.service, SYNC_FAILED_PARTIAL);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
do_check_eq(Status.backoffInterval, 0);
do_check_false(Status.enforceBackoff);
do_check_eq(scheduler._syncErrors, 0);
do_check_true(scheduler.nextSync <= (Date.now() + scheduler.activeInterval));
@@ -798,35 +800,35 @@ add_identity_test(this, function test_sy
response.setHeader("X-Weave-Backoff", "" + BACKOFF);
}
infoColl(request, response);
}
server.registerPathHandler(INFO_COLLECTIONS, infoCollWithBackoff);
// Pretend we have two clients so that the regular sync interval is
// sufficiently low.
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
- let rec = clientsEngine._store.createRecord("foo", "clients");
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ let rec = yield clientsEngine._store.createRecord("foo", "clients");
rec.encrypt(Service.collectionKeys.keyForCollection("clients"));
rec.upload(Service.resource(clientsEngine.engineURL + rec.id));
// Sync once to log in and get everything set up. Let's verify our initial
// values.
- Service.sync();
+ yield Service.sync();
do_check_eq(Status.backoffInterval, 0);
do_check_eq(Status.minimumNextSync, 0);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
do_check_true(scheduler.nextSync <=
Date.now() + scheduler.syncInterval);
// Sanity check that we picked the right value for BACKOFF:
do_check_true(scheduler.syncInterval < BACKOFF * 1000);
// Turn on server maintenance and sync again.
serverBackoff = true;
- Service.sync();
+ yield Service.sync();
do_check_true(Status.backoffInterval >= BACKOFF * 1000);
// Allowing 1 second worth of of leeway between when Status.minimumNextSync
// was set and when this line gets executed.
let minimumExpectedDelay = (BACKOFF - 1) * 1000;
do_check_true(Status.minimumNextSync >= Date.now() + minimumExpectedDelay);
// Verify that the next sync is actually going to wait that long.
@@ -855,36 +857,36 @@ add_identity_test(this, function test_sy
}
response.setHeader("Retry-After", "" + BACKOFF);
response.setStatusLine(request.httpVersion, 503, "Service Unavailable");
}
server.registerPathHandler(INFO_COLLECTIONS, infoCollWithMaintenance);
// Pretend we have two clients so that the regular sync interval is
// sufficiently low.
- clientsEngine._store.create({id: "foo", cleartext: "bar"});
- let rec = clientsEngine._store.createRecord("foo", "clients");
+ yield clientsEngine._store.create({id: "foo", cleartext: "bar"});
+ let rec = yield clientsEngine._store.createRecord("foo", "clients");
rec.encrypt(Service.collectionKeys.keyForCollection("clients"));
rec.upload(Service.resource(clientsEngine.engineURL + rec.id));
// Sync once to log in and get everything set up. Let's verify our initial
// values.
- Service.sync();
+ yield Service.sync();
do_check_false(Status.enforceBackoff);
do_check_eq(Status.backoffInterval, 0);
do_check_eq(Status.minimumNextSync, 0);
do_check_eq(scheduler.syncInterval, scheduler.activeInterval);
do_check_true(scheduler.nextSync <=
Date.now() + scheduler.syncInterval);
// Sanity check that we picked the right value for BACKOFF:
do_check_true(scheduler.syncInterval < BACKOFF * 1000);
// Turn on server maintenance and sync again.
serverMaintenance = true;
- Service.sync();
+ yield Service.sync();
do_check_true(Status.enforceBackoff);
do_check_true(Status.backoffInterval >= BACKOFF * 1000);
// Allowing 1 second worth of of leeway between when Status.minimumNextSync
// was set and when this line gets executed.
let minimumExpectedDelay = (BACKOFF - 1) * 1000;
do_check_true(Status.minimumNextSync >= Date.now() + minimumExpectedDelay);
--- a/services/sync/tests/unit/test_tab_engine.js
+++ b/services/sync/tests/unit/test_tab_engine.js
@@ -38,28 +38,28 @@ add_test(function test_getOpenURLs() {
_(" test matching works (false)");
matches = openurlsset.has("http://barfoo.com");
ok(!matches);
run_next_test();
});
-add_test(function test_tab_engine_skips_incoming_local_record() {
+add_task(function* test_tab_engine_skips_incoming_local_record() {
_("Ensure incoming records that match local client ID are never applied.");
let [engine, store] = getMocks();
let localID = engine.service.clientsEngine.localID;
let apply = store.applyIncoming;
let applied = [];
- store.applyIncoming = function (record) {
+ store.applyIncoming = Task.async(function* (record) {
notEqual(record.id, localID, "Only apply tab records from remote clients");
applied.push(record);
- apply.call(store, record);
- }
+ yield apply.call(store, record);
+ });
let collection = new ServerCollection();
_("Creating remote tab record with local client ID");
let localRecord = encryptPayload({id: localID, clientName: "local"});
collection.insert(localID, localRecord);
_("Creating remote tab record with a different client ID");
@@ -78,58 +78,58 @@ add_test(function test_tab_engine_skips_
let meta_global = Service.recordManager.set(engine.metaURL,
new WBORecord(engine.metaURL));
meta_global.payload.engines = {tabs: {version: engine.version,
syncID: engine.syncID}};
generateNewKeys(Service.collectionKeys);
let syncFinish = engine._syncFinish;
- engine._syncFinish = function () {
+ let didSyncFinishTests = false;
+ engine._syncFinish = Task.async(function* () {
equal(applied.length, 1, "Remote client record was applied");
equal(applied[0].id, remoteID, "Remote client ID matches");
- syncFinish.call(engine);
- run_next_test();
- }
+ yield syncFinish.call(engine);
+ didSyncFinishTests = true;
+ });
_("Start sync");
- engine._sync();
+ yield engine._sync();
+ ok(didSyncFinishTests);
});
-add_test(function test_reconcile() {
+add_task(function* test_reconcile() {
let [engine, store] = getMocks();
_("Setup engine for reconciling");
- engine._syncStartup();
+ yield engine._syncStartup();
_("Create an incoming remote record");
let remoteRecord = {id: "remote id",
cleartext: "stuff and things!",
modified: 1000};
- ok(engine._reconcile(remoteRecord), "Apply a recently modified remote record");
+ ok((yield engine._reconcile(remoteRecord)), "Apply a recently modified remote record");
remoteRecord.modified = 0;
- ok(engine._reconcile(remoteRecord), "Apply a remote record modified long ago");
+ ok((yield engine._reconcile(remoteRecord)), "Apply a remote record modified long ago");
// Remote tab records are never tracked locally, so the only
// time they're skipped is when they're marked as deleted.
remoteRecord.deleted = true;
- ok(!engine._reconcile(remoteRecord), "Skip a deleted remote record");
+ ok(!(yield engine._reconcile(remoteRecord)), "Skip a deleted remote record");
_("Create an incoming local record");
// The locally tracked tab record always takes precedence over its
// remote counterparts.
let localRecord = {id: engine.service.clientsEngine.localID,
cleartext: "this should always be skipped",
modified: 2000};
- ok(!engine._reconcile(localRecord), "Skip incoming local if recently modified");
+ ok(!(yield engine._reconcile(localRecord)), "Skip incoming local if recently modified");
localRecord.modified = 0;
- ok(!engine._reconcile(localRecord), "Skip incoming local if modified long ago");
+ ok(!(yield engine._reconcile(localRecord)), "Skip incoming local if modified long ago");
localRecord.deleted = true;
- ok(!engine._reconcile(localRecord), "Skip incoming local if deleted");
-
- run_next_test();
+ ok(!(yield engine._reconcile(localRecord)), "Skip incoming local if deleted");
});
--- a/services/sync/tests/unit/test_tab_store.js
+++ b/services/sync/tests/unit/test_tab_store.js
@@ -9,51 +9,53 @@ Cu.import("resource://testing-common/ser
function getMockStore() {
let engine = new TabEngine(Service);
let store = engine._store;
store.getTabState = mockGetTabState;
store.shouldSkipWindow = mockShouldSkipWindow;
return store;
}
-function test_create() {
- let store = new TabEngine(Service)._store;
+add_task(function* test_create() {
+ let engine = new TabEngine(Service);
+ yield engine.whenInitialized;
+ let store = engine._store;
_("Create a first record");
let rec = {id: "id1",
clientName: "clientName1",
cleartext: "cleartext1",
modified: 1000};
- store.applyIncoming(rec);
+ yield store.applyIncoming(rec);
do_check_eq(store._remoteClients["id1"], "cleartext1");
do_check_eq(Svc.Prefs.get("notifyTabState"), 1);
_("Create a second record");
rec = {id: "id2",
clientName: "clientName2",
cleartext: "cleartext2",
modified: 2000};
- store.applyIncoming(rec);
+ yield store.applyIncoming(rec);
do_check_eq(store._remoteClients["id2"], "cleartext2");
do_check_eq(Svc.Prefs.get("notifyTabState"), 0);
_("Create a third record");
rec = {id: "id3",
clientName: "clientName3",
cleartext: "cleartext3",
modified: 3000};
- store.applyIncoming(rec);
+ yield store.applyIncoming(rec);
do_check_eq(store._remoteClients["id3"], "cleartext3");
do_check_eq(Svc.Prefs.get("notifyTabState"), 0);
// reset the notifyTabState
Svc.Prefs.reset("notifyTabState");
-}
+});
-function test_getAllTabs() {
+add_task(function* test_getAllTabs() {
let store = getMockStore();
let tabs;
let threeUrls = ["http://foo.com", "http://fuubar.com", "http://barbar.com"];
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://bar.com", 1, 1, () => 2, () => threeUrls);
_("Get all tabs.");
@@ -84,39 +86,37 @@ function test_getAllTabs() {
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://bar.com", 1, 1, () => 45, () => allURLs);
tabs = store.getAllTabs((url) => url.startsWith("about"));
_("Sliced: " + JSON.stringify(tabs));
do_check_eq(tabs.length, 1);
do_check_eq(tabs[0].urlHistory.length, 25);
do_check_eq(tabs[0].urlHistory[0], "http://foo40.bar");
do_check_eq(tabs[0].urlHistory[24], "http://foo16.bar");
-}
+});
-function test_createRecord() {
+add_task(function* test_createRecord() {
let store = getMockStore();
let record;
store.getTabState = mockGetTabState;
store.shouldSkipWindow = mockShouldSkipWindow;
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);
let tabs = store.getAllTabs();
let tabsize = JSON.stringify(tabs[0]).length;
let numtabs = Math.ceil(20000./77.);
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);
- record = store.createRecord("fake-guid");
+ record = yield store.createRecord("fake-guid");
do_check_true(record instanceof TabSetRecord);
do_check_eq(record.tabs.length, 1);
_("create a big record");
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, numtabs);
- record = store.createRecord("fake-guid");
+ record = yield store.createRecord("fake-guid");
do_check_true(record instanceof TabSetRecord);
do_check_eq(record.tabs.length, 256);
-}
+});
function run_test() {
- test_create();
- test_getAllTabs();
- test_createRecord();
+ run_next_test();
}
--- a/services/sync/tests/unit/test_tab_tracker.js
+++ b/services/sync/tests/unit/test_tab_tracker.js
@@ -32,24 +32,29 @@ function fakeSvcWinMediator() {
}
};
}
};
return logs;
}
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
let engine = Service.engineManager.get("tabs");
+ yield engine.whenInitialized;
_("We assume that tabs have changed at startup.");
let tracker = engine._tracker;
tracker.persistChangedIDs = false;
do_check_true(tracker.modified);
- do_check_true(Utils.deepEquals(Object.keys(engine.getChangedIDs()),
+ do_check_true(Utils.deepEquals(Object.keys(yield engine.getChangedIDs()),
[clientsEngine.localID]));
let logs;
_("Test listeners are registered on windows");
logs = fakeSvcWinMediator();
Svc.Obs.notify("weave:engine:start-tracking");
do_check_eq(logs.length, 2);
@@ -75,26 +80,26 @@ function run_test() {
do_check_true(log.remTopics.indexOf("TabClose") >= 0);
do_check_true(log.remTopics.indexOf("TabSelect") >= 0);
do_check_true(log.remTopics.indexOf("unload") >= 0);
}
_("Test tab listener");
for each (let evttype in ["TabOpen", "TabClose", "TabSelect"]) {
// Pretend we just synced.
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
do_check_false(tracker.modified);
// Send a fake tab event
tracker.onTab({type: evttype , originalTarget: evttype});
do_check_true(tracker.modified);
- do_check_true(Utils.deepEquals(Object.keys(engine.getChangedIDs()),
+ do_check_true(Utils.deepEquals(Object.keys(yield engine.getChangedIDs()),
[clientsEngine.localID]));
}
// Pretend we just synced.
- tracker.clearChangedIDs();
+ yield tracker.clearChangedIDs();
do_check_false(tracker.modified);
tracker.onTab({type: "pageshow", originalTarget: "pageshow"});
- do_check_true(Utils.deepEquals(Object.keys(engine.getChangedIDs()),
+ do_check_true(Utils.deepEquals(Object.keys(yield engine.getChangedIDs()),
[clientsEngine.localID]));
-}
+});
--- a/services/sync/tests/unit/test_tracker_addChanged.js
+++ b/services/sync/tests/unit/test_tracker_addChanged.js
@@ -32,28 +32,33 @@ add_test(function test_tracker_basics()
_("Adding without time defaults to current time");
tracker.addChangedID(id);
do_check_true(tracker.changedIDs[id] > 10);
run_next_test();
});
-add_test(function test_tracker_persistence() {
+add_task(function* test_tracker_persistence() {
let tracker = new Tracker("Tracker", Service);
let id = "abcdef";
- tracker.persistChangedIDs = true;
- tracker.onSavedChangedIDs = function () {
- _("IDs saved.");
- do_check_eq(5, tracker.changedIDs[id]);
+ let promiseTrackerOK = new Promise(resolve => {
+ tracker.persistChangedIDs = true;
+ tracker.onSavedChangedIDs = function () {
+ _("IDs saved.");
+ do_check_eq(5, tracker.changedIDs[id]);
- // Verify the write by reading the file back.
- Utils.jsonLoad("changes/tracker", this, function (json) {
- do_check_eq(5, json[id]);
- tracker.persistChangedIDs = false;
- delete tracker.onSavedChangedIDs;
- run_next_test();
- });
- };
+ // Verify the write by reading the file back.
+ Utils.jsonLoad("changes/tracker", this).then(
+ json => {
+ do_check_eq(5, json[id]);
+ tracker.persistChangedIDs = false;
+ delete tracker.onSavedChangedIDs;
+ resolve();
+ }
+ );
+ };
+ });
tracker.addChangedID(id, 5);
+ yield promiseTrackerOK;
});
--- a/services/sync/tests/unit/test_utils_catch.js
+++ b/services/sync/tests/unit/test_utils_catch.js
@@ -1,16 +1,21 @@
Cu.import("resource://services-sync/util.js");
Cu.import("resource://services-sync/service.js");
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
_("Make sure catch when copied to an object will correctly catch stuff");
let ret, rightThis, didCall, didThrow, wasTen, wasLocked;
let obj = {
catch: Utils.catch,
+ _promiseCatch: Service._promiseCatch,
_log: {
debug: function(str) {
didThrow = str.search(/^Exception: /) == 0;
},
info: function(str) {
wasLocked = str.indexOf("Cannot start sync: already syncing?") == 0;
}
},
@@ -37,22 +42,30 @@ function run_test() {
didCall = true;
throw 10;
}, function(ex) {
wasTen = (ex == 10)
})();
},
lockedy: function() {
- return this.catch(function() {
+ return this._promiseCatch(function() {
rightThis = this == obj;
didCall = true;
throw("Could not acquire lock.");
})();
- }
+ },
+
+ lockedy_chained: function() {
+ return this._promiseCatch(function() {
+ rightThis = this == obj;
+ didCall = true;
+ return Promise.resolve().then( () => {throw("Could not acquire lock.")});
+ })();
+ },
};
_("Make sure a normal call will call and return");
rightThis = didCall = didThrow = wasLocked = false;
ret = obj.func();
do_check_eq(ret, 5);
do_check_true(rightThis);
do_check_true(didCall);
@@ -76,19 +89,29 @@ function run_test() {
do_check_eq(ret, undefined);
do_check_true(rightThis);
do_check_true(didCall);
do_check_true(didThrow);
do_check_true(wasTen);
do_check_false(wasLocked);
_("Test the lock-aware catch that Service uses.");
- obj.catch = Service._catch;
rightThis = didCall = didThrow = wasLocked = false;
wasTen = undefined;
- ret = obj.lockedy();
+ ret = yield obj.lockedy();
do_check_eq(ret, undefined);
do_check_true(rightThis);
do_check_true(didCall);
do_check_true(didThrow);
do_check_eq(wasTen, undefined);
do_check_true(wasLocked);
-}
+
+ _("Test the lock-aware catch that Service uses with a chained promise.");
+ rightThis = didCall = didThrow = wasLocked = false;
+ wasTen = undefined;
+ ret = yield obj.lockedy_chained();
+ do_check_eq(ret, undefined);
+ do_check_true(rightThis);
+ do_check_true(didCall);
+ do_check_true(didThrow);
+ do_check_eq(wasTen, undefined);
+ do_check_true(wasLocked);
+});
--- a/services/sync/tests/unit/test_utils_json.js
+++ b/services/sync/tests/unit/test_utils_json.js
@@ -4,81 +4,64 @@
Cu.import("resource://gre/modules/FileUtils.jsm");
Cu.import("resource://services-sync/util.js");
function run_test() {
initTestLogging();
run_next_test();
}
-add_test(function test_roundtrip() {
+add_task(function* test_roundtrip() {
_("Do a simple write of an array to json and read");
- Utils.jsonSave("foo", {}, ["v1", "v2"], ensureThrows(function(error) {
- do_check_eq(error, null);
+ yield Utils.jsonSave("foo", {}, ["v1", "v2"]);
- Utils.jsonLoad("foo", {}, ensureThrows(function(val) {
- let foo = val;
- do_check_eq(typeof foo, "object");
- do_check_eq(foo.length, 2);
- do_check_eq(foo[0], "v1");
- do_check_eq(foo[1], "v2");
- run_next_test();
- }));
- }));
+ let val = yield Utils.jsonLoad("foo", {});
+ let foo = val;
+ do_check_eq(typeof foo, "object");
+ do_check_eq(foo.length, 2);
+ do_check_eq(foo[0], "v1");
+ do_check_eq(foo[1], "v2");
});
-add_test(function test_string() {
+add_task(function* test_string() {
_("Try saving simple strings");
- Utils.jsonSave("str", {}, "hi", ensureThrows(function(error) {
- do_check_eq(error, null);
-
- Utils.jsonLoad("str", {}, ensureThrows(function(val) {
- let str = val;
- do_check_eq(typeof str, "string");
- do_check_eq(str.length, 2);
- do_check_eq(str[0], "h");
- do_check_eq(str[1], "i");
- run_next_test();
- }));
- }));
+ yield Utils.jsonSave("str", {}, "hi");
+ let val = yield Utils.jsonLoad("str", {});
+ let str = val;
+ do_check_eq(typeof str, "string");
+ do_check_eq(str.length, 2);
+ do_check_eq(str[0], "h");
+ do_check_eq(str[1], "i");
});
-add_test(function test_number() {
+add_task(function* test_number() {
_("Try saving a number");
- Utils.jsonSave("num", {}, 42, ensureThrows(function(error) {
- do_check_eq(error, null);
+ yield Utils.jsonSave("num", {}, 42);
- Utils.jsonLoad("num", {}, ensureThrows(function(val) {
- let num = val;
- do_check_eq(typeof num, "number");
- do_check_eq(num, 42);
- run_next_test();
- }));
- }));
+ let val = yield Utils.jsonLoad("num", {});
+ let num = val;
+ do_check_eq(typeof num, "number");
+ do_check_eq(num, 42);
});
-add_test(function test_nonexistent_file() {
+add_task(function* test_nonexistent_file() {
_("Try loading a non-existent file.");
- Utils.jsonLoad("non-existent", {}, ensureThrows(function(val) {
- do_check_eq(val, undefined);
- run_next_test();
- }));
+ let val = yield Utils.jsonLoad("non-existent", {});
+ do_check_eq(val, undefined);
});
-add_test(function test_save_logging() {
+add_task(function* test_save_logging() {
_("Verify that writes are logged.");
let trace;
- Utils.jsonSave("log", {_log: {trace: function(msg) { trace = msg; }}},
- "hi", ensureThrows(function () {
- do_check_true(!!trace);
- run_next_test();
- }));
+ yield Utils.jsonSave("log", {_log: {trace: function(msg) { trace = msg; }}},
+ "hi");
+ do_check_true(!!trace);
});
-add_test(function test_load_logging() {
+add_task(function* test_load_logging() {
_("Verify that reads and read errors are logged.");
// Write a file with some invalid JSON
let filePath = "weave/log.json";
let file = FileUtils.getFile("ProfD", filePath.split("/"), true);
let fos = Cc["@mozilla.org/network/file-output-stream;1"]
.createInstance(Ci.nsIFileOutputStream);
let flags = FileUtils.MODE_WRONLY | FileUtils.MODE_CREATE
@@ -96,19 +79,13 @@ add_test(function test_load_logging() {
trace: function(msg) {
trace = msg;
},
debug: function(msg) {
debug = msg;
}
}
};
- Utils.jsonLoad("log", obj, ensureThrows(function(val) {
- do_check_true(!val);
- do_check_true(!!trace);
- do_check_true(!!debug);
- run_next_test();
- }));
+ let val = yield Utils.jsonLoad("log", obj);
+ do_check_true(!val);
+ do_check_true(!!trace);
+ do_check_true(!!debug);
});
-
-add_task(function* test_undefined_callback() {
- yield Utils.jsonSave("foo", {}, ["v1", "v2"]);
-});
--- a/services/sync/tests/unit/test_utils_lock.js
+++ b/services/sync/tests/unit/test_utils_lock.js
@@ -4,76 +4,78 @@ Cu.import("resource://services-sync/util
// Utility that we only use here.
function do_check_begins(thing, startsWith) {
if (!(thing && thing.indexOf && (thing.indexOf(startsWith) == 0)))
do_throw(thing + " doesn't begin with " + startsWith);
}
function run_test() {
+ run_next_test();
+}
+
+add_task(function* () {
let ret, rightThis, didCall;
let state, lockState, lockedState, unlockState;
let obj = {
- _lock: Utils.lock,
+ _promiseLock: Utils.promiseLock,
lock: function() {
lockState = ++state;
if (this._locked) {
lockedState = ++state;
return false;
}
this._locked = true;
return true;
},
unlock: function() {
unlockState = ++state;
this._locked = false;
},
-
func: function() {
- return this._lock("Test utils lock",
- function() {
- rightThis = this == obj;
- didCall = true;
- return 5;
- })();
+ return this._promiseLock("Test utils lock",
+ function() {
+ rightThis = this == obj;
+ didCall = true;
+ return Promise.resolve(5);
+ })();
},
-
throwy: function() {
- return this._lock("Test utils lock throwy",
- function() {
- rightThis = this == obj;
- didCall = true;
- this.throwy();
- })();
- }
- };
+ return this._promiseLock("Test utils lock throwy",
+ function() {
+ rightThis = this == obj;
+ didCall = true;
+ return Promise.resolve().then(() => this.throwy());
+ })();
+ },
+ };
_("Make sure a normal call will call and return");
rightThis = didCall = false;
state = 0;
- ret = obj.func();
+ ret = yield obj.func();
do_check_eq(ret, 5);
do_check_true(rightThis);
do_check_true(didCall);
do_check_eq(lockState, 1);
do_check_eq(unlockState, 2);
do_check_eq(state, 2);
_("Make sure code that calls locked code throws");
ret = null;
rightThis = didCall = false;
try {
- ret = obj.throwy();
+ ret = yield obj.throwy();
do_throw("throwy internal call should have thrown!");
}
catch(ex) {
// Should throw an Error, not a string.
do_check_begins(ex, "Could not acquire lock");
}
do_check_eq(ret, null);
do_check_true(rightThis);
do_check_true(didCall);
_("Lock should be called twice so state 3 is skipped");
do_check_eq(lockState, 4);
do_check_eq(lockedState, 5);
do_check_eq(unlockState, 6);
do_check_eq(state, 6);
-}
+});
--- a/services/sync/tests/unit/test_warn_on_truncated_response.js
+++ b/services/sync/tests/unit/test_warn_on_truncated_response.js
@@ -33,23 +33,23 @@ function getWarningMessages(log) {
if (message.match(regEx)) {
warnMessages.push(message);
}
warn.call(log, message);
}
return warnMessages;
}
-add_test(function test_resource_logs_content_length_mismatch() {
+add_task(function* test_resource_logs_content_length_mismatch() {
_("Issuing request.");
let httpServer = httpd_setup({"/content": contentHandler});
let resource = new Resource(httpServer.baseURI + "/content");
let warnMessages = getWarningMessages(resource._log);
- let result = resource.get();
+ let result = yield resource.get();
notEqual(warnMessages.length, 0, "test that a warning was logged");
notEqual(result.length, contentLength);
equal(result, BODY);
httpServer.stop(run_next_test);
});