--- a/services/common/kinto-offline-client.js
+++ b/services/common/kinto-offline-client.js
@@ -15,17 +15,17 @@
/*
* This file is generated from kinto.js - do not modify directly.
*/
this.EXPORTED_SYMBOLS = ["Kinto"];
/*
- * Version 8.0.0 - 57d2836
+ * Version 9.0.2 - b025c7b
*/
(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.Kinto = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@@ -172,25 +172,39 @@ class KintoBase {
remote: DEFAULT_REMOTE,
retry: DEFAULT_RETRY
};
this._options = _extends({}, defaults, options);
if (!this._options.adapter) {
throw new Error("No adapter provided");
}
- const { remote, events, headers, retry, requestMode, timeout, ApiClass } = this._options;
+ const {
+ remote,
+ events,
+ headers,
+ retry,
+ requestMode,
+ timeout,
+ ApiClass
+ } = this._options;
// public properties
/**
* The kinto HTTP client instance.
* @type {KintoClient}
*/
- this.api = new ApiClass(remote, { events, headers, retry, requestMode, timeout });
+ this.api = new ApiClass(remote, {
+ events,
+ headers,
+ retry,
+ requestMode,
+ timeout
+ });
/**
* The event emitter instance.
* @type {EventEmitter}
*/
this.events = this._options.events;
}
/**
@@ -203,28 +217,18 @@ class KintoBase {
* @param {Object} [options.remoteTransformers] Array<RemoteTransformer> (default: `[]`])
* @param {Object} [options.hooks] Array<Hook> (default: `[]`])
* @return {Collection}
*/
collection(collName, options = {}) {
if (!collName) {
throw new Error("missing collection name");
}
- const {
- bucket,
- events,
- adapter,
- adapterOptions,
- dbPrefix
- } = _extends({}, this._options, options);
- const {
- idSchema,
- remoteTransformers,
- hooks
- } = options;
+ const { bucket, events, adapter, adapterOptions, dbPrefix } = _extends({}, this._options, options);
+ const { idSchema, remoteTransformers, hooks } = options;
return new _collection2.default(bucket, collName, this.api, {
events,
adapter,
adapterOptions,
dbPrefix,
idSchema,
remoteTransformers,
@@ -1230,17 +1234,17 @@ class Collection {
throw new Error("hooks should be an object, not an array.");
}
if (typeof hooks !== "object") {
throw new Error("hooks should be an object.");
}
const validatedHooks = {};
- for (let hook in hooks) {
+ for (const hook in hooks) {
if (AVAILABLE_HOOKS.indexOf(hook) === -1) {
throw new Error("The hook should be one of " + AVAILABLE_HOOKS.join(", "));
}
validatedHooks[hook] = this._validateHook(hooks[hook]);
}
return validatedHooks;
}
@@ -1327,17 +1331,19 @@ class Collection {
}
const newRecord = _extends({}, record, {
id: options.synced || options.useRecordId ? record.id : this.idSchema.generate(),
_status: options.synced ? "synced" : "created"
});
if (!this.idSchema.validate(newRecord.id)) {
return reject(`Invalid Id: ${newRecord.id}`);
}
- return this.execute(txn => txn.create(newRecord), { preloadIds: [newRecord.id] }).catch(err => {
+ return this.execute(txn => txn.create(newRecord), {
+ preloadIds: [newRecord.id]
+ }).catch(err => {
if (options.useRecordId) {
throw new Error("Couldn't create record. It may have been virtually deleted.");
}
throw err;
});
}
/**
@@ -1361,17 +1367,19 @@ class Collection {
}
if (!record.hasOwnProperty("id")) {
return Promise.reject(new Error("Cannot update a record missing id."));
}
if (!this.idSchema.validate(record.id)) {
return Promise.reject(new Error(`Invalid Id: ${record.id}`));
}
- return this.execute(txn => txn.update(record, options), { preloadIds: [record.id] });
+ return this.execute(txn => txn.update(record, options), {
+ preloadIds: [record.id]
+ });
}
/**
* Like {@link CollectionTransaction#upsert}, but wrapped in its own transaction.
*
* @param {Object} record
* @return {Promise}
*/
@@ -1575,27 +1583,48 @@ class Collection {
})();
}
/**
* Handles synchronization conflicts according to specified strategy.
*
* @param {SyncResultObject} result The sync result object.
* @param {String} strategy The {@link Collection.strategy}.
- * @return {Promise}
+ * @return {Promise<Array<Object>>} The resolved conflicts, as an
+ *      array of {accepted, rejected, id, _status} objects
*/
_handleConflicts(transaction, conflicts, strategy) {
if (strategy === Collection.strategy.MANUAL) {
return [];
}
return conflicts.map(conflict => {
const resolution = strategy === Collection.strategy.CLIENT_WINS ? conflict.local : conflict.remote;
- const updated = this._resolveRaw(conflict, resolution);
- transaction.update(updated);
- return updated;
+ const rejected = strategy === Collection.strategy.CLIENT_WINS ? conflict.remote : conflict.local;
+ let accepted, status, id;
+ if (resolution === null) {
+ // We "resolved" with the server-side deletion. Delete locally.
+ // This only happens during SERVER_WINS because the local
+ // version of a record can never be null.
+ // We can get "null" from the remote side if we got a conflict
+ // and there is no remote version available; see kinto-http.js
+ // batch.js:aggregate.
+ transaction.delete(conflict.local.id);
+ accepted = null;
+        // The record was deleted, but that deletion is already "synced"
+        // with the server, so we don't need to push the change.
+ status = "synced";
+ id = conflict.local.id;
+ } else {
+ const updated = this._resolveRaw(conflict, resolution);
+ transaction.update(updated);
+ accepted = updated;
+ status = updated._status;
+ id = updated.id;
+ }
+ return { rejected, accepted, id, _status: status };
});
}
/**
* Execute a bunch of operations in a transaction.
*
* This transaction should be atomic -- either all of its operations
* will succeed, or none will.
@@ -1612,17 +1641,17 @@ class Collection {
* Options:
* - {Array} preloadIds: list of IDs to fetch at the beginning of
* the transaction
*
* @return {Promise} Resolves with the result of the given function
* when the transaction commits.
*/
execute(doOperations, { preloadIds = [] } = {}) {
- for (let id of preloadIds) {
+ for (const id of preloadIds) {
if (!this.idSchema.validate(id)) {
return Promise.reject(Error(`Invalid Id: ${id}`));
}
}
return this.db.execute(transaction => {
const txn = new CollectionTransaction(this, transaction);
const result = doOperations(txn);
@@ -1672,17 +1701,20 @@ class Collection {
* - `toSync`: local updates to send to the server.
*
* @return {Promise}
*/
gatherLocalChanges() {
var _this6 = this;
return _asyncToGenerator(function* () {
- const unsynced = yield _this6.list({ filters: { _status: ["created", "updated"] }, order: "" });
+ const unsynced = yield _this6.list({
+ filters: { _status: ["created", "updated"] },
+ order: ""
+ });
const deleted = yield _this6.list({ filters: { _status: "deleted" }, order: "" }, { includeDeleted: true });
return yield Promise.all(unsynced.data.concat(deleted.data).map(_this6._encodeRecord.bind(_this6, "remote")));
})();
}
/**
* Fetch remote changes, import them to the local database, and handle
@@ -1837,24 +1869,27 @@ class Collection {
// Store outgoing errors into sync result object
syncResultObject.add("errors", synced.errors.map(function (e) {
return _extends({}, e, { type: "outgoing" });
}));
// Store outgoing conflicts into sync result object
const conflicts = [];
- for (let _ref of synced.conflicts) {
- let { type, local, remote } = _ref;
+ for (const _ref of synced.conflicts) {
+ const { type, local, remote } = _ref;
// Note: we ensure that local data are actually available, as they may
// be missing in the case of a published deletion.
const safeLocal = local && local.data || { id: remote.id };
const realLocal = yield _this8._decodeRecord("remote", safeLocal);
- const realRemote = yield _this8._decodeRecord("remote", remote);
+ // We can get "null" from the remote side if we got a conflict
+ // and there is no remote version available; see kinto-http.js
+ // batch.js:aggregate.
+ const realRemote = remote && (yield _this8._decodeRecord("remote", remote));
const conflict = { type, local: realLocal, remote: realRemote };
conflicts.push(conflict);
}
syncResultObject.add("conflicts", conflicts);
// Records that must be deleted are either deletions that were pushed
// to server (published) or deleted records that were never pushed (skipped).
const missingRemotely = synced.skipped.map(function (r) {
@@ -1914,17 +1949,17 @@ class Collection {
}
/**
* @private
*/
_resolveRaw(conflict, resolution) {
const resolved = _extends({}, resolution, {
// Ensure local record has the latest authoritative timestamp
- last_modified: conflict.remote.last_modified
+ last_modified: conflict.remote && conflict.remote.last_modified
});
// If the resolution object is strictly equal to the
// remote record, then we can mark it as synced locally.
// Otherwise, mark it as updated (so that the resolution is pushed).
const synced = (0, _utils.deepEqual)(resolved, conflict.remote);
return markStatus(resolved, synced ? "synced" : "updated");
}
@@ -1990,24 +2025,33 @@ class Collection {
// Publish local changes and pull local resolutions
yield _this9.pushChanges(client, toSync, result, options);
// Publish local resolution of push conflicts to server (on CLIENT_WINS)
const resolvedUnsynced = result.resolved.filter(function (r) {
return r._status !== "synced";
});
if (resolvedUnsynced.length > 0) {
- const resolvedEncoded = yield Promise.all(resolvedUnsynced.map(_this9._encodeRecord.bind(_this9, "remote")));
+ const resolvedEncoded = yield Promise.all(resolvedUnsynced.map(function (resolution) {
+ let record = resolution.accepted;
+ if (record === null) {
+ record = { id: resolution.id, _status: resolution._status };
+ }
+ return _this9._encodeRecord("remote", record);
+ }));
yield _this9.pushChanges(client, resolvedEncoded, result, options);
}
// Perform a last pull to catch changes that occured after the last pull,
// while local changes were pushed. Do not do it nothing was pushed.
if (result.published.length > 0) {
// Avoid redownloading our own changes during the last pull.
- const pullOpts = _extends({}, options, { lastModified, exclude: result.published });
+ const pullOpts = _extends({}, options, {
+ lastModified,
+ exclude: result.published
+ });
yield _this9.pullChanges(client, result, pullOpts);
}
// Don't persist lastModified value if any conflict or error occured
if (result.ok) {
// No conflict occured, persist collection's lastModified value
_this9._lastModified = yield _this9.db.saveLastModified(result.lastModified);
}
@@ -2035,17 +2079,17 @@ class Collection {
loadDump(records) {
var _this10 = this;
return _asyncToGenerator(function* () {
if (!Array.isArray(records)) {
throw new Error("Records is not an array.");
}
- for (let record of records) {
+ for (const record of records) {
if (!record.hasOwnProperty("id") || !_this10.idSchema.validate(record.id)) {
throw new Error("Record has invalid ID: " + JSON.stringify(record));
}
if (!record.last_modified) {
throw new Error("Record has no last_modified value: " + JSON.stringify(record));
}
}
@@ -2100,23 +2144,25 @@ class CollectionTransaction {
this._events.push({ action, payload });
}
/**
* Emit queued events, to be called once every transaction operations have
* been executed successfully.
*/
emitEvents() {
- for (let _ref2 of this._events) {
- let { action, payload } = _ref2;
+ for (const _ref2 of this._events) {
+ const { action, payload } = _ref2;
this.collection.events.emit(action, payload);
}
if (this._events.length > 0) {
- const targets = this._events.map(({ action, payload }) => _extends({ action }, payload));
+ const targets = this._events.map(({ action, payload }) => _extends({
+ action
+ }, payload));
this.collection.events.emit("change", { targets });
}
this._events = [];
}
/**
* Retrieve a record by its id from the local database, or
* undefined if none exists.
@@ -2461,9 +2507,9 @@ function omitKeys(obj, keys = []) {
if (keys.indexOf(key) === -1) {
acc[key] = obj[key];
}
return acc;
}, {});
}
},{}]},{},[1])(1)
-});
+});
\ No newline at end of file
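
A minimal sketch of the resolution objects that the reworked _handleConflicts now pushes into result.resolved, for readers of the second half of this patch. The resolveConflict helper, the strategy strings and the sample conflict are illustrative only and are not kinto.js API:

// Sketch of the CLIENT_WINS / SERVER_WINS branches added above.
// A conflict carries {local, remote}; remote may be null when the
// server-side record was deleted (see the batch.js:aggregate note).
function resolveConflict(conflict, strategy) {
  const clientWins = strategy === "client_wins";
  const resolution = clientWins ? conflict.local : conflict.remote;
  const rejected = clientWins ? conflict.remote : conflict.local;
  if (resolution === null) {
    // A server-side deletion was accepted; the patched code deletes the
    // local copy and there is nothing left to push.
    return { rejected, accepted: null, id: conflict.local.id, _status: "synced" };
  }
  // Otherwise keep the accepted record with the authoritative remote
  // timestamp. (The patched code marks it "synced" when it deep-equals
  // the remote record; "updated" is assumed here for brevity.)
  const accepted = Object.assign({}, resolution, {
    last_modified: conflict.remote && conflict.remote.last_modified,
  });
  return { rejected, accepted, id: accepted.id, _status: "updated" };
}

// Example: the keyring was deleted remotely and resolved with SERVER_WINS.
const sampleConflict = {
  local: { id: "keys", uuid: "abc", last_modified: 10, _status: "updated" },
  remote: null,
};
console.log(resolveConflict(sampleConflict, "server_wins"));
// -> { rejected: { id: "keys", ... }, accepted: null, id: "keys", _status: "synced" }
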
--- a/toolkit/components/extensions/ExtensionStorageSync.jsm
+++ b/toolkit/components/extensions/ExtensionStorageSync.jsm
@@ -316,38 +316,16 @@ class KeyRingEncryptionRemoteTransformer
return Task.spawn(function* () {
const encoded = yield encodePromise;
encoded.kbHash = record.kbHash;
return encoded;
});
}
async decode(record) {
- if (record === null) {
- // XXX: This is a hack that detects a situation that should
- // never happen by using a technique that shouldn't actually
- // work. See
- // https://bugzilla.mozilla.org/show_bug.cgi?id=1359879 for
- // the whole gory story.
- //
- // For reasons that aren't clear yet,
- // sometimes the server-side keyring is deleted. When we try
- // to sync our copy of the keyring, we get a conflict with the
- // deleted version. Due to a bug in kinto.js, we are called to
- // decode the deleted version, which is represented as
- // null. For now, try to handle this by throwing a specific
- // kind of exception which we can catch and recover from the
- // same way we would do with any other kind of undecipherable
- // keyring -- wiping the bucket and reuploading everything.
- //
- // Eventually we will probably fix the bug in kinto.js, and
- // this will have to move somewhere else, probably in the code
- // that detects a resolved conflict.
- throw new ServerKeyringDeleted();
- }
try {
return await super.decode(record);
} catch (e) {
if (Utils.isHMACMismatch(e)) {
const currentKBHash = await getKBHash(this._fxaService);
if (record.kbHash != currentKBHash) {
// Some other client encoded this with a kB that we don't
// have access to.
@@ -830,29 +808,30 @@ class ExtensionStorageSync {
newValue: record.new.data,
};
}
for (const record of syncResults.deleted) {
changes[record.key] = {
oldValue: record.data,
};
}
- for (const conflict of syncResults.resolved) {
+ for (const resolution of syncResults.resolved) {
// FIXME: We can't send a "changed" notification because
// kinto.js only provides the newly-resolved value. But should
// we even send a notification? We use CLIENT_WINS so nothing
// has really "changed" on this end. (The change will come on
// the other end when it pulls down the update, which is handled
// by the "updated" case above.) If we are going to send a
// notification, what best values for "old" and "new"? This
// might violate client code's assumptions, since from their
// perspective, we were in state L, but this diff is from R ->
// L.
- changes[conflict.key] = {
- newValue: conflict.data,
+ const accepted = resolution.accepted;
+ changes[accepted.key] = {
+ newValue: accepted.data,
};
}
if (Object.keys(changes).length > 0) {
this.notifyListeners(extension, changes);
}
}
/**
@@ -1065,18 +1044,54 @@ class ExtensionStorageSync {
// the lifetime of a keyring, so the only time a keyring UUID
// changes is when a new keyring is uploaded, which only happens
// after a server wipe. So when we get a "conflict" (resolved by
// server_wins), we check whether the server version has a new
// UUID. If so, reset our sync status, so that we'll reupload
// everything.
const result = await this.cryptoCollection.sync(this);
if (result.resolved.length > 0) {
- if (result.resolved[0].uuid != cryptoKeyRecord.uuid) {
- log.info(`Detected a new UUID (${result.resolved[0].uuid}, was ${cryptoKeyRecord.uuid}). Reseting sync status for everything.`);
+ // Automatically-resolved conflict. It should
+ // be for the keys record.
+ const resolutionIds = result.resolved.map(resolution => resolution.id);
+      if (resolutionIds.length > 1) {
+ // This should never happen -- there is only ever one record
+ // in this collection.
+ log.error(`Too many resolutions for sync-storage-crypto collection: ${JSON.stringify(resolutionIds)}`);
+ }
+ const keyResolution = result.resolved[0];
+ if (keyResolution.id != STORAGE_SYNC_CRYPTO_KEYRING_RECORD_ID) {
+ // This should never happen -- there should only ever be the
+ // keyring in this collection.
+ log.error(`Strange conflict in sync-storage-crypto collection: ${JSON.stringify(resolutionIds)}`);
+ }
+
+ // Due to a bug in the server-side code (see
+ // https://github.com/Kinto/kinto/issues/1209), lots of users'
+ // keyrings were deleted. We discover this by trying to push a
+      // new keyring (because the user added a new extension), and we
+ // get a conflict. We have SERVER_WINS, so the client will
+ // accept this deleted keyring and delete it locally. Discover
+ // this and undo it.
+ if (keyResolution.accepted === null) {
+ log.error("Conflict spotted -- the server keyring was deleted");
+ await this.cryptoCollection.upsert(keyResolution.rejected);
+ // It's possible that the keyring on the server that was
+ // deleted had keys for other extensions, which had already
+ // encrypted data. For this to happen, another client would
+ // have had to upload the keyring and then the delete happened
+ // before this client did a sync (and got the new extension
+ // and tried to sync the keyring again). Just to be safe,
+ // let's signal that something went wrong and we should wipe
+ // the bucket.
+ throw new ServerKeyringDeleted();
+ }
+
+ if (keyResolution.accepted.uuid != cryptoKeyRecord.uuid) {
+        log.info(`Detected a new UUID (${keyResolution.accepted.uuid}, was ${cryptoKeyRecord.uuid}). Resetting sync status for everything.`);
await this.cryptoCollection.resetSyncStatus();
// Server version is now correct. Return that result.
return result;
}
}
// No conflicts, or conflict was just someone else adding keys.
return result;
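
A minimal sketch of how a consumer of the new resolution shape builds the storage.onChanged payload, matching the loop over syncResults.resolved earlier in this file. changesFromResolved and the sample entries are hypothetical; the null guard is an extra precaution that the patched loop does not need, because the data collections sync with CLIENT_WINS and always accept the local record:

// Map resolved conflicts to change notifications, reading
// resolution.accepted instead of treating the resolution as the record.
function changesFromResolved(resolved) {
  const changes = {};
  for (const resolution of resolved) {
    const accepted = resolution.accepted;
    if (accepted === null) {
      // A resolved server-side deletion; only the keyring collection can
      // hit this, and the recovery path above handles it.
      continue;
    }
    changes[accepted.key] = { newValue: accepted.data };
  }
  return changes;
}

console.log(changesFromResolved([
  { accepted: { key: "pref", data: 42 }, rejected: { key: "pref", data: 7 }, id: "a", _status: "updated" },
  { accepted: null, rejected: { key: "gone", data: 1 }, id: "b", _status: "synced" },
]));
// -> { pref: { newValue: 42 } }
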