Bug 1343726 - Respect max_record_payload_bytes limit. r?markh
MozReview-Commit-ID: IzKRARMN5bC
--- a/services/sync/modules/constants.js
+++ b/services/sync/modules/constants.js
@@ -73,16 +73,20 @@ FORMS_STORE_BATCH_SIZE: 5
PASSWORDS_STORE_BATCH_SIZE: 50, // same as MOBILE_BATCH_SIZE
ADDONS_STORE_BATCH_SIZE: 1000000, // process all addons at once
APPS_STORE_BATCH_SIZE: 50, // same as MOBILE_BATCH_SIZE
// Default batch size for download batching
// (how many records are fetched at a time from the server when batching is used).
DEFAULT_DOWNLOAD_BATCH_SIZE: 1000,
+
+// Default maximum size for a record payload
+DEFAULT_MAX_RECORD_PAYLOAD_BYTES: 262144, // 256KB
+
// score thresholds for early syncs
SINGLE_USER_THRESHOLD: 1000,
MULTI_DEVICE_THRESHOLD: 300,
// Other score increment constants
SCORE_INCREMENT_SMALL: 1,
SCORE_INCREMENT_MEDIUM: 10,
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -926,16 +926,24 @@ SyncEngine.prototype = {
get lastSyncLocal() {
return parseInt(Svc.Prefs.get(this.name + ".lastSyncLocal", "0"), 10);
},
set lastSyncLocal(value) {
// Store as a string because pref can only store C longs as numbers.
Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
},
+ get maxRecordPayloadBytes() {
+ let serverConfiguration = this.service.serverConfiguration;
+ if (serverConfiguration && serverConfiguration.max_record_payload_bytes) {
+ return serverConfiguration.max_record_payload_bytes;
+ }
+ return DEFAULT_MAX_RECORD_PAYLOAD_BYTES;
+ },
+
/*
* Returns a changeset for this sync. Engine implementations can override this
* method to bypass the tracker for certain or all changed items.
*/
getChangedIDs() {
return this._tracker.changedIDs;
},
@@ -1628,16 +1636,21 @@ SyncEngine.prototype = {
let out;
let ok = false;
try {
out = this._createRecord(id);
if (this._log.level <= Log.Level.Trace)
this._log.trace("Outgoing: " + out);
out.encrypt(this.service.collectionKeys.keyForCollection(this.name));
+ let payloadLength = Utils.strByteLength(JSON.stringify(out.payload));
+ if (payloadLength > this.maxRecordPayloadBytes) {
+ this._modified.delete(id); // Do not attempt to sync that record again
+ throw new Error(`Payload too big: ${payloadLength} bytes`);
+ }
ok = true;
} catch (ex) {
this._log.warn("Error creating record", ex);
++counts.failed;
if (Async.isShutdownException(ex) || !this.allowSkippedRecord) {
Observers.notify("weave:engine:sync:uploaded", counts, this.name);
throw ex;
}
--- a/services/sync/modules/engines/tabs.js
+++ b/services/sync/modules/engines/tabs.js
@@ -206,22 +206,19 @@ TabStore.prototype = {
// Sort tabs in descending-used order to grab the most recently used
let tabs = this.getAllTabs(true).sort(function(a, b) {
return b.lastUsed - a.lastUsed;
});
// Figure out how many tabs we can pack into a payload.
// See bug 535326 comment 8 for an explanation of the estimation
- // If the server configuration is absent, we use the old max payload size of 28K
- let size = JSON.stringify(tabs).length;
+ let size = Utils.strByteLength(JSON.stringify(tabs));
let origLength = tabs.length;
- const MAX_TAB_SIZE = (this.engine.service.serverConfiguration ?
- this.engine.service.serverConfiguration.max_record_payload_bytes :
- 28672) / 4 * 3 - 1500;
+ const MAX_TAB_SIZE = this.engine.maxRecordPayloadBytes / 4 * 3 - 1500;
if (size > MAX_TAB_SIZE) {
// Estimate a little more than the direct fraction to maximize packing
let cutoff = Math.ceil(tabs.length * MAX_TAB_SIZE / size);
tabs = tabs.slice(0, cutoff + 1);
// Keep dropping off the last entry until the data fits
while (JSON.stringify(tabs).length > MAX_TAB_SIZE)
tabs.pop();
--- a/services/sync/modules/util.js
+++ b/services/sync/modules/util.js
@@ -560,16 +560,20 @@ this.Utils = {
+ pp.slice(8, 13) + pp.slice(14, 19)
+ pp.slice(20, 25) + pp.slice(26, 31);
}
// Something else -- just return.
return pp;
},
+ strByteLength(str) {
+ return new TextEncoder().encode(str).byteLength;
+ },
+
/**
* Create an array like the first but without elements of the second. Reuse
* arrays if possible.
*/
arraySub: function arraySub(minuend, subtrahend) {
if (!minuend.length || !subtrahend.length)
return minuend;
return minuend.filter(i => subtrahend.indexOf(i) == -1);
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -1339,17 +1339,18 @@ add_task(async function test_uploadOutgo
do_check_eq(collection.payload("flying"), undefined);
} finally {
await cleanAndGo(engine, server);
}
});
-add_task(async function test_uploadOutgoing_huge() {
+add_task(async function test_uploadOutgoing_max_record_payload_bytes() {
+ _("SyncEngine._uploadOutgoing throws when payload is bigger than max_record_payload_bytes");
let collection = new ServerCollection();
collection._wbos.flying = new ServerWBO("flying");
collection._wbos.scotsman = new ServerWBO("scotsman");
let server = sync_httpd_setup({
"/1.1/foo/storage/rotary": collection.handler(),
"/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
});
--- a/services/sync/tests/unit/test_tab_store.js
+++ b/services/sync/tests/unit/test_tab_store.js
@@ -88,27 +88,27 @@ function test_getAllTabs() {
function test_createRecord() {
let store = getMockStore();
let record;
store.getTabState = mockGetTabState;
store.shouldSkipWindow = mockShouldSkipWindow;
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);
- let numtabs = Math.ceil(20000. / 77.);
+ let numtabs = 2600; // Note: this number is connected to DEFAULT_MAX_RECORD_PAYLOAD_BYTES
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);
record = store.createRecord("fake-guid");
ok(record instanceof TabSetRecord);
equal(record.tabs.length, 1);
_("create a big record");
store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, numtabs);
record = store.createRecord("fake-guid");
ok(record instanceof TabSetRecord);
- equal(record.tabs.length, 256);
+ equal(record.tabs.length, 2501);
}
function run_test() {
test_create();
test_getAllTabs();
test_createRecord();
}