Bug 1476053 - Enable updating wpt metadata from wptreport.json files, r=ato
Raw log files have the big disadvantage that they are large, so we
spend a lot of time just downloading them and parsing JSON.
wptreport.json files are much smaller and therefore reduce these
issues.
In order to support both formats, we first try parsing the log as
JSON. If that succeeds, and the data has the expected keys, we parse
it as a wptreport file by reconstructing the events we would have
received in a raw log.
If that doesn't work, we fall back to the standard raw-log processing.
MozReview-Commit-ID: 7rjTJJUPhpz
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
@@ -230,23 +230,58 @@ class ExpectedUpdater(object):
for manifest in test_manifests.iterkeys():
for test_type, path, _ in manifest:
if test_type in wpttest.manifest_test_cls:
self.types_by_path[path] = wpttest.manifest_test_cls[test_type]
self.run_infos = []
def update_from_log(self, log_file):
self.run_info = None
+ try:
+ data = json.load(log_file)
+ except Exception:
+ pass
+ else:
+ if "action" not in data and "results" in data:
+ self.update_from_wptreport_log(data)
+ return
+
+ log_file.seek(0)
+ self.update_from_raw_log(log_file)
+
+ def update_from_raw_log(self, log_file):
action_map = self.action_map
for line in log_file:
data = json.loads(line)
action = data["action"]
if action in action_map:
action_map[action](data)
+ def update_from_wptreport_log(self, data):
+ action_map = self.action_map
+ action_map["suite_start"]({"run_info": data["run_info"]})
+ for test in data["results"]:
+ action_map["test_start"]({"test": test["test"]})
+ for subtest in test["subtests"]:
+ action_map["test_status"]({"test": test["test"],
+ "subtest": subtest["name"],
+ "status": subtest["status"],
+ "expected": subtest.get("expected")})
+ action_map["test_end"]({"test": test["test"],
+ "status": test["status"],
+ "expected": test.get("expected")})
+ if "asserts" in test:
+ asserts = test["asserts"]
+ action_map["assertion_count"]({"test": test["test"],
+ "count": asserts["count"],
+ "min_expected": asserts["min"],
+ "max_expected": asserts["max"]})
+ for item in data.get("lsan_leaks", []):
+ action_map["lsan_leak"](item)
+
def suite_start(self, data):
self.run_info = data["run_info"]
def test_start(self, data):
test_id = data["test"]
try:
test_data = self.id_test_map[test_id]
except KeyError:
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
@@ -1,8 +1,9 @@
+import json
import mock
import os
import sys
from io import BytesIO
from .. import metadata, manifestupdate, wptmanifest
from mozlog import structuredlog, handlers, formatters
@@ -66,25 +67,28 @@ def create_updater(tests, url_base="/",
url_base)
for test_id in test_ids:
id_test_map[test_id] = test_data
return id_test_map, metadata.ExpectedUpdater(test_manifests, id_test_map, **kwargs)
def create_log(entries):
- logger = structuredlog.StructuredLogger("expected_test")
data = BytesIO()
- handler = handlers.StreamHandler(data, formatters.JSONFormatter())
- logger.add_handler(handler)
+ if isinstance(entries, list):
+ logger = structuredlog.StructuredLogger("expected_test")
+ handler = handlers.StreamHandler(data, formatters.JSONFormatter())
+ logger.add_handler(handler)
- for item in entries:
- action, kwargs = item
- getattr(logger, action)(**kwargs)
- logger.remove_handler(handler)
+ for item in entries:
+ action, kwargs = item
+ getattr(logger, action)(**kwargs)
+ logger.remove_handler(handler)
+ else:
+ json.dump(entries, data)
data.seek(0)
return data
def suite_log(entries, run_info=None):
return ([("suite_start", {"tests": [], "run_info": run_info or {}})] +
entries +
[("suite_end", {})])
@@ -563,8 +567,43 @@ def test_update_lsan_3():
run_info={"os": "linux"})
updated = update(tests, log_0, log_1)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
+
+
+def test_update_wptreport_0():
+ tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness",
+ """[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log = {"run_info": {},
+ "results": [
+ {"test": "/path/to/test.htm",
+ "subtests": [{"name": "test1",
+ "status": "PASS",
+ "expected": "FAIL"}],
+ "status": "OK"}
+ ]}
+
+ updated = update(tests, log)
+
+ assert len(updated) == 1
+ assert updated[0][1].is_empty
+
+
+def test_update_wptreport_1():
+ tests = [("path/to/__dir__", ["path/to/__dir__"], None, "")]
+
+ log = {"run_info": {},
+ "results": [],
+ "lsan_leaks": [{"scope": "path/to/",
+ "frames": ["baz", "foobar"]}]}
+
+ updated = update(tests, log)
+
+ assert len(updated) == 1
+ assert updated[0][1].get("lsan-allowed") == ["baz"]