Bug 1354232 - Add support for updating LSAN data in wpt-update r=maja_zf
LSAN data differs from existing expectation data because the data is
only generated when the browser exits, so the problems reported can
happen at any point in the current session. We use the `scope`
property in the log message to determine the path to a __dir__.ini
file that covers all the tests run in the session, and add the LSAN
exclusion rules there.
The rules themselves are generated by taking the topmost frame of any
stack that's reported as unexpectedly leaking, and adding that to the
list of permitted frames in the lsan-allowed property. We never remove
entries from this list since intermittents may be present which won't
appear on a specific run. Instead we rely on humans fixing the issues
to also clean up the expectation files.
MozReview-Commit-ID: Kxm0hFXlGE3
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
@@ -173,34 +173,42 @@ class FirefoxBrowser(Browser):
self.symbols_path)
else:
self.stack_fixer = None
if timeout_multiplier:
self.init_timeout = self.init_timeout * timeout_multiplier
self.asan = asan
+ self.lsan_allowed = None
self.leak_check = leak_check
self.leak_report_file = None
self.lsan_handler = None
- if self.asan:
- self.lsan_handler = mozleak.LSANLeaks(logger)
self.stylo_threads = stylo_threads
self.chaos_mode_flags = chaos_mode_flags
def settings(self, test):
- if self.asan:
- self.lsan_handler.set_allowed(test.lsan_allowed)
- return {"check_leaks": self.leak_check and not test.leaks}
+ self.lsan_allowed = test.lsan_allowed
+ return {"check_leaks": self.leak_check and not test.leaks,
+ "lsan_allowed": test.lsan_allowed}
- def start(self, **kwargs):
+ def start(self, group_metadata=None, **kwargs):
+ if group_metadata is None:
+ group_metadata = {}
+
if self.marionette_port is None:
self.marionette_port = get_free_port(2828, exclude=self.used_ports)
self.used_ports.add(self.marionette_port)
+        if self.asan:
+            self.lsan_handler = mozleak.LSANLeaks(self.logger,
+                                                  scope=group_metadata.get("scope", "/"),
+                                                  allowed=self.lsan_allowed)
+
env = test_environment(xrePath=os.path.dirname(self.binary),
debugger=self.debug_info is not None,
log=self.logger,
lsanPath=self.prefs_root)
env["STYLO_THREADS"] = str(self.stylo_threads)
if self.chaos_mode_flags is not None:
env["MOZ_CHAOSMODE"] = str(self.chaos_mode_flags)
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py
@@ -70,16 +70,19 @@ class ExpectedManifest(ManifestItem):
ManifestItem.__init__(self, node)
self.child_map = {}
self.test_path = test_path
self.url_base = url_base
assert self.url_base is not None
self.modified = False
self.boolean_properties = boolean_properties
self.property_order = property_order
+ self.update_properties = {
+ "lsan": LsanUpdate(self),
+ }
def append(self, child):
ManifestItem.append(self, child)
if child.id in self.child_map:
print "Warning: Duplicate heading %s" % child.id
self.child_map[child.id] = child
def _remove_child(self, child):
@@ -101,16 +104,29 @@ class ExpectedManifest(ManifestItem):
return test_id in self.child_map
@property
def url(self):
return urlparse.urljoin(self.url_base,
"/".join(self.test_path.split(os.path.sep)))
+ def set_lsan(self, run_info, result):
+ """Set the result of the test in a particular run
+
+ :param run_info: Dictionary of run_info parameters corresponding
+ to this run
+ :param result: Lsan violations detected"""
+
+ self.update_properties["lsan"].set(run_info, result)
+
+ def coalesce_properties(self, stability):
+ for prop_update in self.update_properties.itervalues():
+ prop_update.coalesce(stability)
+
class TestNode(ManifestItem):
def __init__(self, node):
"""Tree node associated with a particular test in a manifest
:param node: AST node associated with the test"""
ManifestItem.__init__(self, node)
@@ -470,16 +486,54 @@ class MinAssertsUpdate(PropertyUpdate):
values.append(current_default[0].value_as(self.value_type))
values.extend(max(0, item.value - 1) for item in self.new)
values.extend(max(0, item.value - 1) for item in
itertools.chain.from_iterable(results for _, results in self.updated))
new_value = min(values)
return True, new_value
+class LsanUpdate(PropertyUpdate):
+ property_name = "lsan-allowed"
+ cls_default_value = None
+
+ def get_value(self, result):
+    # If we have an allowed_match that matched, return None
+ # This value is ignored later (because it matches the default)
+ # We do that because then if we allow a failure in foo/__dir__.ini
+ # we don't want to update foo/bar/__dir__.ini with the same rule
+ if result[1]:
+ return None
+ # Otherwise return the topmost stack frame
+ # TODO: there is probably some improvement to be made by looking for a "better" stack frame
+ return result[0][0]
+
+ def update_value(self, old_value, new_value):
+ if isinstance(new_value, (str, unicode)):
+ new_value = {new_value}
+ else:
+ new_value = set(new_value)
+ if old_value is None:
+ old_value = set()
+ old_value = set(old_value)
+ return sorted((old_value | new_value) - {None})
+
+ def update_default(self):
+ current_default = None
+ if self.property_name in self.node._data:
+ current_default = [item for item in
+ self.node._data[self.property_name]
+ if item.condition_node is None]
+ if current_default:
+ current_default = current_default[0].value
+ new_values = [item.value for item in self.new]
+ new_value = self.update_value(current_default, new_values)
+ return True, new_value if new_value else None
+
+
def group_conditionals(values, property_order=None, boolean_properties=None):
"""Given a list of Value objects, return a list of
(conditional_node, status) pairs representing the conditional
expressions that are required to match each status
:param values: List of Values
:param property_order: List of properties to use in expectation metadata
from most to least significant.
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
@@ -1,36 +1,33 @@
import os
import shutil
import sys
import tempfile
import types
import uuid
from collections import defaultdict
-from mozlog import reader
from mozlog import structuredlog
import expected
import manifestupdate
import testloader
import wptmanifest
import wpttest
from vcs import git
manifest = None # Module that will be imported relative to test_root
manifestitem = None
logger = structuredlog.StructuredLogger("web-platform-tests")
try:
- import ujson
+ import ujson as json
except ImportError:
-    pass
+    import json
-else:
- reader.json = ujson
def load_test_manifests(serve_root, test_paths):
do_delayed_imports(serve_root)
manifest_loader = testloader.ManifestLoader(test_paths, False)
return manifest_loader.load()
@@ -165,16 +162,17 @@ def update_from_logs(manifests, *log_fil
updater = ExpectedUpdater(manifests, expected_map, id_test_map,
ignore_existing=ignore_existing)
for log_filename in log_filenames:
with open(log_filename) as f:
updater.update_from_log(f)
for manifest_expected in expected_map.itervalues():
for tree in manifest_expected.itervalues():
+ tree.coalesce_properties(stability=stability)
for test in tree.iterchildren():
for subtest in test.iterchildren():
subtest.coalesce_properties(stability=stability)
test.coalesce_properties(stability=stability)
return expected_map
@@ -236,31 +234,37 @@ class ExpectedUpdater(object):
self.expected_tree = expected_tree
self.id_path_map = id_path_map
self.ignore_existing = ignore_existing
self.run_info = None
self.action_map = {"suite_start": self.suite_start,
"test_start": self.test_start,
"test_status": self.test_status,
"test_end": self.test_end,
- "assertion_count": self.assertion_count}
+ "assertion_count": self.assertion_count,
+ "lsan_leak": self.lsan_leak}
self.tests_visited = {}
self.test_cache = {}
self.types_by_path = {}
for manifest in self.test_manifests.iterkeys():
for test_type, path, _ in manifest:
if test_type in wpttest.manifest_test_cls:
self.types_by_path[path] = wpttest.manifest_test_cls[test_type]
def update_from_log(self, log_file):
self.run_info = None
- log_reader = reader.read(log_file)
- reader.each_log(log_reader, self.action_map)
+        action_map = self.action_map
+        for line in log_file:
+            data = json.loads(line)
+            action = data["action"]
+            if action in action_map:
+                action_map[action](data)
def suite_start(self, data):
self.run_info = data["run_info"]
def test_start(self, data):
test_id = data["test"]
try:
test_manifest, test = self.id_path_map[test_id]
@@ -312,45 +316,92 @@ class ExpectedUpdater(object):
def assertion_count(self, data):
test_id = data["test"]
test = self.test_cache.get(test_id)
if test is None:
return
test.set_asserts(self.run_info, data["count"])
+ def lsan_leak(self, data):
+ dir_path = data.get("scope", "/")
+ dir_id = os.path.join(dir_path, "__dir__").replace(os.path.sep, "/")
+ if dir_id.startswith("/"):
+ dir_id = dir_id[1:]
+ test_manifest, obj = self.id_path_map[dir_id]
+ expected_node = self.expected_tree[test_manifest][obj]
+
+ expected_node.set_lsan(self.run_info, (data["frames"], data.get("allowed_match")))
+
def create_test_tree(metadata_path, test_manifest, property_order=None,
boolean_properties=None):
expected_map = {}
id_test_map = {}
exclude_types = frozenset(["stub", "helper", "manual", "support", "conformancechecker"])
all_types = [item.item_type for item in manifestitem.__dict__.itervalues()
if type(item) == type and
issubclass(item, manifestitem.ManifestItem) and
item.item_type is not None]
include_types = set(all_types) - exclude_types
for _, test_path, tests in test_manifest.itertypes(*include_types):
- expected_data = load_expected(test_manifest, metadata_path, test_path, tests,
- property_order=property_order,
- boolean_properties=boolean_properties)
- if expected_data is None:
- expected_data = create_expected(test_manifest,
- test_path,
- tests,
- property_order=property_order,
- boolean_properties=boolean_properties)
-
+ expected_data = load_or_create_expected(test_manifest, metadata_path, test_path, tests,
+ property_order, boolean_properties)
for test in tests:
id_test_map[test.id] = (test_manifest, test)
expected_map[test] = expected_data
+ dir_path = os.path.split(test_path)[0].replace(os.path.sep, "/")
+ while True:
+ if dir_path:
+ dir_id = dir_path + "/__dir__"
+ else:
+ dir_id = "__dir__"
+ dir_id = (test_manifest.url_base + dir_id).lstrip("/")
+ if dir_id not in id_test_map:
+ dir_object = DirObject(dir_id, dir_path)
+ expected_data = load_or_create_expected(test_manifest,
+ metadata_path,
+ dir_id,
+ [],
+ property_order,
+ boolean_properties)
+
+ id_test_map[dir_id] = (test_manifest, dir_object)
+ expected_map[dir_object] = expected_data
+ if not dir_path:
+ break
+ dir_path = dir_path.rsplit("/", 1)[0] if "/" in dir_path else ""
+
return expected_map, id_test_map
+class DirObject(object):
+ def __init__(self, id, path):
+ self.id = id
+ self.path = path
+
+ def __hash__(self):
+ return hash(self.id)
+
+
+def load_or_create_expected(test_manifest, metadata_path, test_path, tests, property_order=None,
+ boolean_properties=None):
+ expected_data = load_expected(test_manifest, metadata_path, test_path, tests,
+ property_order=property_order,
+ boolean_properties=boolean_properties)
+ if expected_data is None:
+ expected_data = create_expected(test_manifest,
+ test_path,
+ tests,
+ property_order=property_order,
+ boolean_properties=boolean_properties)
+ return expected_data
+
+
def create_expected(test_manifest, test_path, tests, property_order=None,
boolean_properties=None):
expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base,
property_order=property_order,
boolean_properties=boolean_properties)
for test in tests:
expected.append(manifestupdate.TestNode.create(test.id))
return expected
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
@@ -448,16 +448,17 @@ class ManifestLoader(object):
def iterfilter(filters, iter):
for f in filters:
iter = f(iter)
for item in iter:
yield item
+
class TestLoader(object):
def __init__(self,
test_manifests,
test_types,
run_info,
manifest_filters=None,
meta_filters=None,
chunk_type="none",