Bug 1265584 - Add support for recording asserts in wptrunner, r=ato, maja_zf draft
author James Graham <james@hoppipolla.co.uk>
Tue, 08 May 2018 13:32:02 +0100
changeset 796304 18d53b7880ffc760491a31ec36e1deb5deff622d
parent 796303 f605d88556b5af60fda626df5dd76465ecc6412d
child 796305 ead63d4ac9c26b4be1b3775454db7f4a3402c6ab
push id 110205
push user bmo:james@hoppipolla.co.uk
push date Thu, 17 May 2018 13:42:46 +0000
reviewers ato, maja_zf
bugs 1265584
milestone62.0a1
Bug 1265584 - Add support for recording asserts in wptrunner, r=ato, maja_zf Gecko has an API for producing a non-fatal "assert". For quality control, it should be possible to annotate the range of possible numbers of these asserts produced during a test execution, and fail if the actual number of asserts falls outside this range. This patch adds assert checking by default in debug builds. It adds two metadata properties; max-asserts and min-asserts for specifying the range of possible asserts produced in a test. MozReview-Commit-ID: BFiIfYKuB9L
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
testing/web-platform/tests/tools/wptrunner/wptrunner/formatters.py
testing/web-platform/tests/tools/wptrunner/wptrunner/manifestexpected.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
@@ -88,16 +88,17 @@ def executor_kwargs(test_type, server_co
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
                                            cache_manager, run_info_data,
                                            **kwargs)
     executor_kwargs["close_after_done"] = test_type != "reftest"
     executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                    run_info_data,
                                                                    **kwargs)
+    executor_kwargs["e10s"] = run_info_data["e10s"]
     capabilities = {}
     if test_type == "reftest":
         executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
         executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
     if test_type == "wdspec":
         options = {}
         if kwargs["binary"]:
             options["binary"] = kwargs["binary"]
@@ -106,16 +107,17 @@ def executor_kwargs(test_type, server_co
         options["prefs"] = {
             "network.dns.localDomains": ",".join(server_config.domains_set)
         }
         capabilities["moz:firefoxOptions"] = options
     if kwargs["certutil_binary"] is None:
         capabilities["acceptInsecureCerts"] = True
     if capabilities:
         executor_kwargs["capabilities"] = capabilities
+    executor_kwargs["debug"] = run_info_data["debug"]
     return executor_kwargs
 
 
 def env_extras(**kwargs):
     return []
 
 
 def env_options():
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
@@ -57,33 +57,33 @@ class TestharnessResultConverter(object)
                      1: "ERROR",
                      2: "TIMEOUT"}
 
     test_codes = {0: "PASS",
                   1: "FAIL",
                   2: "TIMEOUT",
                   3: "NOTRUN"}
 
-    def __call__(self, test, result):
+    def __call__(self, test, result, extra=None):
         """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
         result_url, status, message, stack, subtest_results = result
         assert result_url == test.url, ("Got results from %s, expected %s" %
-                                      (result_url, test.url))
-        harness_result = test.result_cls(self.harness_codes[status], message)
+                                        (result_url, test.url))
+        harness_result = test.result_cls(self.harness_codes[status], message, extra=extra)
         return (harness_result,
                 [test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
                  for st_name, st_status, st_message, st_stack in subtest_results])
 
 
 testharness_result_converter = TestharnessResultConverter()
 
 
 def reftest_result_converter(self, test, result):
     return (test.result_cls(result["status"], result["message"],
-                            extra=result.get("extra")), [])
+                            extra=result.get("extra", {})), [])
 
 
 def pytest_result_converter(self, test, data):
     harness_data, subtest_data = data
 
     if subtest_data is None:
         subtest_data = []
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
@@ -20,17 +20,18 @@ from .base import (CallbackHandler,
                    TestharnessExecutor,
                    WdspecExecutor,
                    WdspecRun,
                    WebDriverProtocol,
                    extra_timeout,
                    testharness_result_converter,
                    reftest_result_converter,
                    strip_server)
-from .protocol import (BaseProtocolPart,
+from .protocol import (AssertsProtocolPart,
+                       BaseProtocolPart,
                        TestharnessProtocolPart,
                        PrefsProtocolPart,
                        Protocol,
                        StorageProtocolPart,
                        SelectorProtocolPart,
                        ClickProtocolPart,
                        SendKeysProtocolPart,
                        TestDriverProtocolPart)
@@ -288,38 +289,84 @@ class MarionetteStorageProtocolPart(Stor
             let qms = Components.classes["@mozilla.org/dom/quota-manager-service;1"]
                                 .getService(Components.interfaces.nsIQuotaManagerService);
             qms.clearStoragesForPrincipal(principal, "default", true);
             """ % url
         with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
             self.marionette.execute_script(script)
 
 
+class MarionetteAssertsProtocolPart(AssertsProtocolPart):
+    def setup(self):
+        self.assert_count = {"chrome": 0, "content": 0}
+        self.chrome_assert_count = 0
+        self.marionette = self.parent.marionette
+
+    def get(self):
+        script = """
+        debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
+        if (debug.isDebugBuild) {
+          return debug.assertionCount;
+        }
+        return 0;
+        """
+
+        def get_count(context, **kwargs):
+            try:
+                context_count = self.marionette.execute_script(script, **kwargs)
+                if context_count:
+                    self.parent.logger.info("Got %s assert count %s" % (context, context_count))
+                    test_count = context_count - self.assert_count[context]
+                    self.assert_count[context] = context_count
+                    return test_count
+            except errors.NoSuchWindowException:
+                # If the window was already closed
+                self.parent.logger.warning("Failed to get assertion count; window was closed")
+            except (errors.MarionetteException, socket.error):
+                # This usually happens if the process crashed
+                pass
+
+        counts = []
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            counts.append(get_count("chrome"))
+        if self.parent.e10s:
+            counts.append(get_count("content", sandbox="system"))
+
+        counts = [item for item in counts if item is not None]
+
+        if not counts:
+            return None
+
+        return sum(counts)
+
+
 class MarionetteSelectorProtocolPart(SelectorProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
 
     def elements_by_selector(self, selector):
         return self.marionette.find_elements("css selector", selector)
 
 
 class MarionetteClickProtocolPart(ClickProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
 
     def element(self, element):
         return element.click()
 
+
 class MarionetteSendKeysProtocolPart(SendKeysProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
 
     def send_keys(self, element, keys):
         return element.send_keys(keys)
 
+
 class MarionetteTestDriverProtocolPart(TestDriverProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
 
     def send_message(self, message_type, status, message=None):
         obj = {
             "type": "testdriver-%s" % str(message_type),
             "status": str(status)
@@ -332,27 +379,29 @@ class MarionetteTestDriverProtocolPart(T
 class MarionetteProtocol(Protocol):
     implements = [MarionetteBaseProtocolPart,
                   MarionetteTestharnessProtocolPart,
                   MarionettePrefsProtocolPart,
                   MarionetteStorageProtocolPart,
                   MarionetteSelectorProtocolPart,
                   MarionetteClickProtocolPart,
                   MarionetteSendKeysProtocolPart,
-                  MarionetteTestDriverProtocolPart]
+                  MarionetteTestDriverProtocolPart,
+                  MarionetteAssertsProtocolPart]
 
-    def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1):
+    def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1, e10s=True):
         do_delayed_imports()
 
         super(MarionetteProtocol, self).__init__(executor, browser)
         self.marionette = None
         self.marionette_port = browser.marionette_port
         self.capabilities = capabilities
         self.timeout_multiplier = timeout_multiplier
         self.runner_handle = None
+        self.e10s = e10s
 
     def connect(self):
         self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
         startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
         self.marionette = marionette.Marionette(host='localhost',
                                                 port=self.marionette_port,
                                                 socket_timeout=None,
                                                 startup_timeout=startup_timeout)
@@ -488,27 +537,28 @@ class ExecuteAsyncScriptRun(object):
         self.result_flag.set()
 
 
 class MarionetteTestharnessExecutor(TestharnessExecutor):
     supports_testdriver = True
 
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  close_after_done=True, debug_info=None, capabilities=None,
-                 **kwargs):
+                 debug=False, **kwargs):
         """Marionette-based executor for testharness.js tests"""
         TestharnessExecutor.__init__(self, browser, server_config,
                                      timeout_multiplier=timeout_multiplier,
                                      debug_info=debug_info)
 
-        self.protocol = MarionetteProtocol(self, browser, capabilities, timeout_multiplier)
+        self.protocol = MarionetteProtocol(self, browser, capabilities, timeout_multiplier, kwargs["e10s"])
         self.script = open(os.path.join(here, "testharness_marionette.js")).read()
         self.script_resume = open(os.path.join(here, "testharness_marionette_resume.js")).read()
         self.close_after_done = close_after_done
         self.window_id = str(uuid.uuid4())
+        self.debug = debug
 
         self.original_pref_values = {}
 
         if marionette is None:
             do_delayed_imports()
 
     def is_alive(self):
         return self.protocol.is_alive
@@ -523,20 +573,33 @@ class MarionetteTestharnessExecutor(Test
         timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
                    else None)
 
         success, data = ExecuteAsyncScriptRun(self.logger,
                                               self.do_testharness,
                                               self.protocol,
                                               self.test_url(test),
                                               timeout).run()
+        # The format of data depends on whether the test ran to completion or not
+        # For asserts we only care about the fact that if it didn't complete, the
+        # status is in the first field.
+        status = None
+        if not success:
+            status = data[0]
+
+        extra = None
+        if self.debug and (success or status not in ("CRASH", "INTERNAL-ERROR")):
+            assertion_count = self.protocol.asserts.get()
+            if assertion_count is not None:
+                extra = {"assertion_count": assertion_count}
+
         if success:
-            return self.convert_result(test, data)
+            return self.convert_result(test, data, extra=extra)
 
-        return (test.result_cls(*data), [])
+        return (test.result_cls(extra=extra, *data), [])
 
     def do_testharness(self, protocol, url, timeout):
         protocol.base.execute_script("if (window.win) {window.win.close()}")
         parent_window = protocol.testharness.close_old_windows(protocol)
 
         if timeout is not None:
             timeout_ms = str(timeout * 1000)
         else:
@@ -567,36 +630,37 @@ class MarionetteTestharnessExecutor(Test
         return rv
 
 
 class MarionetteRefTestExecutor(RefTestExecutor):
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  screenshot_cache=None, close_after_done=True,
                  debug_info=None, reftest_internal=False,
                  reftest_screenshot="unexpected",
-                 group_metadata=None, capabilities=None, **kwargs):
+                 group_metadata=None, capabilities=None, debug=False, **kwargs):
         """Marionette-based executor for reftests"""
         RefTestExecutor.__init__(self,
                                  browser,
                                  server_config,
                                  screenshot_cache=screenshot_cache,
                                  timeout_multiplier=timeout_multiplier,
                                  debug_info=debug_info)
         self.protocol = MarionetteProtocol(self, browser, capabilities,
-                                           timeout_multiplier)
+                                           timeout_multiplier, kwargs["e10s"])
         self.implementation = (InternalRefTestImplementation
                                if reftest_internal
                                else RefTestImplementation)(self)
         self.implementation_kwargs = ({"screenshot": reftest_screenshot} if
                                       reftest_internal else {})
 
         self.close_after_done = close_after_done
         self.has_window = False
         self.original_pref_values = {}
         self.group_metadata = group_metadata
+        self.debug = debug
 
         with open(os.path.join(here, "reftest.js")) as f:
             self.script = f.read()
         with open(os.path.join(here, "reftest-wait_marionette.js")) as f:
             self.wait_script = f.read()
 
     def setup(self, runner):
         super(self.__class__, self).setup(runner)
@@ -627,16 +691,23 @@ class MarionetteRefTestExecutor(RefTestE
                 self.has_window = False
 
             if not self.has_window:
                 self.protocol.base.execute_script(self.script)
                 self.protocol.base.set_window(self.protocol.marionette.window_handles[-1])
                 self.has_window = True
 
         result = self.implementation.run_test(test)
+
+        if self.debug:
+            assertion_count = self.protocol.asserts.get()
+            if "extra" not in result:
+                result["extra"] = {}
+            result["extra"]["assertion_count"] = assertion_count
+
         return self.convert_result(test, result)
 
     def screenshot(self, test, viewport_size, dpi):
         # https://github.com/w3c/wptrunner/issues/166
         assert viewport_size is None
         assert dpi is None
 
         timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
@@ -285,8 +285,21 @@ class TestDriverProtocolPart(ProtocolPar
     def send_message(self, message_type, status, message=None):
         """Send a testdriver message to the browser.
 
         :param str message_type: The kind of the message.
         :param str status: Either "failure" or "success" depending on whether the
                            previous command succeeded.
         :param str message: Additional data to add to the message."""
         pass
+
+
+class AssertsProtocolPart(ProtocolPart):
+    """ProtocolPart that implements the functionality required to get a count of non-fatal
+    assertions triggered"""
+    __metaclass__ = ABCMeta
+
+    name = "asserts"
+
+    @abstractmethod
+    def get(self):
+        """Get a count of assertions since the last browser start"""
+        pass
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters.py
@@ -52,8 +52,16 @@ class WptreportFormatter(BaseFormatter):
         if "message" in data:
             subtest["message"] = data["message"]
 
     def test_end(self, data):
         test = self.find_or_create_test(data)
         test["status"] = data["status"]
         if "message" in data:
             test["message"] = data["message"]
+
+    def assertion_count(self, data):
+        test = self.find_or_create_test(data)
+        test["asserts"] = {
+            "count": data["count"],
+            "min": data["min_expected"],
+            "max": data["max_expected"]
+        }
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestexpected.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestexpected.py
@@ -28,16 +28,24 @@ def data_cls_getter(output_node, visited
 def bool_prop(name, node):
     """Boolean property"""
     try:
         return node.get(name)
     except KeyError:
         return None
 
 
+def int_prop(name, node):
+    """Integer property"""
+    try:
+        return int(node.get(name))
+    except KeyError:
+        return None
+
+
 def tags(node):
     """Set of tags that have been applied to the test"""
     try:
         value = node.get("tags")
         if isinstance(value, (str, unicode)):
             return {value}
         return set(value)
     except KeyError:
@@ -111,16 +119,24 @@ class ExpectedManifest(ManifestItem):
     def restart_after(self):
         return bool_prop("restart-after", self)
 
     @property
     def leaks(self):
         return bool_prop("leaks", self)
 
     @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
+    @property
     def tags(self):
         return tags(self)
 
     @property
     def prefs(self):
         return prefs(self)
 
 
@@ -133,16 +149,24 @@ class DirectoryManifest(ManifestItem):
     def restart_after(self):
         return bool_prop("restart-after", self)
 
     @property
     def leaks(self):
         return bool_prop("leaks", self)
 
     @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
+    @property
     def tags(self):
         return tags(self)
 
     @property
     def prefs(self):
         return prefs(self)
 
 
@@ -182,16 +206,24 @@ class TestNode(ManifestItem):
     def restart_after(self):
         return bool_prop("restart-after", self)
 
     @property
     def leaks(self):
         return bool_prop("leaks", self)
 
     @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
+    @property
     def tags(self):
         return tags(self)
 
     @property
     def prefs(self):
         return prefs(self)
 
     def append(self, node):
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
@@ -578,16 +578,24 @@ class TestRunnerManager(threading.Thread
         self.test_count += 1
         is_unexpected = expected != status
         if is_unexpected:
             self.unexpected_count += 1
             self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
         if status == "CRASH":
             self.browser.log_crash(test.id)
 
+        if "assertion_count" in file_result.extra:
+            assertion_count = file_result.extra.pop("assertion_count")
+            if assertion_count > 0:
+                self.logger.assertion_count(test.id,
+                                            int(assertion_count),
+                                            test.min_assertion_count,
+                                            test.max_assertion_count)
+
         self.logger.test_end(test.id,
                              status,
                              message=file_result.message,
                              expected=expected,
                              extra=file_result.extra)
 
         restart_before_next = (test.restart_after or
                                file_result.status in ("CRASH", "EXTERNAL-TIMEOUT", "INTERNAL-ERROR") or
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
@@ -9,17 +9,17 @@ enabled_tests = set(["testharness", "ref
 
 class Result(object):
     def __init__(self, status, message, expected=None, extra=None):
         if status not in self.statuses:
             raise ValueError("Unrecognised status %s" % status)
         self.status = status
         self.message = message
         self.expected = expected
-        self.extra = extra
+        self.extra = extra if extra is not None else {}
 
     def __repr__(self):
         return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
 
 
 class SubtestResult(object):
     def __init__(self, name, status, message, stack=None, expected=None):
         self.name = name
@@ -188,16 +188,32 @@ class Test(object):
     def leaks(self):
         for meta in self.itermeta(None):
             leaks = meta.leaks
             if leaks is not None:
                 return leaks
         return False
 
     @property
+    def min_assertion_count(self):
+        for meta in self.itermeta(None):
+            count = meta.min_assertion_count
+            if count is not None:
+                return count
+        return 0
+
+    @property
+    def max_assertion_count(self):
+        for meta in self.itermeta(None):
+            count = meta.max_assertion_count
+            if count is not None:
+                return count
+        return 0
+
+    @property
     def tags(self):
         tags = set()
         for meta in self.itermeta():
             meta_tags = meta.tags
             if atom_reset in meta_tags:
                 tags = meta_tags.copy()
                 tags.remove(atom_reset)
             else: