Bug 1363428 - Add per-test-queue metadata to wptrunner, r=ato draft
authorJames Graham <james@hoppipolla.co.uk>
Wed, 10 May 2017 14:56:20 +0100
changeset 600215 c0b1c9e10c01ae450f0b8b43881e7a6503a4c5ec
parent 600214 2d525263bb91e466bc49cb57f14e2efe2d7d59d7
child 600216 b7783b56b39f42308a1071f69962bb5a4df448a1
push id 65688
push user bmo:james@hoppipolla.co.uk
push date Sat, 24 Jun 2017 11:04:46 +0000
reviewers ato
bugs 1363428
milestone56.0a1
Bug 1363428 - Add per-test-queue metadata to wptrunner, r=ato

This adds a metadata object associated with each test queue, and uses it to pass cache information into the marionette internal reftest implementation so that we are able to cache only those canvases that will be reused.

MozReview-Commit-ID: zASrlvnri3
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
@@ -464,17 +464,17 @@ class MarionetteTestharnessExecutor(Test
         return rv
 
 
 class MarionetteRefTestExecutor(RefTestExecutor):
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  screenshot_cache=None, close_after_done=True,
                  debug_info=None, reftest_internal=False,
                  reftest_screenshot="unexpected",
-                 **kwargs):
+                 group_metadata=None, **kwargs):
         """Marionette-based executor for reftests"""
         RefTestExecutor.__init__(self,
                                  browser,
                                  server_config,
                                  screenshot_cache=screenshot_cache,
                                  timeout_multiplier=timeout_multiplier,
                                  debug_info=debug_info)
         self.protocol = MarionetteProtocol(self, browser)
@@ -482,16 +482,17 @@ class MarionetteRefTestExecutor(RefTestE
                                if reftest_internal
                                else RefTestImplementation)(self)
         self.implementation_kwargs = ({"screenshot": reftest_screenshot} if
                                       reftest_internal else {})
 
         self.close_after_done = close_after_done
         self.has_window = False
         self.original_pref_values = {}
+        self.group_metadata = group_metadata
 
         with open(os.path.join(here, "reftest.js")) as f:
             self.script = f.read()
         with open(os.path.join(here, "reftest-wait.js")) as f:
             self.wait_script = f.read()
 
     def setup(self, runner):
         super(self.__class__, self).setup(runner)
@@ -559,19 +560,19 @@ class InternalRefTestImplementation(obje
         self.executor = executor
 
     @property
     def logger(self):
         return self.executor.logger
 
     def setup(self, screenshot="unexpected"):
         data = {"screenshot": screenshot}
-        if self.executor.queue_metadata is not None:
+        if self.executor.group_metadata is not None:
             data["urlCount"] = {urlparse.urljoin(self.executor.server_url(key[0]), key[1]):value
-                                for key, value in self.executor.queue_metadata.get("url_count", {}).iteritems()
+                                for key, value in self.executor.group_metadata.get("url_count", {}).iteritems()
                                 if value > 1}
         self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CHROME)
         self.executor.protocol.marionette._send_message("reftest:setup", data)
 
     def run_test(self, test):
         viewport_size = test.viewport_size
         dpi = test.dpi
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
@@ -562,67 +562,72 @@ class TestLoader(object):
 
 
 class TestSource(object):
     __metaclass__ = ABCMeta
 
     def __init__(self, test_queue):
         self.test_queue = test_queue
         self.current_group = None
+        self.current_metadata = None
 
     @abstractmethod
     #@classmethod (doesn't compose with @abstractmethod)
     def make_queue(cls, tests, **kwargs):
         pass
 
     def group(self):
         if not self.current_group or len(self.current_group) == 0:
             try:
-                self.current_group = self.test_queue.get(block=False)
+                self.current_group, self.current_metadata = self.test_queue.get(block=False)
             except Empty:
-                return None
-        return self.current_group
+                return None, None
+        return self.current_group, self.current_metadata
 
 
 class GroupedSource(TestSource):
     @classmethod
     def new_group(cls, state, test, **kwargs):
         raise NotImplementedError
 
     @classmethod
     def make_queue(cls, tests, **kwargs):
         test_queue = Queue()
         groups = []
 
         state = {}
 
         for test in tests:
             if cls.new_group(state, test, **kwargs):
-                groups.append(deque([]))
+                groups.append((deque(), {}))
 
-            group = groups[-1]
+            group, metadata = groups[-1]
             group.append(test)
+            test.update_metadata(metadata)
 
         for item in groups:
             test_queue.put(item)
         return test_queue
 
 
 class SingleTestSource(TestSource):
     @classmethod
     def make_queue(cls, tests, **kwargs):
         test_queue = Queue()
         processes = kwargs["processes"]
         queues = [deque([]) for _ in xrange(processes)]
+        metadatas = [{} for _ in xrange(processes)]
         for test in tests:
             idx = hash(test.id) % processes
             group = queues[idx]
+            metadata = metadatas[idx]
             group.append(test)
+            test.update_metadata(metadata)
 
-        for item in queues:
+        for item in zip(queues, metadatas):
             test_queue.put(item)
 
         return test_queue
 
 
 class PathGroupedSource(GroupedSource):
     @classmethod
     def new_group(cls, state, test, **kwargs):
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
@@ -166,17 +166,17 @@ class BrowserManager(object):
         self.browser_settings = browser_settings
         self.last_test = test
         return restart_required
 
     def init(self):
         """Launch the browser that is being tested,
         and the TestRunner process that will run the tests."""
         # It seems that this lock is helpful to prevent some race that otherwise
-        # sometimes stops the spawned processes initalising correctly, and
+        # sometimes stops the spawned processes initialising correctly, and
         # leaves this thread hung
         if self.init_timer is not None:
             self.init_timer.cancel()
 
         self.logger.debug("Init called, starting browser and runner")
 
         if not self.no_timeout:
             self.init_timer = threading.Timer(self.browser.init_timeout,
@@ -228,19 +228,19 @@ class BrowserManager(object):
 
     def is_alive(self):
         return self.browser.is_alive()
 
 
 class _RunnerManagerState(object):
     before_init = namedtuple("before_init", [])
     initializing = namedtuple("initializing_browser",
-                              ["test", "test_group", "failure_count"])
+                              ["test", "test_group", "group_metadata", "failure_count"])
     running = namedtuple("running", ["test", "test_group"])
-    restarting = namedtuple("restarting", ["test", "test_group"])
+    restarting = namedtuple("restarting", ["test", "test_group", "group_metadata"])
     error = namedtuple("error", [])
     stop = namedtuple("stop", [])
 
 
 RunnerManagerState = _RunnerManagerState()
 
 
 class TestRunnerManager(threading.Thread):
@@ -419,38 +419,40 @@ class TestRunnerManager(threading.Thread
                                     (command, self.state.__class__.__name__))
                 return
             return f(*data)
 
     def should_stop(self):
         return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
 
     def start_init(self):
-        test, test_group = self.get_next_test()
+        test, test_group, group_metadata = self.get_next_test()
         if test is None:
             return RunnerManagerState.stop()
         else:
-            return RunnerManagerState.initializing(test, test_group, 0)
+            return RunnerManagerState.initializing(test, test_group, group_metadata, 0)
 
     def init(self):
         assert isinstance(self.state, RunnerManagerState.initializing)
         if self.state.failure_count > self.max_restarts:
             self.logger.error("Max restarts exceeded")
             return RunnerManagerState.error()
 
         self.browser.update_settings(self.state.test)
 
         result = self.browser.init()
         if result is Stop:
             return RunnerManagerState.error()
         elif not result:
             return RunnerManagerState.initializing(self.state.test,
                                                    self.state.test_group,
+                                                   self.state.group_metadata,
                                                    self.state.failure_count + 1)
         else:
+            self.executor_kwargs["group_metadata"] = self.state.group_metadata
             self.start_test_runner()
 
     def start_test_runner(self):
         # Note that we need to be careful to start the browser before the
         # test runner to ensure that any state set when the browser is started
         # can be passed in to the test runner.
         assert isinstance(self.state, RunnerManagerState.initializing)
         assert self.command_queue is not None
@@ -479,37 +481,40 @@ class TestRunnerManager(threading.Thread
                                           self.state.test_group)
 
     def init_failed(self):
         assert isinstance(self.state, RunnerManagerState.initializing)
         self.browser.after_init()
         self.stop_runner(force=True)
         return RunnerManagerState.initializing(self.state.test,
                                                self.state.test_group,
+                                               self.state.group_metadata,
                                                self.state.failure_count + 1)
 
     def get_next_test(self, test_group=None):
         test = None
         while test is None:
             while test_group is None or len(test_group) == 0:
-                test_group = self.test_source.group()
+                test_group, group_metadata = self.test_source.group()
                 if test_group is None:
                     self.logger.info("No more tests")
-                    return None, None
+                    return None, None, None
             test = test_group.popleft()
-        return test, test_group
+        return test, test_group, group_metadata
+
 
     def run_test(self):
         assert isinstance(self.state, RunnerManagerState.running)
         assert self.state.test is not None
 
         if self.browser.update_settings(self.state.test):
             self.logger.info("Restarting browser for new test environment")
             return RunnerManagerState.restarting(self.state.test,
-                                                 self.state.test_group)
+                                                 self.state.test_group,
+                                                 self.state.group_metadata)
 
         self.logger.test_start(self.state.test.id)
         self.send_message("run_test", self.state.test)
 
     def test_ended(self, test, results):
         """Handle the end of a test.
 
         Output the result of each subtest, and the result of the overall
@@ -572,32 +577,32 @@ class TestRunnerManager(threading.Thread
         # The browser should be stopped already, but this ensures we do any post-stop
         # processing
         self.logger.debug("Wait finished")
 
         return self.after_test_end(True)
 
     def after_test_end(self, restart):
         assert isinstance(self.state, RunnerManagerState.running)
-        test, test_group = self.get_next_test()
+        test, test_group, group_metadata = self.get_next_test()
         if test is None:
             return RunnerManagerState.stop()
         if test_group != self.state.test_group:
             # We are starting a new group of tests, so force a restart
             restart = True
         if restart:
-            return RunnerManagerState.restarting(test, test_group)
+            return RunnerManagerState.restarting(test, test_group, group_metadata)
         else:
             return RunnerManagerState.running(test, test_group)
 
     def restart_runner(self):
         """Stop and restart the TestRunner"""
         assert isinstance(self.state, RunnerManagerState.restarting)
         self.stop_runner()
-        return RunnerManagerState.initializing(self.state.test, self.state.test_group, 0)
+        return RunnerManagerState.initializing(self.state.test, self.state.test_group, self.state.group_metadata, 0)
 
     def log(self, action, kwargs):
         getattr(self.logger, action)(**kwargs)
 
     def error(self, message):
         self.logger.error(message)
         self.restart_runner()
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
@@ -1,9 +1,10 @@
 import os
+from collections import defaultdict
 
 import mozinfo
 
 from wptmanifest.parser import atoms
 
 atom_reset = atoms["Reset"]
 enabled_tests = set(["testharness", "reftest", "wdspec"])
 
@@ -110,16 +111,21 @@ class Test(object):
         self._test_metadata = test_metadata
         self.timeout = timeout if timeout is not None else self.default_timeout
         self.path = path
         self.environment = {"protocol": protocol, "prefs": self.prefs}
 
     def __eq__(self, other):
         return self.id == other.id
 
+    def update_metadata(self, metadata=None):
+        if metadata is None:
+            metadata = {}
+        return metadata
+
     @classmethod
     def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
         timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
         protocol = "https" if hasattr(manifest_item, "https") and manifest_item.https else "http"
         return cls(manifest_item.source_file.tests_root,
                    manifest_item.url,
                    inherit_metadata,
                    test_metadata,
@@ -317,16 +323,27 @@ class ReftestTest(Test):
                                         [],
                                         None,
                                         [])
 
             node.references.append((reference, ref_type))
 
         return node
 
+    def update_metadata(self, metadata):
+        if not "url_count" in metadata:
+            metadata["url_count"] = defaultdict(int)
+        for reference, _ in self.references:
+            # We assume a naive implementation in which a url with multiple
+            # possible screenshots will need to take both the lhs and rhs screenshots
+            # for each possible match
+            metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
+            reference.update_metadata(metadata)
+        return metadata
+
     @property
     def id(self):
         return self.url
 
     @property
     def keys(self):
         return ("reftype", "refurl")