Bug 1375073 - Modify talos startup test framework to support receiving multiple values from a single iteration; r=jmaher
author      Rob Wood <rwood@mozilla.com>
date        Fri, 23 Jun 2017 15:01:08 -0400
changeset   600422  81e1139a7556f17dc5e26f649305a1ede9a976bf
parent      599048  2403cb851fe3e56b9018eaa645c78e913d927812
child       634987  1c3aa0b67078cf0a6f2243f45ac36c89410d63c6
push id     65754
push user   rwood@mozilla.com
push date   Mon, 26 Jun 2017 16:39:40 +0000
reviewers   jmaher
bugs        1375073
milestone   56.0a1
phase       draft
Bug 1375073 - Modify talos startup test framework to support receiving multiple values from a single iteration; r=jmaher MozReview-Commit-ID: EUGt71dB6f6
testing/talos/talos/cmdline.py
testing/talos/talos/config.py
testing/talos/talos/results.py
testing/talos/talos/run_tests.py
testing/talos/talos/test.py
--- a/testing/talos/talos/cmdline.py
+++ b/testing/talos/talos/cmdline.py
@@ -75,16 +75,20 @@ def create_parser(mach_interface=False):
     add_arg('--rss', action='store_true',
             help="Collect RSS counters from pageloader instead of the"
                  " operating system")
     add_arg('--mainthread', action='store_true',
             help="Collect mainthread IO data from the browser by setting"
                  " an environment variable")
     add_arg("--mozAfterPaint", action='store_true', dest="tpmozafterpaint",
             help="wait for MozAfterPaint event before recording the time")
+    add_arg("--firstPaint", action='store_true', dest="firstpaint",
+            help="Also report the first paint value in supported tests")
+    add_arg("--userReady", action='store_true', dest="userready",
+            help="Also report the user ready value in supported tests")
     add_arg('--spsProfile', action="store_true", dest="gecko_profile",
             help="(Deprecated - Use --geckoProfile instead.) Profile the "
                  "run and output the results in $MOZ_UPLOAD_DIR.")
     add_arg('--spsProfileInterval', dest='gecko_profile_interval', type=float,
             help="(Deprecated - Use --geckoProfileInterval instead.) How "
                  "frequently to take samples (ms)")
     add_arg('--spsProfileEntries', dest="gecko_profile_entries", type=int,
             help="(Deprecated - Use --geckoProfileEntries instead.) How "
--- a/testing/talos/talos/config.py
+++ b/testing/talos/talos/config.py
@@ -34,16 +34,19 @@ DEFAULTS = dict(
         resolution=1,
         rss=False,
         mainthread=False,
         shutdown=False,
         timeout=3600,
         tpchrome=True,
         tpcycles=10,
         tpmozafterpaint=False,
+        firstpaint=False,
+        userready=False,
+        testeventmap=[],
         tpdisable_e10s=False,
         tpnoisy=True,
         tppagecycles=1,
         tploadnocache=False,
         tpscrolltest=False,
         tprender=False,
         win_counters=[],
         w7_counters=[],
@@ -196,16 +199,18 @@ GLOBAL_OVERRIDES = (
     'mainthread',
     'shutdown',
     'tpcycles',
     'tpdelay',
     'tppagecycles',
     'tpmanifest',
     'tptimeout',
     'tpmozafterpaint',
+    'firstpaint',
+    'userready',
 )
 
 
 CONF_VALIDATORS = []
 
 
 def validator(func):
     """
@@ -349,23 +354,29 @@ def build_manifest(config, manifestName)
     newManifestName = manifestName + '.develop'
 
     # return new manifest
     return newManifestName
 
 
 def get_test(config, global_overrides, counters, test_instance):
     mozAfterPaint = getattr(test_instance, 'tpmozafterpaint', None)
+    firstPaint = getattr(test_instance, 'firstpaint', None)
+    userReady = getattr(test_instance, 'userready', None)
 
     test_instance.update(**global_overrides)
 
     # update original value of mozAfterPaint, this could be 'false',
     # so check for None
     if mozAfterPaint is not None:
         test_instance.tpmozafterpaint = mozAfterPaint
+    if firstPaint is not None:
+        test_instance.firstpaint = firstPaint
+    if userReady is not None:
+        test_instance.userready = userReady
 
     # fix up url
     url = getattr(test_instance, 'url', None)
     if url:
         test_instance.url = utils.interpolate(convert_url(config, url))
 
     # fix up tpmanifest
     tpmanifest = getattr(test_instance, 'tpmanifest', None)
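A small illustrative sketch of the save/override/restore pattern get_test() relies on, so that a 'firstpaint' value set explicitly in a test definition survives the global overrides (FakeTest is a hypothetical stand-in, not a real talos class):

class FakeTest(object):
    firstpaint = True              # set explicitly by the test definition

    def update(self, **kw):
        self.__dict__.update(kw)

test_instance = FakeTest()
firstPaint = getattr(test_instance, 'firstpaint', None)  # remember the per-test value
test_instance.update(firstpaint=False)                   # global overrides clobber it
if firstPaint is not None:                                # the saved value could be False,
    test_instance.firstpaint = firstPaint                 # so check for None, not truthiness
print(test_instance.firstpaint)  # -> True
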
--- a/testing/talos/talos/results.py
+++ b/testing/talos/talos/results.py
@@ -29,17 +29,16 @@ class TalosResults(object):
     def add_extra_option(self, extra_option):
         self.extra_options.append(extra_option)
 
     def output(self, output_formats):
         """
         output all results to appropriate URLs
         - output_formats: a dict mapping formats to a list of URLs
         """
-
         tbpl_output = {}
         try:
 
             for key, urls in output_formats.items():
                 _output = output.Output(self)
                 results = _output()
                 for url in urls:
                     _output.output(results, url, tbpl_output)
@@ -169,32 +168,43 @@ class TsResults(Results):
 
         string = string.strip()
         lines = string.splitlines()
 
         # gather the data
         self.results = []
         index = 0
 
-        # Handle the case where we support a pagename in the results
-        # (new format)
-        for line in lines:
-            result = {}
-            r = line.strip().split(',')
-            r = [i for i in r if i]
-            if len(r) <= 1:
-                continue
+        # Case where one test iteration may report multiple event values, e.g. ts_paint
+        if string.startswith('{'):
+            jsonResult = json.loads(string)
+            result = {'runs': {}}
             result['index'] = index
-            result['page'] = r[0]
-            # note: if we have len(r) >1, then we have pagename,raw_results
-            result['runs'] = [float(i) for i in r[1:]]
+            result['page'] = 'NULL'
+
+            for event_label in jsonResult:
+                result['runs'][str(event_label)] = [jsonResult[event_label]]
             self.results.append(result)
-            index += 1
 
-        # The original case where we just have numbers and no pagename
+        # Case where we support a pagename in the results
+        if not self.results:
+            for line in lines:
+                result = {}
+                r = line.strip().split(',')
+                r = [i for i in r if i]
+                if len(r) <= 1:
+                    continue
+                result['index'] = index
+                result['page'] = r[0]
+                # note: if we have len(r) >1, then we have pagename,raw_results
+                result['runs'] = [float(i) for i in r[1:]]
+                self.results.append(result)
+                index += 1
+
+        # Original case where we just have numbers and no pagename
         if not self.results:
             result = {}
             result['index'] = index
             result['page'] = 'NULL'
             result['runs'] = [float(val) for val in string.split('|')]
             self.results.append(result)
 
 
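For reference, a sketch of the three raw-output formats TsResults.parse() now distinguishes, and of how the new JSON branch shapes the 'runs' dict (this assumes the json module is imported at the top of results.py, which the hunk above does not show; the numbers and page names are illustrative only):

import json

multi_value = '{"ts_paint": 1338, "ts_first_paint": 1438, "ts_user_ready": 1538}'
with_pagename = 'page_a.html,772,693\npage_b.html,901,854'   # pagename,raw_results per line
plain_numbers = '772|693|701'                                 # pipe-separated values only

# JSON branch: a single iteration reporting several event values at once
parsed = json.loads(multi_value)
runs = {str(label): [parsed[label]] for label in parsed}
# runs -> {'ts_paint': [1338], 'ts_first_paint': [1438], 'ts_user_ready': [1538]}
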
--- a/testing/talos/talos/run_tests.py
+++ b/testing/talos/talos/run_tests.py
@@ -1,14 +1,15 @@
 #!/usr/bin/env python
 
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+import copy
 import mozversion
 import os
 import sys
 import time
 import traceback
 import urllib
 import utils
 import mozhttpd
@@ -147,18 +148,18 @@ def run_tests(config, browser_config):
     browser_config['browser_name'] = version_info['application_name']
     browser_config['browser_version'] = version_info['application_version']
     browser_config['buildid'] = version_info['application_buildid']
     try:
         browser_config['repository'] = version_info['application_repository']
         browser_config['sourcestamp'] = version_info['application_changeset']
     except KeyError:
         if not browser_config['develop']:
-            print("unable to find changeset or repository: %s" % version_info)
-            sys.exit()
+            print("Abort: unable to find changeset or repository: %s" % version_info)
+            sys.exit(1)
         else:
             browser_config['repository'] = 'develop'
             browser_config['sourcestamp'] = 'develop'
 
     # get test date in seconds since epoch
     if testdate:
         date = int(time.mktime(time.strptime(testdate,
                                              '%a, %d %b %Y %H:%M:%S GMT')))
@@ -196,18 +197,17 @@ def run_tests(config, browser_config):
     if mitmproxy_recordings_list is not False:
         # needed so can tell talos ttest to allow external connections
         browser_config['mitmproxy'] = True
 
         # start mitmproxy playback; this also generates the CA certificate
         mitmdump_path = config.get('mitmdumpPath', False)
         if mitmdump_path is False:
             # cannot continue, need path for mitmdump playback tool
-            LOG.error('Aborting: mitmdumpPath was not provided on cmd line but is required')
-            sys.exit()
+            raise TalosError('Aborting: mitmdumpPath not provided on cmd line but is required')
 
         mitmproxy_recording_path = os.path.join(here, 'mitmproxy')
         mitmproxy_proc = mitmproxy.start_mitmproxy_playback(mitmdump_path,
                                                             mitmproxy_recording_path,
                                                             mitmproxy_recordings_list.split(),
                                                             browser_config['browser_path'])
 
         # install the generated CA certificate into Firefox
@@ -227,17 +227,42 @@ def run_tests(config, browser_config):
     timer = utils.Timer()
     LOG.suite_start(tests=[test['name'] for test in tests])
     try:
         for test in tests:
             testname = test['name']
             LOG.test_start(testname)
 
             mytest = TTest()
-            talos_results.add(mytest.runTest(browser_config, test))
+
+            # some tests like ts_paint return multiple results in a single iteration
+            if test.get('firstpaint', False) or test.get('userready', False):
+                # we need a 'testeventmap' to tell us which tests each event should map to
+                multi_value_result = None
+                separate_results_list = []
+
+                test_event_map = test.get('testeventmap', None)
+                if test_event_map is None:
+                    raise TalosError("Need 'testeventmap' in test.py for %s" % test.get('name'))
+
+                # run the test
+                multi_value_result = mytest.runTest(browser_config, test)
+                if multi_value_result is None:
+                    raise TalosError("Abort: no results returned for %s" % test.get('name'))
+
+                # parse out the multi-value results and 'fake' them to appear as separate tests
+                separate_results_list = convert_to_separate_test_results(multi_value_result,
+                                                                         test_event_map)
+
+                # now that we have the separate test results, store them
+                for test_result in separate_results_list:
+                    talos_results.add(test_result)
+            else:
+                # just expecting regular test - one result value per iteration
+                talos_results.add(mytest.runTest(browser_config, test))
 
             LOG.test_end(testname, status='OK')
 
     except TalosRegression as exc:
         LOG.error("Detected a regression for %s" % testname)
         # by returning 1, we report an orange to buildbot
         # http://docs.buildbot.net/latest/developer/results.html
         LOG.test_end(testname, status='FAIL', message=str(exc),
@@ -268,16 +293,54 @@ def run_tests(config, browser_config):
             print("Thanks for running Talos locally. Results are in %s"
                   % (results_urls['output_urls']))
 
     # we will stop running tests on a failed test, or we will return 0 for
     # green
     return 0
 
 
+def convert_to_separate_test_results(multi_value_result, test_event_map):
+    ''' Receive a test result that actually contains multiple values from a single iteration, and
+    parse it out in order to 'fake' three separate test results.
+
+    Incoming result looks like this:
+
+    [{'index': 0, 'runs': {'event_1': [1338, ...], 'event_2': [1438, ...], 'event_3':
+    [1538, ...]}, 'page': 'NULL'}]
+
+    We want to parse it out such that we have 'faked' three separate tests, setting test names
+    and taking the run values for each. End goal is to have results reported as three separate
+    tests, like this:
+
+    PERFHERDER_DATA: {"framework": {"name": "talos"}, "suites": [{"subtests": [{"replicates":
+    [1338, ...], "name": "ts_paint", "value": 1338}], "extraOptions": ["e10s"], "name":
+    "ts_paint"}, {"subtests": [{"replicates": [1438, ...], "name": "ts_first_paint", "value":
+    1438}], "extraOptions": ["e10s"], "name": "ts_first_paint"}, {"subtests": [{"replicates":
+    [1538, ...], "name": "ts_user_ready", "value": 1538}], "extraOptions": ["e10s"], "name":
+    "ts_user_ready"}]}
+    '''
+    list_of_separate_tests = []
+
+    for next_test in test_event_map:
+        # copy the original test result that has multiple values per iteration
+        separate_test = copy.deepcopy(multi_value_result)
+        # set the name of the new 'faked' test
+        separate_test.test_config['name'] = next_test['name']
+        # set the run values for the new test
+        for x in separate_test.results:
+            for item in x.results:
+                all_runs = item['runs']
+                item['runs'] = all_runs[next_test['label']]
+        # add it to our list of results to return
+        list_of_separate_tests.append(separate_test)
+
+    return list_of_separate_tests
+
+
 def main(args=sys.argv[1:]):
     try:
         config, browser_config = get_configs()
     except ConfigurationError as exc:
         sys.exit("ERROR: %s" % exc)
     sys.exit(run_tests(config, browser_config))
 
 
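A hypothetical walk-through of the split performed by convert_to_separate_test_results(); FakeCounterResults and FakeTestResult are illustrative stand-ins rather than real talos classes, and the testeventmap entries mirror the ts_paint example from the docstring above:

import copy

test_event_map = [
    {'label': 'ts_paint',       'name': 'ts_paint'},
    {'label': 'ts_first_paint', 'name': 'ts_first_paint'},
    {'label': 'ts_user_ready',  'name': 'ts_user_ready'},
]

class FakeCounterResults(object):
    def __init__(self):
        self.results = [{'index': 0, 'page': 'NULL',
                         'runs': {'ts_paint': [1338],
                                  'ts_first_paint': [1438],
                                  'ts_user_ready': [1538]}}]

class FakeTestResult(object):
    def __init__(self):
        self.test_config = {'name': 'ts_paint'}
        self.results = [FakeCounterResults()]

multi_value_result = FakeTestResult()
separate = []
for next_test in test_event_map:
    one = copy.deepcopy(multi_value_result)          # copy the combined result
    one.test_config['name'] = next_test['name']      # rename the 'faked' test
    for x in one.results:
        for item in x.results:
            item['runs'] = item['runs'][next_test['label']]  # keep only this event's runs
    separate.append(one)

print(separate[1].test_config['name'], separate[1].results[0].results[0]['runs'])
# -> ts_first_paint [1438]
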
--- a/testing/talos/talos/test.py
+++ b/testing/talos/talos/test.py
@@ -99,16 +99,19 @@ class TsBase(Test):
         'gecko_profile_entries',
         'gecko_profile_startup',
         'preferences',
         'xperf_counters',
         'xperf_providers',
         'xperf_user_providers',
         'xperf_stackwalk',
         'tpmozafterpaint',
+        'firstpaint',
+        'userready',
+        'testeventmap',
         'extensions',
         'filters',
         'setup',
         'cleanup',
         'webextensions',
         'reinstall',     # A list of files from the profile directory that
                          # should be copied to the temporary profile prior to
                          # running each cycle, to avoid one cycle overwriting
@@ -230,22 +233,22 @@ class tresize(TsBase):
 
 class PageloaderTest(Test):
     """abstract base class for a Talos Pageloader test"""
     tpmanifest = None  # test manifest
     tpcycles = 1  # number of time to run each page
     cycles = None
     timeout = None
     keys = ['tpmanifest', 'tpcycles', 'tppagecycles', 'tprender', 'tpchrome',
-            'tpmozafterpaint', 'tploadnocache', 'rss', 'mainthread',
-            'resolution', 'cycles', 'gecko_profile', 'gecko_profile_interval',
-            'gecko_profile_entries', 'tptimeout', 'win_counters', 'w7_counters',
-            'linux_counters', 'mac_counters', 'tpscrolltest', 'xperf_counters',
-            'timeout', 'shutdown', 'responsiveness', 'profile_path',
-            'xperf_providers', 'xperf_user_providers', 'xperf_stackwalk',
+            'tpmozafterpaint', 'tploadnocache', 'firstpaint', 'userready',
+            'testeventmap', 'rss', 'mainthread', 'resolution', 'cycles',
+            'gecko_profile', 'gecko_profile_interval', 'gecko_profile_entries',
+            'tptimeout', 'win_counters', 'w7_counters', 'linux_counters', 'mac_counters',
+            'tpscrolltest', 'xperf_counters', 'timeout', 'shutdown', 'responsiveness',
+            'profile_path', 'xperf_providers', 'xperf_user_providers', 'xperf_stackwalk',
             'filters', 'preferences', 'extensions', 'setup', 'cleanup',
             'lower_is_better', 'alert_threshold', 'unit', 'webextensions']
 
 
 class QuantumPageloadTest(PageloaderTest):
     """
     Base class for a Quantum Pageload test
     """