Bug 1311991 - Add testing/mach_commands.py to flake8 linter, r?gps draft
authorAndrew Halberstadt <ahalberstadt@mozilla.com>
Thu, 17 Aug 2017 15:50:39 -0400
changeset 649089 8d1809e0b240a04cba02e219bd2885ee891b6ccc
parent 649088 fdfb1b2ee708a69beded746934b6f970f8306d79
child 649090 494cfdde5627d56868d9d7316e6798703e05b693
push id74949
push userahalberstadt@mozilla.com
push dateFri, 18 Aug 2017 16:15:05 +0000
reviewersgps
bugs1311991
milestone57.0a1
Bug 1311991 - Add testing/mach_commands.py to flake8 linter, r?gps This was mostly generated with autopep8 v1.3.2. A few leftover errors were fixed by hand. MozReview-Commit-ID: 5crfUz0xj3O
testing/mach_commands.py
tools/lint/flake8.yml
--- a/testing/mach_commands.py
+++ b/testing/mach_commands.py
@@ -168,17 +168,17 @@ TEST_FLAVORS = {
     },
     'xpcshell': {
         'mach_command': 'xpcshell-test',
         'kwargs': {'test_paths': []},
     },
 }
 
 for i in range(1, MOCHITEST_TOTAL_CHUNKS + 1):
-    TEST_SUITES['mochitest-%d' %i] = {
+    TEST_SUITES['mochitest-%d' % i] = {
         'aliases': ('M%d' % i, 'm%d' % i),
         'mach_command': 'mochitest',
         'kwargs': {
             'flavor': 'mochitest',
             'subsuite': 'default',
             'chunk_by_dir': MOCHITEST_CHUNK_BY_DIR,
             'total_chunks': MOCHITEST_TOTAL_CHUNKS,
             'this_chunk': i,
@@ -192,17 +192,18 @@ name or suite alias.
 
 The following test suites and aliases are supported: %s
 ''' % ', '.join(sorted(TEST_SUITES))
 TEST_HELP = TEST_HELP.strip()
 
 
 @CommandProvider
 class Test(MachCommandBase):
-    @Command('test', category='testing', description='Run tests (detects the kind of test and runs it).')
+    @Command('test', category='testing',
+             description='Run tests (detects the kind of test and runs it).')
     @CommandArgument('what', default=None, nargs='*', help=TEST_HELP)
     def test(self, what):
         """Run tests from names or paths.
 
         mach test accepts arguments specifying which tests to run. Each argument
         can be:
 
         * The path to a test file
@@ -282,17 +283,18 @@ class Test(MachCommandBase):
             # tests that match any condition. Bug 1210213 tracks implementing
             # more flexible querying.
             if tags:
                 run_tests = list(resolver.resolve_tests(tags=tags))
             if paths:
                 run_tests += [t for t in resolver.resolve_tests(paths=paths)
                               if not (tags & set(t.get('tags', '').split()))]
             if flavors:
-                run_tests = [t for t in run_tests if t['flavor'] not in flavors]
+                run_tests = [
+                    t for t in run_tests if t['flavor'] not in flavors]
                 for flavor in flavors:
                     run_tests += list(resolver.resolve_tests(flavor=flavor))
 
         if not run_suites and not run_tests:
             print(UNKNOWN_TEST)
             return 1
 
         status = None
@@ -322,50 +324,49 @@ class Test(MachCommandBase):
                 print(UNKNOWN_FLAVOR % flavor)
                 status = 1
                 continue
 
             kwargs = dict(m['kwargs'])
             kwargs['subsuite'] = subsuite
 
             res = self._mach_context.commands.dispatch(
-                    m['mach_command'], self._mach_context,
-                    test_objects=tests, **kwargs)
+                m['mach_command'], self._mach_context,
+                test_objects=tests, **kwargs)
             if res:
                 status = res
 
         return status
 
 
 @CommandProvider
 class MachCommands(MachCommandBase):
     @Command('cppunittest', category='testing',
-        description='Run cpp unit tests (C++ tests).')
+             description='Run cpp unit tests (C++ tests).')
     @CommandArgument('test_files', nargs='*', metavar='N',
-        help='Test to run. Can be specified as one or more files or ' \
-            'directories, or omitted. If omitted, the entire test suite is ' \
-            'executed.')
-
+                     help='Test to run. Can be specified as one or more files or '
+                     'directories, or omitted. If omitted, the entire test suite is '
+                     'executed.')
     def run_cppunit_test(self, **params):
-        import mozinfo
         from mozlog import commandline
         log = commandline.setup_logging("cppunittest",
                                         {},
                                         {"tbpl": sys.stdout})
 
         # See if we have crash symbols
         symbols_path = os.path.join(self.distdir, 'crashreporter-symbols')
         if not os.path.isdir(symbols_path):
             symbols_path = None
 
         # If no tests specified, run all tests in main manifest
         tests = params['test_files']
         if len(tests) == 0:
             tests = [os.path.join(self.distdir, 'cppunittests')]
-            manifest_path = os.path.join(self.topsrcdir, 'testing', 'cppunittest.ini')
+            manifest_path = os.path.join(
+                self.topsrcdir, 'testing', 'cppunittest.ini')
         else:
             manifest_path = None
 
         if conditions.is_android(self):
             from mozrunner.devices.android_device import verify_android_device
             verify_android_device(self, install=False)
             return self.run_android_test(tests, symbols_path, manifest_path, log)
 
@@ -414,36 +415,40 @@ class MachCommands(MachCommandBase):
             result = remotecppunittests.run_test_harness(options, tests)
         except Exception as e:
             log.error("Caught exception running cpp unit tests: %s" % str(e))
             result = False
             raise
 
         return 0 if result else 1
 
+
 def executable_name(name):
     return name + '.exe' if sys.platform.startswith('win') else name
 
+
 @CommandProvider
 class CheckSpiderMonkeyCommand(MachCommandBase):
-    @Command('check-spidermonkey', category='testing', description='Run SpiderMonkey tests (JavaScript engine).')
-    @CommandArgument('--valgrind', action='store_true', help='Run jit-test suite with valgrind flag')
-
+    @Command('check-spidermonkey', category='testing',
+             description='Run SpiderMonkey tests (JavaScript engine).')
+    @CommandArgument('--valgrind', action='store_true',
+                     help='Run jit-test suite with valgrind flag')
     def run_checkspidermonkey(self, **params):
         import subprocess
 
         self.virtualenv_manager.ensure()
         python = self.virtualenv_manager.python_path
 
         js = os.path.join(self.bindir, executable_name('js'))
 
         print('Running jit-tests')
         jittest_cmd = [
             python,
-            os.path.join(self.topsrcdir, 'js', 'src', 'jit-test', 'jit_test.py'),
+            os.path.join(self.topsrcdir, 'js', 'src',
+                         'jit-test', 'jit_test.py'),
             js,
             '--no-slow',
             '--jitflags=all',
         ]
         if params['valgrind']:
             jittest_cmd.append('--valgrind')
 
         jittest_result = subprocess.call(jittest_cmd)
@@ -453,60 +458,64 @@ class CheckSpiderMonkeyCommand(MachComma
             python,
             os.path.join(self.topsrcdir, 'js', 'src', 'tests', 'jstests.py'),
             js,
             '--jitflags=all',
         ]
         jstest_result = subprocess.call(jstest_cmd)
 
         print('running jsapi-tests')
-        jsapi_tests_cmd = [os.path.join(self.bindir, executable_name('jsapi-tests'))]
+        jsapi_tests_cmd = [os.path.join(
+            self.bindir, executable_name('jsapi-tests'))]
         jsapi_tests_result = subprocess.call(jsapi_tests_cmd)
 
         print('running check-style')
-        check_style_cmd = [python, os.path.join(self.topsrcdir, 'config', 'check_spidermonkey_style.py')]
-        check_style_result = subprocess.call(check_style_cmd, cwd=os.path.join(self.topsrcdir, 'js', 'src'))
+        check_style_cmd = [python, os.path.join(
+            self.topsrcdir, 'config', 'check_spidermonkey_style.py')]
+        check_style_result = subprocess.call(
+            check_style_cmd, cwd=os.path.join(self.topsrcdir, 'js', 'src'))
 
         print('running check-masm')
-        check_masm_cmd = [python, os.path.join(self.topsrcdir, 'config', 'check_macroassembler_style.py')]
-        check_masm_result = subprocess.call(check_masm_cmd, cwd=os.path.join(self.topsrcdir, 'js', 'src'))
+        check_masm_cmd = [python, os.path.join(
+            self.topsrcdir, 'config', 'check_macroassembler_style.py')]
+        check_masm_result = subprocess.call(
+            check_masm_cmd, cwd=os.path.join(self.topsrcdir, 'js', 'src'))
 
         print('running check-js-msg-encoding')
-        check_js_msg_cmd = [python, os.path.join(self.topsrcdir, 'config', 'check_js_msg_encoding.py')]
-        check_js_msg_result = subprocess.call(check_js_msg_cmd, cwd=self.topsrcdir)
+        check_js_msg_cmd = [python, os.path.join(
+            self.topsrcdir, 'config', 'check_js_msg_encoding.py')]
+        check_js_msg_result = subprocess.call(
+            check_js_msg_cmd, cwd=self.topsrcdir)
 
-        all_passed = jittest_result and jstest_result and jsapi_tests_result and check_style_result and check_masm_result and check_js_msg_result
+        all_passed = jittest_result and jstest_result and jsapi_tests_result and \
+            check_style_result and check_masm_result and check_js_msg_result
 
         return all_passed
 
+
 @CommandProvider
 class JsapiTestsCommand(MachCommandBase):
     @Command('jsapi-tests', category='testing', description='Run jsapi tests (JavaScript engine).')
     @CommandArgument('test_name', nargs='?', metavar='N',
-        help='Test to run. Can be a prefix or omitted. If omitted, the entire ' \
-             'test suite is executed.')
-
+                     help='Test to run. Can be a prefix or omitted. If omitted, the entire '
+                     'test suite is executed.')
     def run_jsapitests(self, **params):
         import subprocess
 
-        bin_suffix = ''
-        if sys.platform.startswith('win'):
-            bin_suffix = '.exe'
-
         print('running jsapi-tests')
-        jsapi_tests_cmd = [os.path.join(self.bindir, executable_name('jsapi-tests'))]
+        jsapi_tests_cmd = [os.path.join(
+            self.bindir, executable_name('jsapi-tests'))]
         if params['test_name']:
             jsapi_tests_cmd.append(params['test_name'])
 
         jsapi_tests_result = subprocess.call(jsapi_tests_cmd)
 
         return jsapi_tests_result
 
 
-
 def get_parser(argv=None):
     parser = ArgumentParser()
     parser.add_argument(dest="suite_name",
                         nargs=1,
                         choices=['mochitest'],
                         type=str,
                         help="The test for which chunk should be found. It corresponds "
                              "to the mach test invoked (only 'mochitest' currently).")
@@ -538,17 +547,18 @@ def get_parser(argv=None):
 
     parser.add_argument('--disable-e10s',
                         action='store_false',
                         dest='e10s',
                         help='Find test on chunk with electrolysis preferences disabled.',
                         default=True)
 
     parser.add_argument('-p', '--platform',
-                        choices=['linux', 'linux64', 'mac', 'macosx64', 'win32', 'win64'],
+                        choices=['linux', 'linux64', 'mac',
+                                 'macosx64', 'win32', 'win64'],
                         dest='platform',
                         help="Platform for the chunk to find the test.",
                         default=None)
 
     parser.add_argument('--debug',
                         action='store_true',
                         dest='debug',
                         help="Find the test on chunk in a debug build.",
@@ -571,16 +581,17 @@ def download_mozinfo(platform=None, debu
             platform = 'mac64'
         args.extend(['-p', platform])
     if debug_build:
         args.extend(['--debug-build'])
 
     subprocess.call(args)
     return temp_dir, temp_path
 
+
 @CommandProvider
 class ChunkFinder(MachCommandBase):
     @Command('find-test-chunk', category='testing',
              description='Find which chunk a test belongs to (works for mochitest).',
              parser=get_parser)
     def chunk_finder(self, **kwargs):
         total_chunks = kwargs['total_chunks']
         test_path = kwargs['test_path'][0]
@@ -605,37 +616,41 @@ class ChunkFinder(MachCommandBase):
             'e10s': kwargs['e10s'],
             'subsuite': subsuite,
         }
 
         temp_dir = None
         if kwargs['platform'] or kwargs['debug']:
             self._activate_virtualenv()
             self.virtualenv_manager.install_pip_package('mozdownload==1.17')
-            temp_dir, temp_path = download_mozinfo(kwargs['platform'], kwargs['debug'])
+            temp_dir, temp_path = download_mozinfo(
+                kwargs['platform'], kwargs['debug'])
             args['extra_mozinfo_json'] = temp_path
 
         found = False
-        for this_chunk in range(1, total_chunks+1):
+        for this_chunk in range(1, total_chunks + 1):
             args['thisChunk'] = this_chunk
             try:
-                self._mach_context.commands.dispatch(suite_name, self._mach_context, flavor=flavor, resolve_tests=False, **args)
+                self._mach_context.commands.dispatch(
+                    suite_name, self._mach_context, flavor=flavor, resolve_tests=False, **args)
             except SystemExit:
                 pass
             except KeyboardInterrupt:
                 break
 
             fp = open(os.path.expanduser(args['dump_tests']), 'r')
             tests = json.loads(fp.read())['active_tests']
             for test in tests:
                 if test_path == test['path']:
                     if 'disabled' in test:
-                        print('The test %s for flavor %s is disabled on the given platform' % (test_path, flavor))
+                        print('The test %s for flavor %s is disabled on the given platform' % (
+                            test_path, flavor))
                     else:
-                        print('The test %s for flavor %s is present in chunk number: %d' % (test_path, flavor, this_chunk))
+                        print('The test %s for flavor %s is present in chunk number: %d' % (
+                            test_path, flavor, this_chunk))
                     found = True
                     break
 
             if found:
                 break
 
         if not found:
             raise Exception("Test %s not found." % test_path)
@@ -643,40 +658,42 @@ class ChunkFinder(MachCommandBase):
         os.remove(dump_tests)
         if temp_dir:
             shutil.rmtree(temp_dir)
 
 
 @CommandProvider
 class TestInfoCommand(MachCommandBase):
     from datetime import date, timedelta
+
     @Command('test-info', category='testing',
-        description='Display historical test result summary.')
+             description='Display historical test result summary.')
     @CommandArgument('test_names', nargs=argparse.REMAINDER,
-        help='Test(s) of interest.')
+                     help='Test(s) of interest.')
     @CommandArgument('--branches',
-        default='mozilla-central,mozilla-inbound,autoland',
-        help='Report for named branches (default: mozilla-central,mozilla-inbound,autoland)')
+                     default='mozilla-central,mozilla-inbound,autoland',
+                     help='Report for named branches '
+                          '(default: mozilla-central,mozilla-inbound,autoland)')
     @CommandArgument('--start',
-        default=(date.today() - timedelta(7)).strftime("%Y-%m-%d"),
-        help='Start date (YYYY-MM-DD)')
+                     default=(date.today() - timedelta(7)
+                              ).strftime("%Y-%m-%d"),
+                     help='Start date (YYYY-MM-DD)')
     @CommandArgument('--end',
-        default=date.today().strftime("%Y-%m-%d"),
-        help='End date (YYYY-MM-DD)')
+                     default=date.today().strftime("%Y-%m-%d"),
+                     help='End date (YYYY-MM-DD)')
     @CommandArgument('--show-info', action='store_true',
-        help='Retrieve and display general test information.')
+                     help='Retrieve and display general test information.')
     @CommandArgument('--show-results', action='store_true',
-        help='Retrieve and display ActiveData test result summary.')
+                     help='Retrieve and display ActiveData test result summary.')
     @CommandArgument('--show-durations', action='store_true',
-        help='Retrieve and display ActiveData test duration summary.')
+                     help='Retrieve and display ActiveData test duration summary.')
     @CommandArgument('--show-bugs', action='store_true',
-        help='Retrieve and display related Bugzilla bugs.')
+                     help='Retrieve and display related Bugzilla bugs.')
     @CommandArgument('--verbose', action='store_true',
-        help='Enable debug logging.')
-
+                     help='Enable debug logging.')
     def test_info(self, **params):
 
         import which
         from mozbuild.base import MozbuildObject
 
         self.branches = params['branches']
         self.start = params['start']
         self.end = params['end']
@@ -684,17 +701,17 @@ class TestInfoCommand(MachCommandBase):
         self.show_results = params['show_results']
         self.show_durations = params['show_durations']
         self.show_bugs = params['show_bugs']
         self.verbose = params['verbose']
 
         if (not self.show_info and
             not self.show_results and
             not self.show_durations and
-            not self.show_bugs):
+                not self.show_bugs):
             # by default, show everything
             self.show_info = True
             self.show_results = True
             self.show_durations = True
             self.show_bugs = True
 
         here = os.path.abspath(os.path.dirname(__file__))
         build_obj = MozbuildObject.from_environment(cwd=here)
@@ -800,17 +817,17 @@ class TestInfoCommand(MachCommandBase):
             print("%s not found in any test manifest!" % self.full_test_name)
         else:
             print("%s found in more than one manifest!" % self.full_test_name)
 
         # short_name is full_test_name without path
         self.short_name = None
         name_idx = self.full_test_name.rfind('/')
         if name_idx > 0:
-            self.short_name = self.full_test_name[name_idx+1:]
+            self.short_name = self.full_test_name[name_idx + 1:]
 
         # robo_name is short_name without ".java" - for robocop
         self.robo_name = None
         if self.short_name:
             robo_idx = self.short_name.rfind('.java')
             if robo_idx > 0:
                 self.robo_name = self.short_name[:robo_idx]
             if self.short_name == self.test_name:
@@ -842,31 +859,31 @@ class TestInfoCommand(MachCommandBase):
             "groupby": ["result.test"],
             "where": {"and": [
                 {"or": searches},
                 {"in": {"build.branch": self.branches.split(',')}},
                 {"gt": {"run.timestamp": {"date": self.start}}},
                 {"lt": {"run.timestamp": {"date": self.end}}}
             ]}
         }
-        print("Querying ActiveData...") # Following query can take a long time
+        print("Querying ActiveData...")  # Following query can take a long time
         data = self.submit(query)
         if data and len(data) > 0:
             self.activedata_test_name = [
                 d['result']['test']
                 for p in simple_names + regex_names
                 for d in data
-                if re.match(p+"$", d['result']['test'])
+                if re.match(p + "$", d['result']['test'])
             ][0]  # first match is best match
         if self.activedata_test_name:
             print("Found records matching '%s' in ActiveData." %
-                self.activedata_test_name)
+                  self.activedata_test_name)
         else:
             print("Unable to find matching records in ActiveData; using %s!" %
-                self.test_name)
+                  self.test_name)
             self.activedata_test_name = self.test_name
 
     def get_platform(self, record):
         platform = record['build']['platform']
         type = record['build']['type']
         e10s = "-%s" % record['run']['type'] if 'run' in record else ""
         return "%s/%s%s:" % (platform, type, e10s)
 
@@ -907,17 +924,17 @@ class TestInfoCommand(MachCommandBase):
             "where": {"and": [
                 {"eq": {"result.test": self.activedata_test_name}},
                 {"in": {"build.branch": self.branches.split(',')}},
                 {"gt": {"run.timestamp": {"date": self.start}}},
                 {"lt": {"run.timestamp": {"date": self.end}}}
             ]}
         }
         print("\nTest results for %s on %s between %s and %s" %
-            (self.activedata_test_name, self.branches, self.start, self.end))
+              (self.activedata_test_name, self.branches, self.start, self.end))
         data = self.submit(query)
         if data and len(data) > 0:
             data.sort(key=self.get_platform)
             worst_rate = 0.0
             worst_platform = None
             total_runs = 0
             total_failures = 0
             for record in data:
@@ -930,47 +947,48 @@ class TestInfoCommand(MachCommandBase):
                 if rate >= worst_rate:
                     worst_rate = rate
                     worst_platform = platform
                     worst_failures = failures
                     worst_runs = runs
                 print("%-30s %6d failures in %6d runs" % (
                     platform, failures, runs))
             print("\nTotal: %d failures in %d runs or %.3f failures/run" %
-                (total_failures, total_runs, (float)(total_failures) / total_runs))
+                  (total_failures, total_runs, (float)(total_failures) / total_runs))
             if worst_failures > 0:
                 print("Worst rate on %s %d failures in %d runs or %.3f failures/run" %
-                    (worst_platform, worst_failures, worst_runs, worst_rate))
+                      (worst_platform, worst_failures, worst_runs, worst_rate))
         else:
             print("No test result data found.")
 
     def report_test_durations(self):
         # Report test durations summary from ActiveData
         query = {
-	    "from": "unittest",
+            "from": "unittest",
             "format": "list",
-	    "limit": 100,
-	    "groupby": ["build.platform","build.type","run.type"],
-	    "select": [
-		{"value":"result.duration","aggregate":"average","name":"average"},
-		{"value":"result.duration","aggregate":"min","name":"min"},
-		{"value":"result.duration","aggregate":"max","name":"max"},
-		{"aggregate":"count"}
-	    ],
-	    "where": {"and": [
+            "limit": 100,
+            "groupby": ["build.platform", "build.type", "run.type"],
+            "select": [
+                {"value": "result.duration",
+                    "aggregate": "average", "name": "average"},
+                {"value": "result.duration", "aggregate": "min", "name": "min"},
+                {"value": "result.duration", "aggregate": "max", "name": "max"},
+                {"aggregate": "count"}
+            ],
+            "where": {"and": [
                 {"eq": {"result.ok": "T"}},
-		{"eq": {"result.test": self.activedata_test_name}},
+                {"eq": {"result.test": self.activedata_test_name}},
                 {"in": {"build.branch": self.branches.split(',')}},
                 {"gt": {"run.timestamp": {"date": self.start}}},
                 {"lt": {"run.timestamp": {"date": self.end}}}
-	    ]}
+            ]}
         }
         data = self.submit(query)
         print("\nTest durations for %s on %s between %s and %s" %
-            (self.activedata_test_name, self.branches, self.start, self.end))
+              (self.activedata_test_name, self.branches, self.start, self.end))
         if data and len(data) > 0:
             data.sort(key=self.get_platform)
             for record in data:
                 platform = self.get_platform(record)
                 print("%-30s %6.2f s (%.2f s - %.2f s over %d runs)" % (
                     platform, record['average'], record['min'],
                     record['max'], record['count']))
         else:
@@ -982,17 +1000,17 @@ class TestInfoCommand(MachCommandBase):
         search = self.full_test_name
         if self.test_name:
             search = '%s,%s' % (search, self.test_name)
         if self.short_name:
             search = '%s,%s' % (search, self.short_name)
         if self.robo_name:
             search = '%s,%s' % (search, self.robo_name)
         payload = {'quicksearch': search,
-                   'include_fields':'id,summary'}
+                   'include_fields': 'id,summary'}
         response = requests.get('https://bugzilla.mozilla.org/rest/bug',
                                 payload)
         response.raise_for_status()
         json_response = response.json()
         print("\nBugzilla quick search for '%s':" % search)
         if 'bugs' in json_response:
             for bug in json_response['bugs']:
                 print("Bug %s: %s" % (bug['id'], bug['summary']))
--- a/tools/lint/flake8.yml
+++ b/tools/lint/flake8.yml
@@ -2,16 +2,17 @@
 flake8:
     description: Python linter
     include:
         - layout/tools/reftest
         - python/mozlint
         - security/manager
         - taskcluster
         - testing/firefox-ui
+        - testing/mach_commands.py
         - testing/marionette/client
         - testing/marionette/harness
         - testing/marionette/puppeteer
         - testing/mozbase
         - testing/mochitest
         - testing/talos/
         - tools/git
         - tools/lint