Bug 1358978 - Implement --run-until-failure for Marionette. draft
author Henrik Skupin <mail@hskupin.info>
Mon, 24 Apr 2017 12:48:33 +0200
changeset 594649 7aa91e7249da57fa5fbd1f7508a574c344c87f08
parent 594149 da66c4a05fda49d457d9411a7092fed87cf9e53a
child 633489 53619b30e25afd4becf12a041052d9e1357be288
push id 64102
push user bmo:hskupin@gmail.com
push date Thu, 15 Jun 2017 08:50:05 +0000
bugs 1358978
milestone 56.0a1
Bug 1358978 - Implement --run-until-failure for Marionette.

To help debug intermittent failures the --repeat option can be used, but it does not stop when a failure occurs; instead it continues until the specified number of iterations has been reached. With --run-until-failure the harness stops running the tests once the first failure appears. Without the --repeat option specified, it repeats 30 times by default.

MozReview-Commit-ID: Jlsss4PHNbj
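For clarity, here is a minimal sketch of how the two options combine in the runner's new repeat loop. It is illustrative only and not part of the patch; total_runs is a made-up helper:

def total_runs(repeat=None, run_until_failure=False, fails=False):
    # Mirrors the logic added to BaseMarionetteTestRunner:
    # --run-until-failure without an explicit --repeat caps repeats at 30.
    if run_until_failure and repeat is None:
        repeat = 30
    repeat = repeat or 0                  # no options given: a single run
    runs = 0
    for _ in range(repeat + 1):           # the initial run plus `repeat` repeats
        runs += 1
        if run_until_failure and fails:   # stop at the first failure
            break
    return runs

assert total_runs() == 1                                    # plain run
assert total_runs(repeat=4) == 5                            # --repeat 4
assert total_runs(run_until_failure=True) == 31             # all runs pass
assert total_runs(run_until_failure=True, fails=True) == 1  # stops immediately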
testing/marionette/harness/marionette_harness/runner/base.py
testing/marionette/harness/marionette_harness/tests/harness_unit/conftest.py
testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_runner.py
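As a usage sketch, the new option can also be passed to the runner constructor directly, assuming the MarionetteTestRunner class exercised by the unit tests below (imported from marionette_harness.runtests); the binary and test paths are placeholders, and a real run additionally needs a mozlog structured logger:

from marionette_harness.runtests import MarionetteTestRunner

# Sketch only: placeholders throughout; a real invocation needs an
# existing Firefox binary and test files.
runner = MarionetteTestRunner(binary='/path/to/firefox',
                              run_until_failure=True)  # repeat defaults to 30
runner.run_tests(['/path/to/test_example.py'])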
--- a/testing/marionette/harness/marionette_harness/runner/base.py
+++ b/testing/marionette/harness/marionette_harness/runner/base.py
@@ -295,18 +295,22 @@ class BaseMarionetteArguments(ArgumentPa
                           help="read preferences from a JSON or INI file. For INI, use "
                                "'file.ini:section' to specify a particular section.")
         self.add_argument('--addon',
                           action='append',
                           dest='addons',
                           help="addon to install; repeat for multiple addons.")
         self.add_argument('--repeat',
                           type=int,
-                          default=0,
                           help='number of times to repeat the test(s)')
+        self.add_argument("--run-until-failure",
+                          action="store_true",
+                          help="Run tests repeatedly and stop on the first time a test fails. "
+                               "Default cap is 30 runs, which can be overwritten "
+                               "with the --repeat parameter.")
         self.add_argument('--testvars',
                           action='append',
                           help='path to a json file with any test data required')
         self.add_argument('--symbols-path',
                           help='absolute path to directory containing breakpad symbols, or the '
                                'url of a zip file containing symbols')
         self.add_argument('--startup-timeout',
                           type=int,
@@ -428,16 +432,19 @@ class BaseMarionetteArguments(ArgumentPa
 
         missing_tests = [path for path in args.tests if not os.path.exists(path)]
         if missing_tests:
             self.error("Test file(s) not found: " + " ".join([path for path in missing_tests]))
 
         if not args.address and not args.binary and not args.emulator:
             self.error('You must specify --binary, or --address, or --emulator')
 
+        if args.repeat is not None and args.repeat < 0:
+            self.error('The value of --repeat must be greater than or equal to 0.')
+
         if args.total_chunks is not None and args.this_chunk is None:
             self.error('You must specify which chunk to run.')
 
         if args.this_chunk is not None and args.total_chunks is None:
             self.error('You must specify how many chunks to split the tests into.')
 
         if args.total_chunks is not None:
             if not 1 < args.total_chunks:
@@ -497,17 +504,19 @@ class Fixtures(object):
 class BaseMarionetteTestRunner(object):
 
     textrunnerclass = MarionetteTextTestRunner
     driverclass = Marionette
 
     def __init__(self, address=None,
                  app=None, app_args=None, binary=None, profile=None,
                  logger=None, logdir=None,
-                 repeat=0, testvars=None,
+                 repeat=None,
+                 run_until_failure=None,
+                 testvars=None,
                  symbols_path=None,
                  shuffle=False, shuffle_seed=random.randint(0, sys.maxint), this_chunk=1,
                  total_chunks=1,
                  server_root=None, gecko_log=None, result_callbacks=None,
                  prefs=None, test_tags=None,
                  socket_timeout=BaseMarionetteArguments.socket_timeout_default,
                  startup_timeout=None, addons=None, workspace=None,
                  verbose=0, e10s=True, emulator=False, headless=False, **kwargs):
@@ -525,17 +534,18 @@ class BaseMarionetteTestRunner(object):
         self.app_args = app_args or []
         self.bin = binary
         self.emulator = emulator
         self.profile = profile
         self.addons = addons
         self.logger = logger
         self.marionette = None
         self.logdir = logdir
-        self.repeat = repeat
+        self.repeat = repeat or 0
+        self.run_until_failure = run_until_failure or False
         self.symbols_path = symbols_path
         self.socket_timeout = socket_timeout
         self.shuffle = shuffle
         self.shuffle_seed = shuffle_seed
         self.server_root = server_root
         self.this_chunk = this_chunk
         self.total_chunks = total_chunks
         self.mixin_run_tests = []
@@ -559,16 +569,20 @@ class BaseMarionetteTestRunner(object):
         self._e10s_from_browser = None
         if self.e10s:
             self.prefs.update({
                 'browser.tabs.remote.autostart': True,
                 'browser.tabs.remote.force-enable': True,
                 'extensions.e10sBlocksEnabling': False,
             })
 
+        # If no repeat has been set, default to 30 extra runs
+        if self.run_until_failure and repeat is None:
+            self.repeat = 30
+
         def gather_debug(test, status):
             # No screenshots and page source for skipped tests
             if status == "SKIP":
                 return
 
             rv = {}
             marionette = test._marionette_weakref()
 
@@ -862,23 +876,26 @@ class BaseMarionetteTestRunner(object):
         self.logger.suite_start(tests_by_group,
                                 version_info=self.version_info,
                                 device_info=device_info)
 
         self._log_skipped_tests()
 
         interrupted = None
         try:
-            counter = self.repeat
-            while counter >= 0:
-                round_num = self.repeat - counter
-                if round_num > 0:
-                    self.logger.info('\nREPEAT {}\n-------'.format(round_num))
+            repeat_index = 0
+            while repeat_index <= self.repeat:
+                if repeat_index > 0:
+                    self.logger.info("\nREPEAT {}\n-------".format(repeat_index))
                 self.run_test_sets()
-                counter -= 1
+                if self.run_until_failure and self.failed > 0:
+                    break
+
+                repeat_index += 1
+
         except KeyboardInterrupt:
             # in case of KeyboardInterrupt during the test execution
             # we want to display current test results.
             # so we keep the exception to raise it later.
             interrupted = sys.exc_info()
         except:
             # For any other exception we return immediately and have to
             # cleanup running processes
--- a/testing/marionette/harness/marionette_harness/tests/harness_unit/conftest.py
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/conftest.py
@@ -30,18 +30,18 @@ def mach_parsed_kwargs(logger):
         'adb_path': None,
         'addons': None,
         'address': None,
         'app': None,
         'app_args': [],
         'avd': None,
         'avd_home': None,
         'binary': u'/path/to/firefox',
-        'browsermob_port' : None,
-        'browsermob_script' : None,
+        'browsermob_port': None,
+        'browsermob_script': None,
         'device_serial': None,
         'e10s': True,
         'emulator': False,
         'emulator_bin': None,
         'gecko_log': None,
         'jsdebugger': False,
         'log_errorsummary': None,
         'log_html': None,
@@ -58,17 +58,18 @@ def mach_parsed_kwargs(logger):
         'log_unittest': None,
         'log_xunit': None,
         'logger_name': 'Marionette-based Tests',
         'prefs': {},
         'prefs_args': None,
         'prefs_files': None,
         'profile': None,
         'pydebugger': None,
-        'repeat': 0,
+        'repeat': None,
+        'run_until_failure': None,
         'server_root': None,
         'shuffle': False,
         'shuffle_seed': 2276870381009474531,
         'socket_timeout': 60.0,
         'startup_timeout': 60,
         'symbols_path': None,
         'test_tags': None,
         'tests': [u'/path/to/unit-tests.ini'],
--- a/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_runner.py
+++ b/testing/marionette/harness/marionette_harness/tests/harness_unit/test_marionette_runner.py
@@ -21,17 +21,17 @@ def runner(mach_parsed_kwargs):
 @pytest.fixture
 def mock_runner(runner, mock_marionette, monkeypatch):
     """
     MarionetteTestRunner instance with mocked-out
     self.marionette and other properties,
     to enable testing runner.run_tests().
     """
     runner.driverclass = Mock(return_value=mock_marionette)
-    for attr in ['run_test_set', '_capabilities']:
+    for attr in ['run_test', '_capabilities']:
         setattr(runner, attr, Mock())
     runner._appName = 'fake_app'
     # simulate that browser runs with e10s by default
     runner._e10s_from_browser = True
     monkeypatch.setattr('marionette_harness.runner.base.mozversion', Mock())
     return runner
 
 
@@ -398,47 +398,115 @@ def test_add_tests(mock_runner):
     assert len(mock_runner.tests) == 0
     fake_tests = ["test_" + i + ".py" for i in "abc"]
     mock_runner.run_tests(fake_tests)
     assert len(mock_runner.tests) == 3
     for (test_name, added_test) in zip(fake_tests, mock_runner.tests):
         assert added_test['filepath'].endswith(test_name)
 
 
+def test_repeat(mock_runner):
+    def update_result(test, expected):
+        mock_runner.failed += 1
+
+    fake_tests = ["test_1.py"]
+    mock_runner.repeat = 4
+    mock_runner.run_test = Mock(side_effect=update_result)
+    mock_runner.run_tests(fake_tests)
+
+    assert mock_runner.failed == 5
+    assert mock_runner.passed == 0
+    assert mock_runner.todo == 0
+
+
+def test_run_until_failure(mock_runner):
+    def update_result(test, expected):
+        mock_runner.failed += 1
+
+    fake_tests = ["test_1.py"]
+    mock_runner.run_until_failure = True
+    mock_runner.repeat = 4
+    mock_runner.run_test = Mock(side_effect=update_result)
+    mock_runner.run_tests(fake_tests)
+
+    assert mock_runner.failed == 1
+    assert mock_runner.passed == 0
+    assert mock_runner.todo == 0
+
+
 def test_catch_invalid_test_names(runner):
     good_tests = [u'test_ok.py', u'test_is_ok.py']
     bad_tests = [u'bad_test.py', u'testbad.py', u'_test_bad.py',
                  u'test_bad.notpy', u'test_bad',
                  u'test.py', u'test_.py']
     with pytest.raises(Exception) as exc:
         runner._add_tests(good_tests + bad_tests)
     msg = exc.value.message
     assert "Test file names must be of the form" in msg
     for bad_name in bad_tests:
         assert bad_name in msg
     for good_name in good_tests:
         assert good_name not in msg
 
+
 @pytest.mark.parametrize('e10s', (True, False))
-def test_e10s_option_sets_prefs(mach_parsed_kwargs, e10s):
+def test_option_e10s_sets_prefs(mach_parsed_kwargs, e10s):
     mach_parsed_kwargs['e10s'] = e10s
     runner = MarionetteTestRunner(**mach_parsed_kwargs)
     e10s_prefs = {
         'browser.tabs.remote.autostart': True,
         'browser.tabs.remote.force-enable': True,
         'extensions.e10sBlocksEnabling': False
     }
     for k,v in e10s_prefs.iteritems():
         if k == 'extensions.e10sBlocksEnabling' and not e10s:
             continue
         assert runner.prefs.get(k, False) == (v and e10s)
 
-def test_e10s_option_clash_raises(mock_runner):
+
+def test_option_e10s_clash_raises(mock_runner):
     mock_runner._e10s_from_browser = False
 
     with pytest.raises(AssertionError) as e:
         mock_runner.run_tests([u'test_fake_thing.py'])
         assert "configuration (self.e10s) does not match browser appinfo" in e.value.message
 
+
+@pytest.mark.parametrize('repeat', (None, 0, 42, -1))
+def test_option_repeat(mach_parsed_kwargs, repeat):
+    if repeat is not None:
+        mach_parsed_kwargs['repeat'] = repeat
+    runner = MarionetteTestRunner(**mach_parsed_kwargs)
+
+    if repeat is None:
+        assert runner.repeat == 0
+    else:
+        assert runner.repeat == repeat
+
+
+@pytest.mark.parametrize('repeat', (None, 42))
+@pytest.mark.parametrize('run_until_failure', (None, True))
+def test_option_run_until_failure(mach_parsed_kwargs, repeat, run_until_failure):
+    if run_until_failure is not None:
+        mach_parsed_kwargs['run_until_failure'] = run_until_failure
+    if repeat is not None:
+        mach_parsed_kwargs['repeat'] = repeat
+    runner = MarionetteTestRunner(**mach_parsed_kwargs)
+
+    if run_until_failure is None:
+        assert runner.run_until_failure is False
+        if repeat is None:
+            assert runner.repeat == 0
+        else:
+            assert runner.repeat == repeat
+
+    else:
+        assert runner.run_until_failure == run_until_failure
+        if repeat is None:
+            assert runner.repeat == 30
+        else:
+            assert runner.repeat == repeat
+
+
 if __name__ == '__main__':
     import sys
     sys.exit(pytest.main(
         ['--log-tbpl=-', __file__]))