Bug 1300707 - Expose release sanity errors to ship-it status draft
authorJohan Lorenzo <jlorenzo@mozilla.com>
Wed, 07 Sep 2016 11:53:37 +0200
changeset 7202 fa5ceca3f85f9395a2d757fa2795493c79ebd079
parent 7200 be22c551f172f280ada5e75074e53448f8e9b00c
push id159
push userjlorenzo@mozilla.com
push dateWed, 14 Sep 2016 12:56:25 +0000
bugs1300707
Bug 1300707 - Expose release sanity errors to ship-it status MozReview-Commit-ID: 96WSDxfa6yn
buildfarm/release/release-runner.py
lib/python/kickoff/__init__.py
lib/python/kickoff/sanity.py
lib/python/kickoff/sanity/__init__.py
lib/python/kickoff/sanity/base.py
lib/python/kickoff/sanity/l10n.py
lib/python/kickoff/sanity/partials.py
lib/python/kickoff/sanity/revisions.py
--- a/buildfarm/release/release-runner.py
+++ b/buildfarm/release/release-runner.py
@@ -13,21 +13,24 @@ import shutil
 import tempfile
 import requests
 from os import path
 from optparse import OptionParser
 from twisted.python.lockfile import FilesystemLock
 
 site.addsitedir(path.join(path.dirname(__file__), "../../lib/python"))
 
-from kickoff import get_partials, ReleaseRunner, make_task_graph_strict_kwargs
+from kickoff import get_partials, ReleaseRunner, make_task_graph_strict_kwargs, long_revision
 from kickoff import get_l10n_config, get_en_US_config
 from kickoff import email_release_drivers
 from kickoff import bump_version
-from kickoff.sanity import ReleaseSanitizerRunner, SanityException, is_candidate_release
+from kickoff.sanity.base import SanityException, is_candidate_release
+from kickoff.sanity.revisions import RevisionsSanitizer
+from kickoff.sanity.l10n import L10nSanitizer
+from kickoff.sanity.partials import PartialsSanitizer
 from kickoff.build_status import are_en_us_builds_completed
 from release.info import readBranchConfig
 from release.l10n import parsePlainL10nChangesets
 from release.versions import getAppVersion
 from taskcluster import Scheduler, Index, Queue
 from taskcluster.utils import slugId
 from util.hg import mercurial
 from util.retry import retry
@@ -49,16 +52,58 @@ ALL_FILES = set([
     '.checksums.asc',
     '.complete.mar',
     '.exe',
     '.dmg',
     'i686.tar.bz2',
     'x86_64.tar.bz2',
 ])
 
+CONFIGS_WORKDIR = 'buildbot-configs'
+
+def run_prebuild_sanity_checks(release_runner):
+    new_valid_releases = []
+    for release in release_runner.new_releases:
+        log.info('Got a new release request: %s' % release)
+        try:
+            # TODO: this won't work for Thunderbird...do we care?
+            release['branchShortName'] = release['branch'].split("/")[-1]
+
+            check_and_assign_long_revision(release)
+            assign_and_check_l10n_changesets(release_runner, release)
+            assign_and_check_partial_updates(release_runner, release)
+
+            new_valid_releases.append(release)
+        except Exception as e:
+            release_runner.mark_as_failed(release, 'Sanity checks failed. Errors: %s' % e)
+            log.exception('Sanity checks failed. Errors: %s. Release: %s', e, release)
+    return new_valid_releases
+
+
+def check_and_assign_long_revision(release):
+    # Revisions must be checked before trying to get the long one.
+    RevisionsSanitizer(**release).run()
+    release['mozillaRevision'] = long_revision(release['branch'], release['mozillaRevision'])
+
+
+def assign_and_check_l10n_changesets(release_runner, release):
+    release['l10n_changesets'] = parsePlainL10nChangesets(release_runner.get_release_l10n(release['name']))
+    L10nSanitizer(**release).run()
+
+
+def assign_and_check_partial_updates(release_runner, release):
+    release['partial_updates'] = get_partials(release_runner, release['partials'], release['product'])
+    branchConfig = get_branch_config(release)
+    release['release_channels'] = update_channels(release['version'], branchConfig['release_channel_mappings'])
+    PartialsSanitizer(**release).run()
+
+
+def get_branch_config(release):
+    return readBranchConfig(path.join(CONFIGS_WORKDIR, "mozilla"), branch=release['branchShortName'])
+
 
 def update_channels(version, mappings):
     """Return a list of update channels for a version using version mapping
 
     >>> update_channels("40.0", [(r"^\d+\.0$", ["beta", "release"]), (r"^\d+\.\d+\.\d+$", ["release"])])
     ["beta", "release"]
     >>> update_channels("40.0.1", [(r"^\d+\.0$", ["beta", "release"]), (r"^\d+\.\d+\.\d+$", ["release"])])
     ["release"]
@@ -210,23 +255,16 @@ def validate_graph_kwargs(queue, gpg_key
     platforms = kwargs.get('en_US_config', {}).get('platforms', {})
     for platform in platforms.keys():
         task_id = platforms.get(platform).get('task_id', {})
         log.info('Performing release sanity for %s en-US binary', platform)
         sanitize_en_US_binary(queue, task_id, gpg_key_path)
 
     log.info("Release sanity for all en-US is now completed!")
 
-    log.info("Sanitizing the rest of the release ...")
-    sanitizer = ReleaseSanitizerRunner(**kwargs)
-    sanitizer.run()
-    if not sanitizer.was_successful():
-        errors = sanitizer.get_errors()
-        raise SanityException("Issues on release sanity %s" % errors)
-
 
 def main(options):
     log.info('Loading config from %s' % options.config)
     config = load_config(options.config)
 
     if config.getboolean('release-runner', 'verbose'):
         log_level = logging.DEBUG
     else:
@@ -259,17 +297,16 @@ def main(options):
             "accessToken": get_config(config, "taskcluster", "access_token", None),
         }
     }
     # Extend tc_config for retries, see Bug 1293744
     # https://github.com/taskcluster/taskcluster-client.py/blob/0.0.24/taskcluster/client.py#L30
     # This is a stopgap until Bug 1259627 is fixed.
     retrying_tc_config = tc_config.copy()
     retrying_tc_config.update({"maxRetries": 12})
-    configs_workdir = 'buildbot-configs'
     balrog_username = get_config(config, "balrog", "username", None)
     balrog_password = get_config(config, "balrog", "password", None)
     extra_balrog_submitter_params = get_config(config, "balrog", "extra_balrog_submitter_params", None)
     beetmover_aws_access_key_id = get_config(config, "beetmover", "aws_access_key_id", None)
     beetmover_aws_secret_access_key = get_config(config, "beetmover", "aws_secret_access_key", None)
     gpg_key_path = get_config(config, "signing", "gpg_key_path", None)
 
     # TODO: replace release sanity with direct checks of en-US and l10n revisions (and other things if needed)
@@ -280,116 +317,109 @@ def main(options):
     queue = Queue(tc_config)
 
     # Main loop waits for new releases, processes them and exits.
     while True:
         try:
             log.debug('Fetching release requests')
             rr.get_release_requests()
             if rr.new_releases:
-                for release in rr.new_releases:
-                    log.info('Got a new release request: %s' % release)
+                new_releases = run_prebuild_sanity_checks(rr)
                 break
             else:
                 log.debug('Sleeping for %d seconds before polling again' %
                           sleeptime)
                 time.sleep(sleeptime)
         except:
             log.error("Caught exception when polling:", exc_info=True)
             sys.exit(5)
 
-    retry(mercurial, args=(buildbot_configs, configs_workdir), kwargs=dict(branch=buildbot_configs_branch))
+    retry(mercurial, args=(buildbot_configs, CONFIGS_WORKDIR), kwargs=dict(branch=buildbot_configs_branch))
 
     if 'symlinks' in config.sections():
-        format_dict = dict(buildbot_configs=configs_workdir)
+        format_dict = dict(buildbot_configs=CONFIGS_WORKDIR)
         for target in config.options('symlinks'):
             symlink = config.get('symlinks', target).format(**format_dict)
             if path.exists(symlink):
                 log.warning("Skipping %s -> %s symlink" % (symlink, target))
             else:
                 log.info("Adding %s -> %s symlink" % (symlink, target))
                 os.symlink(target, symlink)
-
-    # TODO: this won't work for Thunderbird...do we care?
-    branch = release["branch"].split("/")[-1]
-    release['branchShortName'] = branch
-    branchConfig = readBranchConfig(path.join(configs_workdir, "mozilla"), branch=branch)
+    rc = 0
+    for release in new_releases:
+        branchConfig = get_branch_config(release)
+        # candidate releases are split in two graphs and release-runner only handles the first
+        # graph of tasks. so parts like postrelease, push_to_releases/mirrors, and mirror dependant
+        # channels are handled in the second generated graph outside of release-runner.
+        # This is not elegant but it should do the job for now
+        release_channels = release['release_channels']
+        candidate_release = is_candidate_release(release_channels)
+        if candidate_release:
+            postrelease_enabled = False
+            postrelease_bouncer_aliases_enabled = False
+            final_verify_channels = [
+                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
+            ]
+            publish_to_balrog_channels = [
+                c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
+            ]
+            push_to_releases_enabled = False
+            postrelease_mark_as_shipped_enabled = False
+        else:
+            postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
+            postrelease_bouncer_aliases_enabled = branchConfig['postrelease_bouncer_aliases_enabled']
+            postrelease_mark_as_shipped_enabled = branchConfig['postrelease_mark_as_shipped_enabled']
+            final_verify_channels = release_channels
+            publish_to_balrog_channels = release_channels
+            push_to_releases_enabled = True
 
-    release_channels = update_channels(release["version"], branchConfig["release_channel_mappings"])
-    # candidate releases are split in two graphs and release-runner only handles the first
-    # graph of tasks. so parts like postrelease, push_to_releases/mirrors, and mirror dependant
-    # channels are handled in the second generated graph outside of release-runner.
-    # This is not elegant but it should do the job for now
-    candidate_release = is_candidate_release(release_channels)
-    if candidate_release:
-        postrelease_enabled = False
-        postrelease_bouncer_aliases_enabled = False
-        final_verify_channels = [
-            c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
-        ]
-        publish_to_balrog_channels = [
-            c for c in release_channels if c not in branchConfig.get('mirror_requiring_channels', [])
-        ]
-        push_to_releases_enabled = False
-        postrelease_mark_as_shipped_enabled = False
-    else:
-        postrelease_enabled = branchConfig['postrelease_version_bump_enabled']
-        postrelease_bouncer_aliases_enabled = branchConfig['postrelease_bouncer_aliases_enabled']
-        postrelease_mark_as_shipped_enabled = branchConfig['postrelease_mark_as_shipped_enabled']
-        final_verify_channels = release_channels
-        publish_to_balrog_channels = release_channels
-        push_to_releases_enabled = True
-
-    rc = 0
-    for release in rr.new_releases:
         ship_it_product_name = release['product']
         tc_product_name = branchConfig['stage_product'][ship_it_product_name]
         # XXX: Doesn't work with neither Fennec nor Thunderbird
         platforms = branchConfig['release_platforms']
 
         try:
             if not are_en_us_builds_completed(index, release_name=release['name'], submitted_at=release['submittedAt'],
-                                              branch=branch, revision=release['mozillaRevision'],
+                                              branch=release['branchShortName'], revision=release['mozillaRevision'],
                                               tc_product_name=tc_product_name, platforms=platforms):
                 log.info('Builds are not completed yet, skipping release "%s" for now', release['name'])
                 rr.update_status(release, 'Waiting for builds to be completed')
                 continue
 
             log.info('Every build is completed for release: %s', release['name'])
             graph_id = slugId()
 
             rr.update_status(release, 'Generating task graph')
-            l10n_changesets = parsePlainL10nChangesets(rr.get_release_l10n(release["name"]))
 
             kwargs = {
                 "public_key": docker_worker_key,
                 "version": release["version"],
                 # ESR should not use "esr" suffix here:
                 "next_version": bump_version(release["version"].replace("esr", "")),
                 "appVersion": getAppVersion(release["version"]),
                 "buildNumber": release["buildNumber"],
                 "source_enabled": True,
                 "checksums_enabled": True,
                 "repo_path": release["branch"],
                 "revision": release["mozillaRevision"],
                 "product": release["product"],
                 # if mozharness_revision is not passed, use 'revision'
                 "mozharness_changeset": release.get('mh_changeset') or release['mozillaRevision'],
-                "partial_updates": get_partials(rr, release['partials'], release['product']),
-                "branch": branch,
+                "partial_updates": release['partial_updates'],
+                "branch": release['branchShortName'],
                 "updates_enabled": bool(release["partials"]),
                 "l10n_config": get_l10n_config(
-                    index=index, product=release["product"], branch=branch,
+                    index=index, product=release["product"], branch=release['branchShortName'],
                     revision=release['mozillaRevision'],
                     platforms=branchConfig['platforms'],
                     l10n_platforms=branchConfig['l10n_release_platforms'],
-                    l10n_changesets=l10n_changesets
+                    l10n_changesets=release['l10n_changesets']
                 ),
                 "en_US_config": get_en_US_config(
-                    index=index, product=release["product"], branch=branch,
+                    index=index, product=release["product"], branch=release['branchShortName'],
                     revision=release['mozillaRevision'],
                     platforms=branchConfig['release_platforms']
                 ),
                 "verifyConfigs": {},
                 "balrog_api_root": branchConfig["balrog_api_root"],
                 "funsize_balrog_api_root": branchConfig["funsize_balrog_api_root"],
                 "balrog_username": balrog_username,
                 "balrog_password": balrog_password,
@@ -411,44 +441,46 @@ def main(options):
                 "uptake_monitoring_enabled": branchConfig['uptake_monitoring_enabled'],
                 "tuxedo_server_url": branchConfig['tuxedoServerUrl'],
                 "postrelease_version_bump_enabled": postrelease_enabled,
                 "postrelease_mark_as_shipped_enabled": postrelease_mark_as_shipped_enabled,
                 "push_to_releases_enabled": push_to_releases_enabled,
                 "push_to_releases_automatic": branchConfig['push_to_releases_automatic'],
                 "beetmover_candidates_bucket": branchConfig["beetmover_buckets"][release["product"]],
                 "partner_repacks_platforms": branchConfig.get("partner_repacks_platforms", []),
-                "l10n_changesets": l10n_changesets,
+                "l10n_changesets": release['l10n_changesets'],
                 "extra_balrog_submitter_params": extra_balrog_submitter_params,
                 "publish_to_balrog_channels": publish_to_balrog_channels,
             }
 
             validate_graph_kwargs(queue, gpg_key_path, **kwargs)
             graph = make_task_graph_strict_kwargs(**kwargs)
             rr.update_status(release, "Submitting task graph")
             log.info("Task graph generated!")
             import pprint
             log.debug(pprint.pformat(graph, indent=4, width=160))
             print scheduler.createTaskGraph(graph_id, graph)
 
             rr.mark_as_completed(release)
             email_release_drivers(smtp_server=smtp_server, from_=notify_from,
                                   to=notify_to, release=release,
                                   task_group_id=graph_id)
-        except:
+        except Exception as exception:
             # We explicitly do not raise an error here because there's no
             # reason not to start other releases if creating the Task Graph
             # fails for another one. We _do_ need to set this in order to exit
             # with the right code, though.
             rc = 2
             rr.mark_as_failed(
                 release,
-                'Failed to start release promotion (graph ID: %s)' % graph_id)
-            log.exception("Failed to start release promotion for graph %s %s",
-                          graph_id, release)
+                'Failed to start release promotion (graph ID: %s). Error(s): %s' % (graph_id, exception)
+            )
+            log.exception('Failed to start release "%s" promotion for graph %s. Error(s): %s',
+                          release['name'], graph_id, exception)
+            log.debug('Release failed: %s', release)
 
     if rc != 0:
         sys.exit(rc)
 
     log.debug('Sleeping for %s seconds before polling again', sleeptime)
     time.sleep(sleeptime)
 
 if __name__ == '__main__':
--- a/lib/python/kickoff/__init__.py
+++ b/lib/python/kickoff/__init__.py
@@ -57,19 +57,16 @@ class ReleaseRunner(object):
     def get_release_requests(self):
         new_releases = self.releases_api.getReleases()
         if new_releases['releases']:
             new_releases = [self.release_api.getRelease(name) for name in
                             new_releases['releases']]
             our_releases = [r for r in new_releases if
                             matches(r['name'], RELEASE_PATTERNS)]
             if our_releases:
-                # make sure to use long revision
-                for r in our_releases:
-                    r["mozillaRevision"] = long_revision(r["branch"], r["mozillaRevision"])
                 self.new_releases = our_releases
                 log.info("Releases to handle are %s", self.new_releases)
                 return True
             else:
                 log.info("No releases to handle in %s", new_releases)
                 return False
         else:
             log.info("No new releases: %s" % new_releases)
deleted file mode 100644
--- a/lib/python/kickoff/sanity.py
+++ /dev/null
@@ -1,429 +0,0 @@
-"""
-Release sanity lib module in the new release promotion world.
-It's functionality is to replace the old way of doing it in
-http://hg.mozilla.org/build/tools/file/old-release-runner/buildbot-helpers/release_sanity.py
-
-Additionally, more checks are performed to cope with the new release promotion
-world regulations and constraints.
-"""
-import sys
-import site
-import logging
-from os import path
-
-import requests
-
-site.addsitedir(path.join(path.dirname(__file__), ".."))
-from util.retry import retry
-from util.hg import make_hg_url
-from release.versions import getL10nDashboardVersion
-from kickoff import matches
-
-log = logging.getLogger(__name__)
-
-CANDIDATES_SHA512_URL_TEMPLATE = "https://archive.mozilla.org/pub/firefox/candidates/{version}-candidates/build{build_number}/SHA512SUMS"
-RELEASES_SHA512_URL_TEMPLATE= "https://archive.mozilla.org/pub/firefox/releases/{version}/SHA512SUMS"
-L10N_DASHBOARD_URL_TEMPLATE = "https://l10n.mozilla.org/shipping/l10n-changesets?ms={milestone}"
-LOCALE_BASE_URL_TEMPLATE = "{hg_l10n_base}/{locale}/raw-file/{revision}"
-SINGLE_LOCALE_CONFIG_URI_TEMPLATE = "testing/mozharness/configs/single_locale/{branch}.py"
-VERSION_DISPLAY_CONFIG_URI = "browser/config/version_display.txt"
-SHIPPED_LOCALES_CONFIG_URI = "browser/locales/shipped-locales"
-BETA_PATTERNS = [r"\d+\.0b\d+"]
-
-
-def make_generic_head_request(page_url):
-    """Make generic HEAD request to check page existence"""
-    def _get():
-        req = requests.head(page_url, timeout=60)
-        req.raise_for_status()
-
-    retry(_get, attempts=5, sleeptime=1)
-
-
-def make_generic_get_request(page_url):
-    """Make generic GET request to retrieve some page content"""
-    def _get():
-        req = requests.get(page_url, timeout=60)
-        req.raise_for_status()
-        return req.content
-
-    return retry(_get, attempts=5, sleeptime=1)
-
-
-def make_hg_get_request(repo_path, revision,
-                        filename=None, hg_url='hg.mozilla.org'):
-    """Wrapper to make a GET request for a specific URI under hg repo"""
-    url = make_hg_url(hg_url, repo_path, revision=revision, filename=filename)
-    return make_generic_get_request(url)
-
-
-def is_candidate_release(channels):
-    """Determine if this is a candidate release or not
-
-    Because ship-it can not tell us if this is a candidate release (yet!),
-    we assume it is when we have determined, based on version,
-    that we are planning to ship to more than one update_channel
-    e.g. for candidate releases we have:
-     1) one channel to test the 'candidate' release with: 'beta' channel
-     2) once verified, we ship to the main channel: 'release' channel
-    """
-    return len(channels) > 1
-
-
-def get_l10_dashboard_changeset(version, product):
-    """Helper function to retrieve l10n dashboard changesets
-
-    >>> get_l10_dashboard_changeset('47.0, 'firefox')
-    ach revision_123
-    af revision_245
-    an revision_456
-    ...
-    """
-    l10n_dashboard_version = getL10nDashboardVersion(version, product)
-    url = L10N_DASHBOARD_URL_TEMPLATE.format(milestone=l10n_dashboard_version)
-
-    ret = make_generic_get_request(url).strip()
-
-    dash_dict = dict()
-    for line in ret.splitlines():
-        locale, revision = line.split()
-        dash_dict[locale] = revision
-
-    return dash_dict
-
-
-def get_single_locale_config(repo_path, revision, branch):
-    """Get single locale from remote mh configs
-    Example for mozilla-beta, random revision:
-
-    >>>
-    config = {
-        "nightly_build": True,
-        "branch": "mozilla-beta",
-        ...
-        # l10n
-        "hg_l10n_base": "https://hg.mozilla.org/releases/l10n/mozilla-beta",
-        # repositories
-        "mozilla_dir": "mozilla-beta",
-        'purge_minsize': 12,
-        'is_automation': True,
-        ...
-    }
-    """
-    filename = SINGLE_LOCALE_CONFIG_URI_TEMPLATE.format(branch=branch)
-    return make_hg_get_request(repo_path, revision, filename=filename)
-
-
-class SanityException(Exception):
-    """Should the release sanity process collect any errors, this
-    custom exception is to be thrown in release runner.
-    """
-    pass
-
-
-class OpsMixin(object):
-    """Helper class Mixin to enrich ReleaseSanitizerTestSuite behavior
-    """
-    def assertEqual(self, result, first, second, err_msg):
-        """Method inspired from unittest implementation
-        The :result is the aggregation object to collect all potential errors
-        """
-        if not first == second:
-            result.add_error(err_msg)
-
-
-class ReleaseSanitizerTestSuite(OpsMixin):
-    """Main release sanity class - the one to encompass all test methods and
-    all behavioral changes that need to be addressed. It is inspired by
-    the functionality of unittest module classes. It needs to be used
-    along with a ReleaseSanitizerResult object to aggregate all potential
-    exceptions.
-
-    Once instance needs to hold the task graph arguments that come from
-    Ship-it via release runner. Using the arguments, certain behaviors are
-    tested (e.g. partials, l10n, versions, config, etc)
-
-    To add more testing methods, please prefix the method with 'test_' in
-    order to have it run by sanitize() main method.
-    """
-    def __init__(self, **kwargs):
-        self.kwargs = kwargs
-        self.repo_path = self.kwargs["repo_path"]
-        self.revision = self.kwargs["revision"]
-        self.version = self.kwargs["version"]
-        self.branch = self.kwargs["branch"]
-        self.locales = self.kwargs["l10n_changesets"]
-        self.product = self.kwargs["product"]
-        self.partial_updates = self.kwargs["partial_updates"]
-
-    def sanitize(self, result):
-        """Main method to run all the sanity checks. It collects all the
-        methods prefixed with 'test_' and runs them accordingly.
-        It runs all the test and collects any potential errors in the :result
-        object.
-        """
-        test_methods = [m for m in filter(lambda k: k.startswith("test_"), dir(self))
-                        if callable(getattr(self, m))]
-        for test_method in test_methods:
-            log.debug("Calling testing method %s", test_method)
-            getattr(self, test_method)(result)
-
-    def test_versions_repo_and_revision_check(self, result):
-        """test_versions method
-        Tests if the indicated branch and revision repo exists
-        """
-        log.info("Testing repo and revision in tree ...")
-        try:
-            make_hg_get_request(self.repo_path, self.revision).strip()
-        except requests.HTTPError as err:
-            err_msg = "{path} repo does not exist with {rev} revision. URL: {url}".format(
-                path=self.repo_path, rev=self.revision, url=err.request.url)
-            result.add_error(err_msg, sys.exc_info())
-
-    def test_versions_display_validation_in_tree(self, result):
-        """test_versions method
-        Tests if the upstream display version exists and if it is the same
-        with the current one coming from release runner
-        """
-        log.info("Testing version display validation in tree ...")
-        # esr-hack: ensure trimming the suffix before comparing
-        version = self.version.replace("esr", "")
-
-        try:
-            display_version = make_hg_get_request(self.repo_path, self.revision,
-                                                  filename=VERSION_DISPLAY_CONFIG_URI).strip()
-        except requests.HTTPError as err:
-            err_msg = ("display_version config file not found in {path} under"
-                       " {rev} revision. URL: {url}").format(
-                           path=self.repo_path,
-                           rev=self.revision,
-                           url=err.request.url)
-            result.add_error(err_msg, sys.exc_info())
-            return
-
-        err_msg = ("In-tree display version {tree_version} doesn't "
-                   "match ship-it version {version}").format(
-                       tree_version=display_version, version=version)
-        self.assertEqual(result, version, display_version, err_msg)
-
-    def test_partials_validity(self, result):
-        """test_partials method
-        Tests some validity checks against the partials. It goes over the list
-        of specified partials and performs some tests:
-            1. Firstly, checks the partial version has a corresponding
-            (version, buildnumer) under the candidates directory on S3.
-            In order to perform this check, rather than checking the complete
-            mar (CMAR) files, we check the SHA512 checksums file. Upon a
-            successful release build, the checksums file is the last one
-            to be generated since it contains all the hashes for all the files.
-            Therefore, if the checksums file exist, it means allthe other files
-            made it through the candidates directory (including CMAR)
-
-            2. Secondly, checks if the partial versions has a corresponding
-            of that specific version under releases directory on S3.
-            The first check ensured that we had a release build that made it
-            through the candidates directory, now we need to checck if we
-            actually had a successful ship for that particular partial version.
-            For that, we follow the same logic as above by checking the SHA512
-            checksums under the releases directory. If it's there, it means we
-            have successfully shipped. If something went wrong, we'll hit an
-            error.
-
-            3. Ultimately it makes sure the partial version build from
-            candidates is actually the same that shipped under releases.
-            This check prevents one possible fail-scenario in which the build
-            under canidates is good and valid, but a follow-up build was
-            actually shipped under releases. Since shipping to releases
-            implies an actual copy of the files, for that particular reason we
-            make sure that SHA512 checksums of the build under candidates is
-            bitwise the same as the one from releases.
-        """
-        log.info("Testing partials validity ...")
-        def grab_partial_sha(url):
-            """Quick helper function to grab a SHA512 file"""
-            sha_sum = None
-            try:
-                sha_sum = make_generic_get_request(url).strip()
-            except requests.HTTPError:
-                err_msg = "Broken build - hash {url} not found".format(url=url)
-                result.add_error(err_msg, sys.exc_info())
-
-            return sha_sum
-
-        for pversion, info in self.kwargs["partial_updates"].iteritems():
-            buildno = info["buildNumber"]
-
-            # make sure partial is valid and shipped correctly to /candidates
-            _url = CANDIDATES_SHA512_URL_TEMPLATE.format(version=pversion,
-                                                         build_number=buildno)
-            candidate_sha = grab_partial_sha(_url)
-
-            # make sure partial has a shipped release under /releases
-            _url = RELEASES_SHA512_URL_TEMPLATE.format(version=pversion)
-            releases_sha = grab_partial_sha(_url)
-
-            err_msg = ("{version}-build{build_number} is a good candidate"
-                       " build, but not the one we shipped! URL: {url}").format(
-                           version=pversion,
-                           build_number=buildno,
-                           url=_url)
-            self.assertEqual(result, releases_sha, candidate_sha, err_msg)
-
-    def test_partials_release_candidate_validity(self, result):
-        """test_partials method
-        Tests if a RC contains both beta and release in list of partials.
-        We hit this issue in bug 1265579 in which the updates builder failed
-        if partials were all-beta OR if no-beta at all
-        """
-        log.info("Testing RC partials ...")
-        if not is_candidate_release(self.kwargs["release_channels"]):
-            log.info("Skipping this test as we're not dealing with a RC now")
-            return
-
-        ret = [matches(name, BETA_PATTERNS) for name in self.partial_updates]
-        at_least_one_beta = any(ret)
-        all_betas = all(ret) and ret != []
-
-        partials = ["{name}".format(name=p) for p in self.partial_updates]
-        err_msg = "No beta found in the RC list of partials: {l}".format(l=partials)
-        self.assertEqual(result, at_least_one_beta, True, err_msg)
-
-        err_msg = ("All partials in the RC list are betas. At least a non-beta"
-                   " release is needed in {l}").format(l=partials)
-        self.assertEqual(result, all_betas, False, err_msg)
-
-    def test_l10n_shipped_locales(self, result):
-        """test_l10n method
-        Tests if the current locales coming from release runner are in fact
-        the same as the shipped locales.
-        """
-        log.info("Testing l10n shipped locales ...")
-        try:
-            # TODO: mind that we will need something similar for Fennec
-            ret = make_hg_get_request(self.repo_path, self.revision,
-                                      filename=SHIPPED_LOCALES_CONFIG_URI).strip()
-        except requests.HTTPError as err:
-            err_msg = ("Shipped locale file not found in {path} repo under rev"
-                       " {revision}. URL: {url}").format(
-                           path=self.repo_path,
-                           revision=self.revision,
-                           url=err.request.url)
-            result.add_error(err_msg, sys.exc_info())
-            return
-
-        shipped_l10n = set([l.split()[0] for l in ret.splitlines()])
-        current_l10n = set(self.locales.keys())
-
-        err_msg = "Current l10n changesets and shipped locales differ!"
-        # we have en-US in shipped locales, but not in l10n changesets, because
-        # there is no en-US repo
-        self.assertEqual(result, shipped_l10n.difference(current_l10n),
-                         set(['en-US']),
-                         err_msg)
-        self.assertEqual(result, current_l10n.difference(shipped_l10n),
-                         set([]),
-                         err_msg)
-
-    def test_l10n_verify_changesets(self, result):
-        """test_l10n method
-        Tests if the l10n changesets (locale, revision) are actually valid.
-        It does a validity check on each of the locales revision. In order
-        to query that particular l10n release url, the single locale
-        config file from mozharness is grabbed first.
-        """
-        log.info("Testing current l10n changesets ...")
-        try:
-            ret = get_single_locale_config(self.repo_path,
-                                           self.revision,
-                                           self.branch).strip()
-        except requests.HTTPError as err:
-            err_msg = ("Failed to retrieve single locale config file for"
-                       " {path}, revision {rev}. URL: {url}").format(
-                           path=self.repo_path,
-                           rev=self.revision,
-                           branch=self.branch,
-                           url=err.request.url)
-            result.add_error(err_msg, sys.exc_info())
-            return
-
-        locals_dict = dict()
-        exec(ret, {}, locals_dict)
-        single_locale_config = locals_dict.get('config')
-
-        for locale in sorted(self.locales.keys()):
-            revision = self.locales[locale]
-            locale_url = LOCALE_BASE_URL_TEMPLATE.format(
-                hg_l10n_base=single_locale_config["hg_l10n_base"].strip('/'),
-                locale=locale,
-                revision=revision
-            )
-
-            try:
-                make_generic_head_request(locale_url)
-            except requests.HTTPError:
-                err_msg = "Locale {locale} not found".format(locale=locale_url)
-                result.add_error(err_msg, sys.exc_info())
-
-
-class ReleaseSanitizerResult(object):
-    """Aggregate exceptions result-object like. It's passed down in all
-    ReleaseSanitizerTestSuite methods to collect all potential errors.
-    This is usefule to avoid incremenatal fixes in release sanity
-    """
-    def __init__(self):
-        self.errors = []
-
-    def add_error(self, err_msg, err=None):
-        """Method to collect a new errors. It collects the exception
-        stacktrace and stores the exception value along with the message"""
-        # each error consist of a tuple containing the error message and any
-        # other potential information we might get useful from the
-        # sys.exc_info(). If there is no such, explanatory string will be added
-        self.errors.append((err_msg, self._exc_info_to_string(err)))
-        # append an empty line after each exceptions to have a nicer output
-        log.info("Collecting a new exception: %s", err_msg)
-
-    def _exc_info_to_string(self, err):
-        if err is None:
-            return "Result of assertion, no exception stacktrace available"
-        # trim the traceback part from the exc_info result tuple
-        _, value = err[:2]
-        return value
-
-    def __str__(self):
-        """Define the output to be user-friendly readable"""
-        # make some room to separate the output from the exception stacktrace
-        ret = "\n\n"
-        for msg, err in self.errors:
-            ret += "* {msg}:\n{err}\n\n".format(msg=msg, err=err)
-        return ret
-
-
-class ReleaseSanitizerRunner(object):
-    """Runner class that is to be called from release runner. It wraps up
-    the logic to interfere with both the ReleaseSanitizerTestSuite and the
-    ReleaseSanitizerResult. Upon successful run, errors in results should be
-    an empty list. Otherwise, the errors list can be retrieved and processed.
-    """
-    resultClass = ReleaseSanitizerResult
-    testSuite = ReleaseSanitizerTestSuite
-
-    def __init__(self, **kwargs):
-        self.kwargs = kwargs
-        self.result = self.resultClass()
-
-    def run(self):
-        """Main method to call for the actual test of release sanity"""
-        test_suite = self.testSuite(**self.kwargs)
-        log.info("Attempting to sanitize ...")
-        test_suite.sanitize(self.result)
-
-    def was_successful(self):
-        """Tells whether or not the result was a success"""
-        return len(self.result.errors) == 0
-
-    def get_errors(self):
-        """Retrieves the list of errors from the result objecti
-        in a nicely-formatted string
-        """
-        return self.result
new file mode 100644
--- /dev/null
+++ b/lib/python/kickoff/sanity/__init__.py
@@ -0,0 +1,8 @@
+"""
+Release sanity lib module in the new release promotion world.
+Its functionality is to replace the old way of doing it in
+http://hg.mozilla.org/build/tools/file/old-release-runner/buildbot-helpers/release_sanity.py
+
+Additionally, more checks are performed to cope with the new release promotion
+world regulations and constraints.
+"""
new file mode 100644
--- /dev/null
+++ b/lib/python/kickoff/sanity/base.py
@@ -0,0 +1,172 @@
+import site
+import logging
+from os import path
+
+import requests
+from util.hg import make_hg_url
+from util.retry import retry
+
+site.addsitedir(path.join(path.dirname(__file__), ".."))
+log = logging.getLogger(__name__)
+
+L10N_DASHBOARD_URL_TEMPLATE = "https://l10n.mozilla.org/shipping/l10n-changesets?ms={milestone}"
+
+
+def make_generic_head_request(page_url):
+    """Make generic HEAD request to check page existence"""
+    def _get():
+        req = requests.head(page_url, timeout=60)
+        req.raise_for_status()
+
+    retry(_get, attempts=5, sleeptime=1)
+
+
+def make_generic_get_request(page_url):
+    """Make generic GET request to retrieve some page content"""
+    def _get():
+        req = requests.get(page_url, timeout=60)
+        req.raise_for_status()
+        return req.content
+
+    return retry(_get, attempts=5, sleeptime=1)
+
+
+def make_hg_get_request(repo_path, revision,
+                        filename=None, hg_url='hg.mozilla.org'):
+    """Wrapper to make a GET request for a specific URI under hg repo"""
+    url = make_hg_url(hg_url, repo_path, revision=revision, filename=filename)
+    return make_generic_get_request(url)
+
+
+def is_candidate_release(channels):
+    """Determine if this is a candidate release or not
+
+    Because ship-it can not tell us if this is a candidate release (yet!),
+    we assume it is when we have determined, based on version,
+    that we are planning to ship to more than one update_channel
+    e.g. for candidate releases we have:
+     1) one channel to test the 'candidate' release with: 'beta' channel
+     2) once verified, we ship to the main channel: 'release' channel
+    """
+    return len(channels) > 1
+
+
+class SanityException(Exception):
+    """Should the release sanity process collect any errors, this
+    custom exception is to be thrown in release runner.
+    """
+    pass
+
+
+class OpsMixin(object):
+    """Helper class Mixin to enrich ReleaseSanitizerTestSuite behavior
+    """
+    def assertEqual(self, result, first, second, err_msg):
+        """Method inspired from unittest implementation
+        The :result is the aggregation object to collect all potential errors
+        """
+        if not first == second:
+            result.add_error(err_msg)
+
+
+class ReleaseSanitizerTestSuite(OpsMixin):
+    """Main release sanity class - the one to encompass all test methods and
+    all behavioral changes that need to be addressed. It is inspired by
+    the functionality of unittest module classes. It needs to be used
+    along with a ReleaseSanitizerResult object to aggregate all potential
+    exceptions.
+
+    One instance needs to hold the task graph arguments that come from
+    Ship-it via release runner. Using the arguments, certain behaviors are
+    tested (e.g. partials, l10n, versions, config, etc)
+
+    To add more testing methods, please prefix the method with 'test_' in
+    order to have it run by sanitize() main method.
+    """
+    def __init__(self, **kwargs):
+        log.debug('Test suite kwargs', kwargs)
+        self.kwargs = kwargs
+        self.repo_path = self.kwargs['branch']
+        self.revision = self.kwargs['mozillaRevision']
+        # TODO be more consistent with branch names
+        self.branch = self.kwargs['branchShortName']
+
+    def sanitize(self, result):
+        """Main method to run all the sanity checks. It collects all the
+        methods prefixed with 'test_' and runs them accordingly.
+        It runs all the tests and collects any potential errors in the :result
+        object.
+        """
+        test_methods = [m for m in filter(lambda k: k.startswith("test_"), dir(self))
+                        if callable(getattr(self, m))]
+        for test_method in test_methods:
+            log.debug("Calling testing method %s", test_method)
+            getattr(self, test_method)(result)
+
+
+class ReleaseSanitizerResult(object):
+    """Aggregate exceptions result-object like. It's passed down in all
+    ReleaseSanitizerTestSuite methods to collect all potential errors.
+    This is useful to avoid incremental fixes in release sanity
+    """
+    def __init__(self):
+        self.errors = []
+
+    def add_error(self, err_msg, err=None):
+        """Method to collect a new errors. It collects the exception
+        stacktrace and stores the exception value along with the message"""
+        # each error consists of a tuple containing the error message and any
+        # other potential information we might find useful from
+        # sys.exc_info(). If there is none, an explanatory string will be added
+        self.errors.append((err_msg, self._exc_info_to_string(err)))
+        # append an empty line after each exception to have a nicer output
+        log.info("Collecting a new exception: %s", err_msg)
+
+    def _exc_info_to_string(self, err):
+        if err is None:
+            return "Result of assertion, no exception stacktrace available"
+        # trim the traceback part from the exc_info result tuple
+        _, value = err[:2]
+        return value
+
+    def __str__(self):
+        """Define the output to be user-friendly readable"""
+        # make some room to separate the output from the exception stacktrace
+        ret = "\n\n"
+        for msg, err in self.errors:
+            ret += "* {msg}:\n{err}\n\n".format(msg=msg, err=err)
+        return ret
+
+
+class ReleaseSanitizerRunner(object):
+    """Runner class that is to be called from release runner. It wraps up
+    the logic to interact with both the ReleaseSanitizerTestSuite and the
+    ReleaseSanitizerResult. Upon successful run, errors in results should be
+    an empty list. Otherwise, the errors list can be retrieved and processed.
+    """
+    resultClass = ReleaseSanitizerResult
+    testSuite = ReleaseSanitizerTestSuite
+
+    def __init__(self, **kwargs):
+        self.kwargs = kwargs
+        self.result = self.resultClass()
+
+    def run(self):
+        """Main method to call for the actual test of release sanity"""
+        test_suite = self.testSuite(**self.kwargs)
+        log.info("Attempting to sanitize ...")
+        test_suite.sanitize(self.result)
+
+        if not self.was_successful():
+            errors = self.get_errors()
+            raise SanityException("Issues on release sanity %s" % errors)
+
+    def was_successful(self):
+        """Tells whether or not the result was a success"""
+        return len(self.result.errors) == 0
+
+    def get_errors(self):
+        """Retrieves the list of errors from the result objecti
+        in a nicely-formatted string
+        """
+        return self.result
new file mode 100644
--- /dev/null
+++ b/lib/python/kickoff/sanity/l10n.py
@@ -0,0 +1,118 @@
+import sys
+import logging
+import requests
+import re
+
+from kickoff.sanity.base import ReleaseSanitizerTestSuite, ReleaseSanitizerRunner, make_generic_get_request, \
+                                make_generic_head_request, make_hg_get_request
+
+log = logging.getLogger(__name__)
+
+SINGLE_LOCALE_CONFIG_URI_TEMPLATE = "testing/mozharness/configs/single_locale/{branch}.py"
+LOCALE_BASE_URL_TEMPLATE = "{hg_l10n_base}/{locale}/raw-file/{revision}"
+SHIPPED_LOCALES_CONFIG_URI = "browser/locales/shipped-locales"
+
+
+def get_single_locale_config(repo_path, revision, branch):
+    """Get single locale from remote mh configs
+    Example for mozilla-beta, random revision:
+
+    >>>
+    config = {
+        "nightly_build": True,
+        "branch": "mozilla-beta",
+        ...
+        # l10n
+        "hg_l10n_base": "https://hg.mozilla.org/releases/l10n/mozilla-beta",
+        # repositories
+        "mozilla_dir": "mozilla-beta",
+        'purge_minsize': 12,
+        'is_automation': True,
+        ...
+    }
+    """
+    filename = SINGLE_LOCALE_CONFIG_URI_TEMPLATE.format(branch=branch)
+    return make_hg_get_request(repo_path, revision, filename=filename)
+
+
+class L10nTestSuite(ReleaseSanitizerTestSuite):
+
+    def __init__(self, **kwargs):
+        ReleaseSanitizerTestSuite.__init__(self, **kwargs)
+        self.locales = self.kwargs['l10n_changesets']
+
+    def test_l10n_verify_changesets(self, result):
+        """test_l10n method
+        Tests if the l10n changesets (locale, revision) are actually valid.
+        It does a validity check on each of the locales revision. In order
+        to query that particular l10n release url, the single locale
+        config file from mozharness is grabbed first.
+        """
+        log.info("Testing current l10n changesets ...")
+        try:
+            ret = get_single_locale_config(self.repo_path,
+                                           self.revision,
+                                           self.branch).strip()
+        except requests.HTTPError as err:
+            err_msg = ("Failed to retrieve single locale config file for"
+                       " {path}, revision {rev}. URL: {url}").format(
+                           path=self.repo_path,
+                           rev=self.revision,
+                           branch=self.branch,
+                           url=err.request.url)
+            result.add_error(err_msg, sys.exc_info())
+            return None
+
+        locals_dict = dict()
+        exec(ret, {}, locals_dict)
+        single_locale_config = locals_dict.get('config')
+
+        for locale in sorted(self.locales.keys()):
+            revision = self.locales[locale]
+            locale_url = LOCALE_BASE_URL_TEMPLATE.format(
+                hg_l10n_base=single_locale_config["hg_l10n_base"].strip('/'),
+                locale=locale,
+                revision=revision
+            )
+
+            try:
+                make_generic_head_request(locale_url)
+            except requests.HTTPError:
+                err_msg = "Locale {locale} not found".format(locale=locale_url)
+                result.add_error(err_msg, sys.exc_info())
+
+    def test_l10n_shipped_locales(self, result):
+        """test_l10n method
+        Tests if the current locales coming from release runner are in fact
+        the same as the shipped locales.
+        """
+        log.info("Testing l10n shipped locales ...")
+        try:
+            # TODO: mind that we will need something similar for Fennec
+            ret = make_hg_get_request(self.repo_path, self.revision,
+                                      filename=SHIPPED_LOCALES_CONFIG_URI).strip()
+        except requests.HTTPError as err:
+            err_msg = ("Shipped locale file not found in {path} repo under rev"
+                       " {revision}. URL: {url}").format(
+                           path=self.repo_path,
+                           revision=self.revision,
+                           url=err.request.url)
+            result.add_error(err_msg, sys.exc_info())
+            return
+
+        shipped_l10n = set([l.split()[0] for l in ret.splitlines()])
+        current_l10n = set(self.locales.keys())
+
+        err_msg = "Current l10n changesets and shipped locales differ!"
+        # we have en-US in shipped locales, but not in l10n changesets, because
+        # there is no en-US repo
+        self.assertEqual(result, shipped_l10n.difference(current_l10n),
+                         set(['en-US']),
+                         err_msg)
+        self.assertEqual(result, current_l10n.difference(shipped_l10n),
+                         set([]),
+                         err_msg)
+
+
+class L10nSanitizer(ReleaseSanitizerRunner):
+    testSuite = L10nTestSuite
new file mode 100644
--- /dev/null
+++ b/lib/python/kickoff/sanity/partials.py
@@ -0,0 +1,112 @@
+import sys
+import logging
+import requests
+
+from kickoff.sanity.base import ReleaseSanitizerTestSuite, ReleaseSanitizerRunner, make_generic_get_request, \
+                                is_candidate_release
+from kickoff import matches
+
+log = logging.getLogger(__name__)
+
+
+CANDIDATES_SHA512_URL_TEMPLATE = "https://archive.mozilla.org/pub/firefox/candidates/" \
+                                 "{version}-candidates/build{build_number}/SHA512SUMS"
+RELEASES_SHA512_URL_TEMPLATE = "https://archive.mozilla.org/pub/firefox/releases/{version}/SHA512SUMS"
+BETA_PATTERNS = [r"\d+\.0b\d+"]
+
+
+class PartialsTestSuite(ReleaseSanitizerTestSuite):
+    def __init__(self, **kwargs):
+        ReleaseSanitizerTestSuite.__init__(self, **kwargs)
+        self.partial_updates = self.kwargs['partial_updates']
+
+    def test_partials_validity(self, result):
+        """test_partials method
+        Tests some validity checks against the partials. It goes over the list
+        of specified partials and performs some tests:
+            1. Firstly, checks the partial version has a corresponding
+            (version, buildnumber) under the candidates directory on S3.
+            In order to perform this check, rather than checking the complete
+            mar (CMAR) files, we check the SHA512 checksums file. Upon a
+            successful release build, the checksums file is the last one
+            to be generated since it contains all the hashes for all the files.
+            Therefore, if the checksums file exist, it means all the other files
+            made it through the candidates directory (including CMAR)
+
+            2. Secondly, checks if the partial version has a corresponding
+            of that specific version under releases directory on S3.
+            The first check ensured that we had a release build that made it
+            through the candidates directory, now we need to check if we
+            actually had a successful ship for that particular partial version.
+            For that, we follow the same logic as above by checking the SHA512
+            checksums under the releases directory. If it's there, it means we
+            have successfully shipped. If something went wrong, we'll hit an
+            error.
+
+            3. Ultimately it makes sure the partial version build from
+            candidates is actually the same that shipped under releases.
+            This check prevents one possible fail-scenario in which the build
+            under candidates is good and valid, but a follow-up build was
+            actually shipped under releases. Since shipping to releases
+            implies an actual copy of the files, for that particular reason we
+            make sure that SHA512 checksums of the build under candidates is
+            bitwise the same as the one from releases.
+        """
+        log.info("Testing partials validity ...")
+
+        def grab_partial_sha(url):
+            """Quick helper function to grab a SHA512 file"""
+            sha_sum = None
+            try:
+                sha_sum = make_generic_get_request(url).strip()
+            except requests.HTTPError:
+                err_msg = "Broken build - hash {url} not found".format(url=url)
+                result.add_error(err_msg, sys.exc_info())
+
+            return sha_sum
+
+        for pversion, info in self.kwargs["partial_updates"].iteritems():
+            buildno = info["buildNumber"]
+
+            # make sure partial is valid and shipped correctly to /candidates
+            _url = CANDIDATES_SHA512_URL_TEMPLATE.format(version=pversion,
+                                                         build_number=buildno)
+            candidate_sha = grab_partial_sha(_url)
+
+            # make sure partial has a shipped release under /releases
+            _url = RELEASES_SHA512_URL_TEMPLATE.format(version=pversion)
+            releases_sha = grab_partial_sha(_url)
+
+            err_msg = ("{version}-build{build_number} is a good candidate"
+                       " build, but not the one we shipped! URL: {url}").format(
+                           version=pversion,
+                           build_number=buildno,
+                           url=_url)
+            self.assertEqual(result, releases_sha, candidate_sha, err_msg)
+
+    def test_partials_release_candidate_validity(self, result):
+        """test_partials method
+        Tests if a RC contains both beta and release in list of partials.
+        We hit this issue in bug 1265579 in which the updates builder failed
+        if partials were all-beta OR if no-beta at all
+        """
+        log.info("Testing RC partials ...")
+        if not is_candidate_release(self.kwargs["release_channels"]):
+            log.info("Skipping this test as we're not dealing with a RC now")
+            return
+
+        ret = [matches(name, BETA_PATTERNS) for name in self.partial_updates]
+        at_least_one_beta = any(ret)
+        all_betas = all(ret) and ret != []
+
+        partials = ["{name}".format(name=p) for p in self.partial_updates]
+        err_msg = "No beta found in the RC list of partials: {l}".format(l=partials)
+        self.assertEqual(result, at_least_one_beta, True, err_msg)
+
+        err_msg = ("All partials in the RC list are betas. At least a non-beta"
+                   " release is needed in {l}").format(l=partials)
+        self.assertEqual(result, all_betas, False, err_msg)
+
+
+class PartialsSanitizer(ReleaseSanitizerRunner):
+    testSuite = PartialsTestSuite
new file mode 100644
--- /dev/null
+++ b/lib/python/kickoff/sanity/revisions.py
@@ -0,0 +1,58 @@
+import sys
+import logging
+import requests
+
+from kickoff.sanity.base import ReleaseSanitizerTestSuite, ReleaseSanitizerRunner, make_generic_get_request, \
+                                make_generic_head_request, make_hg_get_request
+
+log = logging.getLogger(__name__)
+
+VERSION_DISPLAY_CONFIG_URI = "browser/config/version_display.txt"
+
+
+class RevisionsTestSuite(ReleaseSanitizerTestSuite):
+    def __init__(self, **kwargs):
+        ReleaseSanitizerTestSuite.__init__(self, **kwargs)
+        self.version = self.kwargs['version']
+
+    def test_versions_repo_and_revision_check(self, result):
+        """test_versions method
+        Tests if the indicated branch and revision repo exists
+        """
+        log.info("Testing repo and revision in tree ...")
+        try:
+            make_hg_get_request(self.repo_path, self.revision).strip()
+        except requests.HTTPError as err:
+            err_msg = "{path} repo does not exist with {rev} revision. URL: {url}".format(
+                path=self.repo_path, rev=self.revision, url=err.request.url)
+            result.add_error(err_msg, sys.exc_info())
+
+    def test_versions_display_validation_in_tree(self, result):
+        """test_versions method
+        Tests if the upstream display version exists and if it is the same
+        with the current one coming from release runner
+        """
+        log.info("Testing version display validation in tree ...")
+        # esr-hack: ensure trimming the suffix before comparing
+        version = self.version.replace("esr", "")
+
+        try:
+            display_version = make_hg_get_request(self.repo_path, self.revision,
+                                                  filename=VERSION_DISPLAY_CONFIG_URI).strip()
+        except requests.HTTPError as err:
+            err_msg = ("display_version config file not found in {path} under"
+                       " {rev} revision. URL: {url}").format(
+                           path=self.repo_path,
+                           rev=self.revision,
+                           url=err.request.url)
+            result.add_error(err_msg, sys.exc_info())
+            return
+
+        err_msg = ("In-tree display version {tree_version} doesn't "
+                   "match ship-it version {version}").format(
+                       tree_version=display_version, version=version)
+        self.assertEqual(result, version, display_version, err_msg)
+
+
+class RevisionsSanitizer(ReleaseSanitizerRunner):
+    testSuite = RevisionsTestSuite