--- a/hgext/robustcheckout/__init__.py
+++ b/hgext/robustcheckout/__init__.py
@@ -9,16 +9,17 @@ from a source repo using best practices
times and storage efficiency.
"""
from __future__ import absolute_import
import contextlib
import errno
import functools
+import json
import os
import random
import re
import socket
import ssl
import time
import urllib2
@@ -245,32 +246,76 @@ def robustcheckout(ui, url, dest, upstre
# We make the config change here instead of in a config file because
# otherwise we're at the whim of whatever configs are used in automation.
ui.setconfig('progress', 'delay', 1.0)
ui.setconfig('progress', 'refresh', 1.0)
ui.setconfig('progress', 'assume-tty', True)
sharebase = os.path.realpath(sharebase)
- return _docheckout(ui, url, dest, upstream, revision, branch, purge,
- sharebase, networkattempts,
- sparse_profile=sparseprofile)
+ optimes = []
+ start = time.time()
+
+ try:
+ return _docheckout(ui, url, dest, upstream, revision, branch, purge,
+ sharebase, optimes, networkattempts,
+ sparse_profile=sparseprofile)
+ finally:
+ overall = time.time() - start
+ optimes.append(('overall', overall))
+ if 'TASKCLUSTER_INSTANCE_TYPE' in os.environ:
+ perfherder = {
+ 'framework': {
+ 'name': 'vcs',
+ },
+ 'suites': [],
+ }
+ for op, duration in optimes:
+ perfherder['suites'].append({
+ 'name': op,
+ 'value': duration,
+ 'lowerIsBetter': True,
+ 'shouldAlert': False,
+ 'extraOptions': [os.environ['TASKCLUSTER_INSTANCE_TYPE']],
+ 'subtests': [],
+ })
+
+ ui.write('PERFHERDER_DATA: %s\n' % json.dumps(perfherder,
+ sort_keys=True))
def _docheckout(ui, url, dest, upstream, revision, branch, purge, sharebase,
- networkattemptlimit, networkattempts=None, sparse_profile=None):
+ optimes, networkattemptlimit, networkattempts=None,
+ sparse_profile=None):
if not networkattempts:
networkattempts = [1]
def callself():
return _docheckout(ui, url, dest, upstream, revision, branch, purge,
- sharebase, networkattemptlimit,
+ sharebase, optimes, networkattemptlimit,
networkattempts=networkattempts,
sparse_profile=sparse_profile)
+ @contextlib.contextmanager
+ def timeit(op):
+ errored = False
+ try:
+ start = time.time()
+ yield
+ except Exception:
+ errored = True
+ raise
+ finally:
+ elapsed = time.time() - start
+
+ if errored:
+ op += '_errored'
+
+ optimes.append((op, elapsed))
+
ui.write('ensuring %s@%s is available at %s\n' % (url, revision or branch,
dest))
# We assume that we're the only process on the machine touching the
# repository paths that we were told to use. This means our recovery
# scenario when things aren't "right" is to just nuke things and start
# from scratch. This is easier to implement than verifying the state
# of the data and attempting recovery. And in some scenarios (such as
@@ -302,46 +347,51 @@ def _docheckout(ui, url, dest, upstream,
if not sparse_profile and destvfs.exists('.hg/sparse'):
raise error.Abort('cannot use non-sparse checkout on existing sparse '
'checkout',
hint='use a separate working directory to use sparse')
# Require checkouts to be tied to shared storage because efficiency.
if destvfs.exists('.hg') and not destvfs.exists('.hg/sharedpath'):
ui.warn('(destination is not shared; deleting)\n')
- destvfs.rmtree(forcibly=True)
+ with timeit('remove_unshared_dest'):
+ destvfs.rmtree(forcibly=True)
# Verify the shared path exists and is using modern pooled storage.
if destvfs.exists('.hg/sharedpath'):
storepath = destvfs.read('.hg/sharedpath').strip()
ui.write('(existing repository shared store: %s)\n' % storepath)
if not os.path.exists(storepath):
ui.warn('(shared store does not exist; deleting destination)\n')
- destvfs.rmtree(forcibly=True)
+            with timeit('remove_missing_shared_store'):
+ destvfs.rmtree(forcibly=True)
elif not re.search('[a-f0-9]{40}/\.hg$', storepath.replace('\\', '/')):
ui.warn('(shared store does not belong to pooled storage; '
'deleting destination to improve efficiency)\n')
- destvfs.rmtree(forcibly=True)
+ with timeit('remove_unpooled_store'):
+ destvfs.rmtree(forcibly=True)
if destvfs.isfileorlink('.hg/wlock'):
ui.warn('(dest has an active working directory lock; assuming it is '
'left over from a previous process and that the destination '
'is corrupt; deleting it just to be sure)\n')
- destvfs.rmtree(forcibly=True)
+ with timeit('remove_locked_wdir'):
+ destvfs.rmtree(forcibly=True)
def handlerepoerror(e):
if e.message == _('abandoned transaction found'):
ui.warn('(abandoned transaction found; trying to recover)\n')
repo = hg.repository(ui, dest)
if not repo.recover():
ui.warn('(could not recover repo state; '
'deleting shared store)\n')
- deletesharedstore()
+ with timeit('remove_unrecovered_shared_store'):
+ deletesharedstore()
ui.warn('(attempting checkout from beginning)\n')
return callself()
raise
# At this point we either have an existing working directory using
# shared, pooled storage or we have nothing.
@@ -430,40 +480,49 @@ def _docheckout(ui, url, dest, upstream,
storevfs = getvfs()(storepath, audit=False)
if storevfs.isfileorlink('.hg/store/lock'):
ui.warn('(shared store has an active lock; assuming it is left '
'over from a previous process and that the store is '
'corrupt; deleting store and destination just to be '
'sure)\n')
if destvfs.exists():
- destvfs.rmtree(forcibly=True)
- storevfs.rmtree(forcibly=True)
+ with timeit('remove_dest_active_lock'):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit('remove_shared_store_active_lock'):
+ storevfs.rmtree(forcibly=True)
if storevfs.exists() and not storevfs.exists('.hg/requires'):
ui.warn('(shared store missing requires file; this is a really '
'odd failure; deleting store and destination)\n')
if destvfs.exists():
- destvfs.rmtree(forcibly=True)
- storevfs.rmtree(forcibly=True)
+ with timeit('remove_dest_no_requires'):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit('remove_shared_store_no_requires'):
+ storevfs.rmtree(forcibly=True)
if storevfs.exists('.hg/requires'):
requires = set(storevfs.read('.hg/requires').splitlines())
# FUTURE when we require generaldelta, this is where we can check
# for that.
required = {'dotencode', 'fncache'}
missing = required - requires
if missing:
ui.warn('(shared store missing requirements: %s; deleting '
'store and destination to ensure optimal behavior)\n' %
', '.join(sorted(missing)))
if destvfs.exists():
- destvfs.rmtree(forcibly=True)
- storevfs.rmtree(forcibly=True)
+ with timeit('remove_dest_missing_requires'):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit('remove_shared_store_missing_requires'):
+ storevfs.rmtree(forcibly=True)
created = False
if not destvfs.exists():
# Ensure parent directories of destination exist.
# Mercurial 3.8 removed ensuredirs and made makedirs race safe.
if util.safehasattr(util, 'ensuredirs'):
makedirs = util.ensuredirs
@@ -472,27 +531,30 @@ def _docheckout(ui, url, dest, upstream,
makedirs(os.path.dirname(destvfs.base), notindexed=True)
makedirs(sharebase, notindexed=True)
if upstream:
ui.write('(cloning from upstream repo %s)\n' % upstream)
try:
- res = hg.clone(ui, {}, clonepeer, dest=dest, update=False,
- shareopts={'pool': sharebase, 'mode': 'identity'})
+ with timeit('clone'):
+ shareopts = {'pool': sharebase, 'mode': 'identity'}
+ res = hg.clone(ui, {}, clonepeer, dest=dest, update=False,
+ shareopts=shareopts)
except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
if handlepullerror(e):
return callself()
raise
except error.RepoError as e:
return handlerepoerror(e)
except error.RevlogError as e:
ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
- deletesharedstore()
+ with timeit('remove_shared_store_revlogerror'):
+ deletesharedstore()
return callself()
# TODO retry here.
if res is None:
raise error.Abort('clone failed')
# Verify it is using shared pool storage.
if not destvfs.exists('.hg/sharedpath'):
@@ -530,19 +592,20 @@ def _docheckout(ui, url, dest, upstream,
if branch:
ui.warn('(remote resolved %s to %s; '
'result is not deterministic)\n' %
(branch, checkoutrevision))
if checkoutrevision in repo:
ui.warn('(revision already present locally; not pulling)\n')
else:
- pullop = exchange.pull(repo, remote, heads=pullrevs)
- if not pullop.rheads:
- raise error.Abort('unable to pull requested revision')
+ with timeit('pull'):
+ pullop = exchange.pull(repo, remote, heads=pullrevs)
+ if not pullop.rheads:
+ raise error.Abort('unable to pull requested revision')
except (error.Abort, ssl.SSLError, urllib2.URLError) as e:
if handlepullerror(e):
return callself()
raise
except error.RepoError as e:
return handlerepoerror(e)
except error.RevlogError as e:
ui.warn('(repo corruption: %s; deleting shared store)\n' % e.message)
@@ -565,22 +628,25 @@ def _docheckout(ui, url, dest, upstream,
# See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
# purging by monkeypatching the sparse matcher.
try:
old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
if old_sparse_fn is not None:
assert util.versiontuple(n=2) in ((4, 3), (4, 4), (4, 5))
repo.dirstate._sparsematchfn = lambda: matchmod.always(repo.root, '')
- if purgeext.purge(ui, repo, all=True, abort_on_err=True,
- # The function expects all arguments to be
- # defined.
- **{'print': None, 'print0': None, 'dirs': None,
- 'files': None}):
- raise error.Abort('error purging')
+ with timeit('purge'):
+ if purgeext.purge(ui, repo, all=True, abort_on_err=True,
+ # The function expects all arguments to be
+ # defined.
+ **{'print': None,
+ 'print0': None,
+ 'dirs': None,
+ 'files': None}):
+ raise error.Abort('error purging')
finally:
if old_sparse_fn is not None:
repo.dirstate._sparsematchfn = old_sparse_fn
# Update the working directory.
if sparse_profile:
sparsemod = getsparse()
@@ -607,27 +673,30 @@ def _docheckout(ui, url, dest, upstream,
else:
ui.write('(setting sparse config to profile %s)\n' %
sparse_profile)
# If doing an incremental update, this will perform two updates:
# one to change the sparse profile and another to update to the new
# revision. This is not desired. But there's not a good API in
# Mercurial to do this as one operation.
- with repo.wlock():
+ with repo.wlock(), timeit('sparse_update_config'):
fcounts = map(len, sparsemod._updateconfigandrefreshwdir(
repo, [], [], [sparse_profile], force=True))
repo.ui.status('%d files added, %d files dropped, '
'%d files conflicting\n' % tuple(fcounts))
ui.write('(sparse refresh complete)\n')
- if commands.update(ui, repo, rev=checkoutrevision, clean=True):
- raise error.Abort('error updating')
+ op = 'update_sparse' if sparse_profile else 'update'
+
+ with timeit(op):
+ if commands.update(ui, repo, rev=checkoutrevision, clean=True):
+ raise error.Abort('error updating')
ui.write('updated to %s\n' % checkoutrevision)
return None
def extsetup(ui):
# Ensure required extensions are loaded.
for ext in ('purge', 'share'):