Bug 1286075: make redo logging prettier; r?mshal
This adds an optional "name" parameter to retry() so the info-level log
messages can use a caller-supplied label instead of the ugly tuple and dict
reprs of args/kwargs
MozReview-Commit-ID: 5xBjcLhJJRr
--- a/python/redo/redo/__init__.py
+++ b/python/redo/redo/__init__.py
@@ -70,17 +70,17 @@ def retrier(attempts=5, sleeptime=10, ma
time.sleep(sleeptime)
sleeptime *= sleepscale
if sleeptime > max_sleeptime:
sleeptime = max_sleeptime
def retry(action, attempts=5, sleeptime=60, max_sleeptime=5 * 60,
sleepscale=1.5, jitter=1, retry_exceptions=(Exception,),
- cleanup=None, args=(), kwargs={}):
+ cleanup=None, args=(), kwargs={}, name=None):
"""
Calls an action function until it succeeds, or we give up.
Args:
action (callable): the function to retry
attempts (int): maximum number of times to try; defaults to 5
sleeptime (float): how many seconds to sleep between tries; defaults to
60s (one minute)
@@ -95,17 +95,18 @@ def retry(action, attempts=5, sleeptime=
exceptions are raised by action(), then these
are immediately re-raised to the caller.
cleanup (callable): optional; called if one of `retry_exceptions` is
caught. No arguments are passed to the cleanup
function; if your cleanup requires arguments,
consider using functools.partial or a lambda
function.
args (tuple): positional arguments to call `action` with
- hwargs (dict): keyword arguments to call `action` with
+ kwargs (dict): keyword arguments to call `action` with
+        name (str): optional name of this call, used in info-level logging; defaults to a description of the action and its arguments
Returns:
Whatever action(*args, **kwargs) returns
Raises:
Whatever action(*args, **kwargs) raises. `retry_exceptions` are caught
up until the last attempt, in which case they are re-raised.
@@ -125,30 +126,32 @@ def retry(action, attempts=5, sleeptime=
'success!'
"""
assert callable(action)
assert not cleanup or callable(cleanup)
if max_sleeptime < sleeptime:
log.debug("max_sleeptime %d less than sleeptime %d" % (
max_sleeptime, sleeptime))
+ if not name:
+ name = "%s with args: %s, kwargs: %s" % (action, str(args), str(kwargs))
+
n = 1
for _ in retrier(attempts=attempts, sleeptime=sleeptime,
max_sleeptime=max_sleeptime, sleepscale=sleepscale,
jitter=jitter):
try:
- log.info("retry: Calling %s with args: %s, kwargs: %s, "
- "attempt #%d" % (action, str(args), str(kwargs), n))
+ log.info("retry: Calling %s, attempt #%d" % (name, n))
return action(*args, **kwargs)
except retry_exceptions:
log.debug("retry: Caught exception: ", exc_info=True)
if cleanup:
cleanup()
if n == attempts:
- log.info("retry: Giving up on %s" % action)
+ log.info("retry: Giving up on %s" % name)
raise
continue
finally:
n += 1
def retriable(*retry_args, **retry_kwargs):
"""
--- a/taskcluster/taskgraph/files_changed.py
+++ b/taskcluster/taskgraph/files_changed.py
@@ -20,17 +20,18 @@ def get_changed_files(repository, revisi
"""
Get the set of files changed in the push headed by the given revision.
Responses are cached, so multiple calls with the same arguments are OK.
"""
if (repository, revision) not in _cache:
url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'), revision)
logger.debug("Querying version control for metadata: %s", url)
response = retry(requests.get, attempts=2, sleeptime=10,
- args=(url,), kwargs={'timeout': 5})
+ args=(url,), kwargs={'timeout': 5},
+ name=url)
contents = response.json()
logger.debug('{} commits influencing task scheduling:'
.format(len(contents['changesets'])))
changed_files = set()
for c in contents['changesets']:
logger.debug(" {cset} {desc}".format(
cset=c['node'][0:12],