--- a/testing/mozbase/docs/_static/structured_example.py
+++ b/testing/mozbase/docs/_static/structured_example.py
@@ -1,51 +1,60 @@
import argparse
import sys
import traceback
import types
from mozlog import commandline, get_default_logger
+
class TestAssertion(Exception):
pass
+
def assert_equals(a, b):
if a != b:
raise TestAssertion("%r not equal to %r" % (a, b))
+
def expected(status):
def inner(f):
def test_func():
f()
test_func.__name__ = f.__name__
test_func._expected = status
return test_func
return inner
+
def test_that_passes():
assert_equals(1, int("1"))
+
def test_that_fails():
assert_equals(1, int("2"))
+
def test_that_has_an_error():
assert_equals(2, 1 + "1")
+
@expected("FAIL")
def test_expected_fail():
assert_equals(2 + 2, 5)
+
class TestRunner(object):
+
def __init__(self):
self.logger = get_default_logger(component='TestRunner')
def gather_tests(self):
for item in globals().itervalues():
- if type(item) == types.FunctionType and item.__name__.startswith("test_"):
+ if isinstance(item, types.FunctionType) and item.__name__.startswith("test_"):
yield item.__name__, item
def run(self):
tests = list(self.gather_tests())
self.logger.suite_start(tests=[name for name, func in tests])
self.logger.info("Running tests")
for name, func in tests:
@@ -64,20 +73,22 @@ class TestRunner(object):
message = e.message
except:
status = "ERROR"
message = traceback.format_exc()
else:
status = "PASS"
self.logger.test_end(name, status=status, expected=expected, message=message)
+
def get_parser():
parser = argparse.ArgumentParser()
return parser
+
def main():
parser = get_parser()
commandline.add_logging_group(parser)
args = parser.parse_args()
logger = commandline.setup_logging("structured-example", args, {"raw": sys.stdout})
--- a/testing/mozbase/docs/conf.py
+++ b/testing/mozbase/docs/conf.py
@@ -6,46 +6,47 @@
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys, os
+import sys
+import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
here = os.path.dirname(os.path.abspath(__file__))
parent = os.path.dirname(here)
for item in os.listdir(parent):
path = os.path.join(parent, item)
if (not os.path.isdir(path)) or (not os.path.exists(os.path.join(path, 'setup.py'))):
continue
sys.path.insert(0, path)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MozBase'
copyright = u'2012, Mozilla Automation and Tools team'
@@ -55,47 +56,47 @@ copyright = u'2012, Mozilla Automation a
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
-#language = None
+# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
@@ -106,152 +107,152 @@ if not on_rtd:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "mozbase documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
# If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MozBasedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+ # Additional stuff for the LaTeX preamble.
+ # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
- ('index', 'MozBase.tex', u'MozBase Documentation',
- u'Mozilla Automation and Tools team', 'manual'),
+ ('index', 'MozBase.tex', u'MozBase Documentation',
+ u'Mozilla Automation and Tools team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# If true, show page references after internal links.
-#latex_show_pagerefs = False
+# latex_show_pagerefs = False
# If true, show URL addresses after external links.
-#latex_show_urls = False
+# latex_show_urls = False
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_domain_indices = True
+# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mozbase', u'MozBase Documentation',
[u'Mozilla Automation and Tools team'], 1)
]
# If true, show URL addresses after external links.
-#man_show_urls = False
+# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'MozBase', u'MozBase Documentation',
- u'Mozilla Automation and Tools team', 'MozBase', 'One line description of project.',
- 'Miscellaneous'),
+ ('index', 'MozBase', u'MozBase Documentation',
+ u'Mozilla Automation and Tools team', 'MozBase', 'One line description of project.',
+ 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
+# texinfo_appendices = []
# If false, no module index is generated.
-#texinfo_domain_indices = True
+# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
+# texinfo_show_urls = 'footnote'
--- a/testing/mozbase/manifestparser/manifestparser/__init__.py
+++ b/testing/mozbase/manifestparser/manifestparser/__init__.py
@@ -1,7 +1,8 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .manifestparser import *
from .expression import *
from .ini import *
--- a/testing/mozbase/manifestparser/manifestparser/cli.py
+++ b/testing/mozbase/manifestparser/manifestparser/cli.py
@@ -13,17 +13,18 @@ import sys
from .manifestparser import (
convert,
ManifestParser,
)
class ParserError(Exception):
- """error for exceptions while parsing the command line"""
+ """error for exceptions while parsing the command line"""
+
def parse_args(_args):
"""
parse and return:
--keys=value (or --key value)
-tags
args
"""
@@ -56,46 +57,51 @@ def parse_args(_args):
if key:
_dict[key] = arg
continue
args.append(arg)
# return values
return (_dict, tags, args)
+
class CLICommand(object):
usage = '%prog [options] command'
+
def __init__(self, parser):
- self._parser = parser # master parser
+ self._parser = parser # master parser
+
def parser(self):
- return OptionParser(usage=self.usage, description=self.__doc__,
- add_help_option=False)
+ return OptionParser(usage=self.usage, description=self.__doc__,
+ add_help_option=False)
+
class Copy(CLICommand):
usage = '%prog [options] copy manifest directory -tag1 -tag2 --key1=value1 --key2=value2 ...'
+
def __call__(self, options, args):
- # parse the arguments
- try:
- kwargs, tags, args = parse_args(args)
- except ParserError, e:
- self._parser.error(e.message)
+ # parse the arguments
+ try:
+ kwargs, tags, args = parse_args(args)
+ except ParserError, e:
+ self._parser.error(e.message)
- # make sure we have some manifests, otherwise it will
- # be quite boring
- if not len(args) == 2:
- HelpCLI(self._parser)(options, ['copy'])
- return
+ # make sure we have some manifests, otherwise it will
+ # be quite boring
+ if not len(args) == 2:
+ HelpCLI(self._parser)(options, ['copy'])
+ return
- # read the manifests
- # TODO: should probably ensure these exist here
- manifests = ManifestParser()
- manifests.read(args[0])
+ # read the manifests
+ # TODO: should probably ensure these exist here
+ manifests = ManifestParser()
+ manifests.read(args[0])
- # print the resultant query
- manifests.copy(args[1], None, *tags, **kwargs)
+ # print the resultant query
+ manifests.copy(args[1], None, *tags, **kwargs)
class CreateCLI(CLICommand):
"""
create a manifest from a list of directories
"""
usage = '%prog [options] create directory <directory> <...>'
@@ -129,16 +135,17 @@ class CreateCLI(CLICommand):
print manifest
class WriteCLI(CLICommand):
"""
write a manifest based on a query
"""
usage = '%prog [options] write manifest <manifest> -tag1 -tag2 --key1=value1 --key2=value2 ...'
+
def __call__(self, options, args):
# parse the arguments
try:
kwargs, tags, args = parse_args(args)
except ParserError, e:
self._parser.error(e.message)
@@ -152,32 +159,32 @@ class WriteCLI(CLICommand):
# TODO: should probably ensure these exist here
manifests = ManifestParser()
manifests.read(*args)
# print the resultant query
manifests.write(global_tags=tags, global_kwargs=kwargs)
-
class HelpCLI(CLICommand):
"""
get help on a command
"""
usage = '%prog [options] help [command]'
def __call__(self, options, args):
if len(args) == 1 and args[0] in commands:
commands[args[0]](self._parser).parser().print_help()
else:
self._parser.print_help()
print '\nCommands:'
for command in sorted(commands):
print ' %s : %s' % (command, commands[command].__doc__.strip())
+
class UpdateCLI(CLICommand):
"""
update the tests as listed in a manifest from a directory
"""
usage = '%prog [options] update manifest directory -tag1 -tag2 --key1=value1 --key2=value2 ...'
def __call__(self, options, args):
# parse the arguments
@@ -197,20 +204,21 @@ class UpdateCLI(CLICommand):
manifests = ManifestParser()
manifests.read(args[0])
# print the resultant query
manifests.update(args[1], None, *tags, **kwargs)
# command -> class mapping
-commands = { 'create': CreateCLI,
- 'help': HelpCLI,
- 'update': UpdateCLI,
- 'write': WriteCLI }
+commands = {'create': CreateCLI,
+ 'help': HelpCLI,
+ 'update': UpdateCLI,
+ 'write': WriteCLI}
+
def main(args=sys.argv[1:]):
"""console_script entry point"""
# set up an option parser
usage = '%prog [options] [command] ...'
description = "%s. Use `help` to display commands" % __doc__.strip()
parser = OptionParser(usage=usage, description=description)
@@ -223,15 +231,16 @@ def main(args=sys.argv[1:]):
if not args:
HelpCLI(parser)(options, args)
parser.exit()
# get the command
command = args[0]
if command not in commands:
- parser.error("Command must be one of %s (you gave '%s')" % (', '.join(sorted(commands.keys())), command))
+ parser.error("Command must be one of %s (you gave '%s')" %
+ (', '.join(sorted(commands.keys())), command))
handler = commands[command](parser)
handler(options, args[1:])
if __name__ == '__main__':
main()
--- a/testing/mozbase/manifestparser/manifestparser/expression.py
+++ b/testing/mozbase/manifestparser/manifestparser/expression.py
@@ -40,119 +40,155 @@ import traceback
# argument.
# Glossary (see above URL for details):
# - nud: null denotation
# - led: left detonation
# - lbp: left binding power
# - rbp: right binding power
+
class ident_token(object):
+
def __init__(self, scanner, value):
self.value = value
+
def nud(self, parser):
# identifiers take their value from the value mappings passed
# to the parser
return parser.value(self.value)
+
class literal_token(object):
+
def __init__(self, scanner, value):
self.value = value
+
def nud(self, parser):
return self.value
+
class eq_op_token(object):
"=="
+
def led(self, parser, left):
return left == parser.expression(self.lbp)
+
class neq_op_token(object):
"!="
+
def led(self, parser, left):
return left != parser.expression(self.lbp)
+
class lt_op_token(object):
"<"
+
def led(self, parser, left):
return left < parser.expression(self.lbp)
+
class gt_op_token(object):
">"
+
def led(self, parser, left):
return left > parser.expression(self.lbp)
+
class le_op_token(object):
"<="
+
def led(self, parser, left):
return left <= parser.expression(self.lbp)
+
class ge_op_token(object):
">="
+
def led(self, parser, left):
return left >= parser.expression(self.lbp)
+
class not_op_token(object):
"!"
+
def nud(self, parser):
return not parser.expression(100)
+
class and_op_token(object):
"&&"
+
def led(self, parser, left):
right = parser.expression(self.lbp)
return left and right
+
class or_op_token(object):
"||"
+
def led(self, parser, left):
right = parser.expression(self.lbp)
return left or right
+
class lparen_token(object):
"("
+
def nud(self, parser):
expr = parser.expression()
parser.advance(rparen_token)
return expr
+
class rparen_token(object):
")"
+
class end_token(object):
"""always ends parsing"""
-### derived literal tokens
+# derived literal tokens
+
class bool_token(literal_token):
+
def __init__(self, scanner, value):
- value = {'true':True, 'false':False}[value]
+ value = {'true': True, 'false': False}[value]
literal_token.__init__(self, scanner, value)
+
class int_token(literal_token):
+
def __init__(self, scanner, value):
literal_token.__init__(self, scanner, int(value))
+
class string_token(literal_token):
+
def __init__(self, scanner, value):
literal_token.__init__(self, scanner, value[1:-1])
precedence = [(end_token, rparen_token),
(or_op_token,),
(and_op_token,),
(lt_op_token, gt_op_token, le_op_token, ge_op_token,
eq_op_token, neq_op_token),
(lparen_token,),
]
for index, rank in enumerate(precedence):
for token in rank:
- token.lbp = index # lbp = lowest left binding power
+ token.lbp = index # lbp = lowest left binding power
+
class ParseError(Exception):
"""error parsing conditional expression"""
+
class ExpressionParser(object):
"""
A parser for a simple expression language.
The expression language can be described as follows::
EXPRESSION ::= LITERAL | '(' EXPRESSION ')' | '!' EXPRESSION | EXPRESSION OP EXPRESSION
OP ::= '==' | '!=' | '<' | '>' | '<=' | '>=' | '&&' | '||'
@@ -210,17 +246,17 @@ class ExpressionParser(object):
(r">=", ge_op_token()),
(r"<", lt_op_token()),
(r">", gt_op_token()),
(r"\|\|", or_op_token()),
(r"!", not_op_token()),
(r"&&", and_op_token()),
(r"\(", lparen_token()),
(r"\)", rparen_token()),
- (r"\s+", None), # skip whitespace
+ (r"\s+", None), # skip whitespace
])
tokens, remainder = ExpressionParser.scanner.scan(self.text)
for t in tokens:
yield t
yield end_token()
def value(self, ident):
"""
@@ -233,17 +269,17 @@ class ExpressionParser(object):
return self.valuemapping.get(ident, None)
def advance(self, expected):
"""
Assert that the next token is an instance of |expected|, and advance
to the next token.
"""
if not isinstance(self.token, expected):
- raise Exception, "Unexpected token!"
+ raise Exception("Unexpected token!")
self.token = self.iter.next()
def expression(self, rbp=0):
"""
Parse and return the value of an expression until a token with
right binding power greater than rbp is encountered.
"""
t = self.token
@@ -263,17 +299,20 @@ class ExpressionParser(object):
"""
try:
self.iter = self._tokenize()
self.token = self.iter.next()
return self.expression()
except:
extype, ex, tb = sys.exc_info()
formatted = ''.join(traceback.format_exception_only(extype, ex))
- raise ParseError("could not parse: %s\nexception: %svariables: %s" % (self.text, formatted, self.valuemapping)), None, tb
+ raise ParseError("could not parse: "
+ "%s\nexception: %svariables: %s" % (self.text,
+ formatted,
+ self.valuemapping)), None, tb
__call__ = parse
def parse(text, **values):
"""
Parse and evaluate a boolean expression.
:param text: The expression to parse, as a string.
--- a/testing/mozbase/manifestparser/manifestparser/filters.py
+++ b/testing/mozbase/manifestparser/manifestparser/filters.py
@@ -112,16 +112,17 @@ class subsuite(InstanceFilter):
subsuite = foo,condition
where 'foo' is the subsuite name, and 'condition' is the same type of
condition used for skip-if. If the condition doesn't evaluate to true,
the subsuite designation will be removed from the test.
:param name: The name of the subsuite to run (default None)
"""
+
def __init__(self, name=None):
InstanceFilter.__init__(self, name=name)
self.name = name
def __call__(self, tests, values):
# Look for conditional subsuites, and replace them with the subsuite
# itself (if the condition is true), or nothing.
for test in tests:
@@ -222,17 +223,17 @@ class chunk_by_dir(InstanceFilter):
ordered_dirs = []
for test in tests:
path = test['relpath']
if path.startswith(os.sep):
path = path[1:]
dirs = path.split(os.sep)
- dirs = dirs[:min(self.depth, len(dirs)-1)]
+ dirs = dirs[:min(self.depth, len(dirs) - 1)]
path = os.sep.join(dirs)
# don't count directories that only have disabled tests in them,
# but still yield disabled tests that are alongside enabled tests
if path not in ordered_dirs and 'disabled' not in test:
ordered_dirs.append(path)
tests_by_dir[path].append(test)
@@ -298,17 +299,17 @@ class chunk_by_runtime(InstanceFilter):
for runtime, batch in tests_by_manifest:
# sort first by runtime, then by number of tests in case of a tie.
# This guarantees the chunk with the fastest runtime will always
# get the next batch of tests.
tests_by_chunk.sort(key=lambda x: (x[0], len(x[1])))
tests_by_chunk[0][0] += runtime
tests_by_chunk[0][1].extend(batch)
- return (t for t in tests_by_chunk[self.this_chunk-1][1])
+ return (t for t in tests_by_chunk[self.this_chunk - 1][1])
class tags(InstanceFilter):
"""
Removes tests that don't contain any of the given tags. This overrides
InstanceFilter's __eq__ method, so multiple instances can be added.
Multiple tag filters is equivalent to joining tags with the AND operator.
--- a/testing/mozbase/manifestparser/manifestparser/ini.py
+++ b/testing/mozbase/manifestparser/manifestparser/ini.py
@@ -1,15 +1,16 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
+import os
+
__all__ = ['read_ini']
-import os
def read_ini(fp, variables=None, default='DEFAULT', defaults_only=False,
comments=';#', separators=('=', ':'),
strict=True):
"""
read an .ini file and return a list of [(section, values)]
- fp : file pointer or path to read
- variables : default set of variables
@@ -53,17 +54,18 @@ def read_ini(fp, variables=None, default
if strict:
assert default not in section_names
section_names.add(default)
current_section = variables
continue
if strict:
# make sure this section doesn't already exist
- assert section not in section_names, "Section '%s' already found in '%s'" % (section, section_names)
+ assert section not in section_names, "Section '%s' already found in '%s'" % (
+ section, section_names)
section_names.add(section)
current_section = {}
sections.append((section, current_section))
continue
# if there aren't any sections yet, something bad happen
if not section_names:
@@ -114,15 +116,16 @@ def read_ini(fp, variables=None, default
variables = global_dict.copy()
# These variables are combinable when they appear both in default
# and per-entry.
for field_name, pattern in (('skip-if', '(%s) || (%s)'),
('support-files', '%s %s')):
local_value, global_value = local_dict.get(field_name), variables.get(field_name)
if local_value and global_value:
- local_dict[field_name] = pattern % (global_value.split('#')[0], local_value.split('#')[0])
+ local_dict[field_name] = pattern % (
+ global_value.split('#')[0], local_value.split('#')[0])
variables.update(local_dict)
return variables
sections = [(i, interpret_variables(variables, j)) for i, j in sections]
return sections
--- a/testing/mozbase/manifestparser/manifestparser/manifestparser.py
+++ b/testing/mozbase/manifestparser/manifestparser/manifestparser.py
@@ -1,50 +1,51 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
-__all__ = ['ManifestParser', 'TestManifest', 'convert']
-
from StringIO import StringIO
import json
import fnmatch
import os
import shutil
import sys
import types
from .ini import read_ini
from .filters import (
DEFAULT_FILTERS,
enabled,
exists as _exists,
filterlist,
)
+__all__ = ['ManifestParser', 'TestManifest', 'convert']
+
relpath = os.path.relpath
string = (basestring,)
-### path normalization
+# path normalization
def normalize_path(path):
"""normalize a relative path"""
if sys.platform.startswith('win'):
return path.replace('/', os.path.sep)
return path
+
def denormalize_path(path):
"""denormalize a relative path"""
if sys.platform.startswith('win'):
return path.replace(os.path.sep, '/')
return path
-### objects for parsing manifests
+# objects for parsing manifests
class ManifestParser(object):
"""read .ini manifests"""
def __init__(self, manifests=(), defaults=None, strict=True, rootdir=None,
finder=None):
"""Creates a ManifestParser from the given manifest files.
@@ -77,17 +78,17 @@ class ManifestParser(object):
if manifests:
self.read(*manifests)
def path_exists(self, path):
if self.finder:
return self.finder.get(path) is not None
return os.path.exists(path)
- ### methods for reading manifests
+ # methods for reading manifests
def _read(self, root, filename, defaults, defaults_only=False, parentmanifest=None):
"""
Internal recursive method for reading and parsing manifests.
Stores all found tests in self.tests
:param root: The base path
:param filename: File object or string path for the base manifest file
:param defaults: Options that apply to all items
@@ -181,17 +182,17 @@ class ManifestParser(object):
test['name'] = section
# Will be None if the manifest being read is a file-like object.
test['manifest'] = filename
# determine the path
path = test.get('path', section)
_relpath = path
- if '://' not in path: # don't futz with URLs
+ if '://' not in path: # don't futz with URLs
path = normalize_path(path)
if here and not os.path.isabs(path):
# Profiling indicates 25% of manifest parsing is spent
# in this call to normpath, but almost all calls return
# their argument unmodified, so we avoid the call if
# '..' if not present in the path.
path = os.path.join(here, path)
if '..' in path:
@@ -255,27 +256,26 @@ class ManifestParser(object):
# process each file
for filename in filenames:
# set the per file defaults
defaults = _defaults.copy()
here = None
if isinstance(filename, string):
here = os.path.dirname(os.path.abspath(filename))
- defaults['here'] = here # directory of master .ini file
+ defaults['here'] = here # directory of master .ini file
if self.rootdir is None:
# set the root directory
# == the directory of the first manifest given
self.rootdir = here
self._read(here, filename, defaults)
-
- ### methods for querying manifests
+ # methods for querying manifests
def query(self, *checks, **kw):
"""
general query function for tests
- checks : callable conditions to test if the test fulfills the query
"""
tests = kw.get('tests', None)
if tests is None:
@@ -299,24 +299,28 @@ class ManifestParser(object):
# fix up tags
if tags:
tags = set(tags)
else:
tags = set()
# make some check functions
if inverse:
- has_tags = lambda test: not tags.intersection(test.keys())
+ def has_tags(test):
+ return not tags.intersection(test.keys())
+
def dict_query(test):
for key, value in kwargs.items():
if test.get(key) == value:
return False
return True
else:
- has_tags = lambda test: tags.issubset(test.keys())
+ def has_tags(test):
+ return tags.issubset(test.keys())
+
def dict_query(test):
for key, value in kwargs.items():
if test.get(key) != value:
return False
return True
# query the tests
tests = self.query(has_tags, dict_query, tests=tests)
@@ -344,18 +348,17 @@ class ManifestParser(object):
continue
if manifest not in manifests:
manifests.append(manifest)
return manifests
def paths(self):
return [i['path'] for i in self.tests]
-
- ### methods for auditing
+ # methods for auditing
def missing(self, tests=None):
"""
return list of tests that do not exist on the filesystem
"""
if tests is None:
tests = self.tests
existing = list(_exists(tests, {}))
@@ -365,17 +368,17 @@ class ManifestParser(object):
missing = self.missing(tests=tests)
if missing:
missing_paths = [test['path'] for test in missing]
if self.strict:
raise IOError("Strict mode enabled, test paths must exist. "
"The following test(s) are missing: %s" %
json.dumps(missing_paths, indent=2))
print >> sys.stderr, "Warning: The following test(s) are missing: %s" % \
- json.dumps(missing_paths, indent=2)
+ json.dumps(missing_paths, indent=2)
return missing
def verifyDirectory(self, directories, pattern=None, extensions=None):
"""
checks what is on the filesystem vs what is in a manifest
returns a 2-tuple of sets:
(missing_from_filesystem, missing_from_manifest)
"""
@@ -399,18 +402,17 @@ class ManifestParser(object):
files.update([os.path.join(dirpath, filename) for filename in filenames])
paths = set(self.paths())
missing_from_filesystem = paths.difference(files)
missing_from_manifest = files.difference(paths)
return (missing_from_filesystem, missing_from_manifest)
-
- ### methods for output
+ # methods for output
def write(self, fp=sys.stdout, rootdir=None,
global_tags=None, global_kwargs=None,
local_tags=None, local_kwargs=None):
"""
write a manifest given a query
global and local options will be munged to do the query
globals will be written to the top of the file
@@ -449,17 +451,17 @@ class ManifestParser(object):
print >> fp, '[DEFAULT]'
for tag in global_tags:
print >> fp, '%s =' % tag
for key, value in global_kwargs.items():
print >> fp, '%s = %s' % (key, value)
print >> fp
for test in tests:
- test = test.copy() # don't overwrite
+ test = test.copy() # don't overwrite
path = test['name']
if not os.path.isabs(path):
path = test['path']
if self.rootdir:
path = relpath(test['path'], self.rootdir)
path = denormalize_path(path)
print >> fp, '[%s]' % path
@@ -504,17 +506,17 @@ class ManifestParser(object):
os.path.makedirs(directory)
else:
# sanity check
assert os.path.isdir(directory)
# tests to copy
tests = self.get(tags=tags, **kwargs)
if not tests:
- return # nothing to do!
+ return # nothing to do!
# root directory
if rootdir is None:
rootdir = self.rootdir
# copy the manifests + tests
manifests = [relpath(manifest, rootdir) for manifest in self.manifests()]
for manifest in manifests:
@@ -562,42 +564,45 @@ class ManifestParser(object):
message = "Missing test: '%s' does not exist!"
if self.strict:
raise IOError(message)
print >> sys.stderr, message + " Skipping."
continue
destination = os.path.join(rootdir, _relpath)
shutil.copy(source, destination)
- ### directory importers
+ # directory importers
@classmethod
def _walk_directories(cls, directories, callback, pattern=None, ignore=()):
"""
internal function to import directories
"""
if isinstance(pattern, basestring):
patterns = [pattern]
else:
patterns = pattern
ignore = set(ignore)
if not patterns:
- accept_filename = lambda filename: True
+ def accept_filename(filename):
+ return True
else:
def accept_filename(filename):
for pattern in patterns:
if fnmatch.fnmatch(filename, pattern):
return True
if not ignore:
- accept_dirname = lambda dirname: True
+ def accept_dirname(dirname):
+ return True
else:
- accept_dirname = lambda dirname: dirname not in ignore
+ def accept_dirname(dirname):
+ return dirname not in ignore
rootdirectories = directories[:]
seen_directories = set()
for rootdirectory in rootdirectories:
# let's recurse directories using list
directories = [os.path.realpath(rootdirectory)]
while directories:
directory = directories.pop(0)
@@ -627,22 +632,22 @@ class ManifestParser(object):
# this subdir is added for recursion
directories.insert(0, path)
# here we got all subdirs and files filtered, we can
# call the callback function if directory is not empty
if subdirs or files:
callback(rootdirectory, directory, subdirs, files)
-
@classmethod
- def populate_directory_manifests(cls, directories, filename, pattern=None, ignore=(), overwrite=False):
+ def populate_directory_manifests(cls, directories, filename, pattern=None, ignore=(),
+ overwrite=False):
"""
- walks directories and writes manifests of name `filename` in-place; returns `cls` instance populated
- with the given manifests
+ walks directories and writes manifests of name `filename` in-place;
+ returns `cls` instance populated with the given manifests
filename -- filename of manifests to write
pattern -- shell pattern (glob) or patterns of filenames to match
ignore -- directory names to ignore
overwrite -- whether to overwrite existing files of given name
"""
manifest_dict = {}
@@ -687,20 +692,19 @@ class ManifestParser(object):
pattern -- shell pattern (glob) or patterns of filenames to match
ignore -- directory names to ignore
write -- filename or file-like object of manifests to write;
if `None` then a StringIO instance will be created
relative_to -- write paths relative to this path;
if false then the paths are absolute
"""
-
# determine output
- opened_manifest_file = None # name of opened manifest file
- absolute = not relative_to # whether to output absolute path names as names
+ opened_manifest_file = None # name of opened manifest file
+ absolute = not relative_to # whether to output absolute path names as names
if isinstance(write, string):
opened_manifest_file = write
write = file(write, 'w')
if write is None:
write = StringIO()
# walk the directories, generating manifests
def callback(directory, dirpath, dirnames, filenames):
@@ -713,33 +717,31 @@ class ManifestParser(object):
if filename != opened_manifest_file]
# normalize paths
if not absolute and relative_to:
filenames = [relpath(filename, relative_to)
for filename in filenames]
# write to manifest
print >> write, '\n'.join(['[%s]' % denormalize_path(filename)
- for filename in filenames])
-
+ for filename in filenames])
cls._walk_directories(directories, callback, pattern=pattern, ignore=ignore)
if opened_manifest_file:
# close file
write.close()
manifests = [opened_manifest_file]
else:
# manifests/write is a file-like object;
# rewind buffer
write.flush()
write.seek(0)
manifests = [write]
-
# make a ManifestParser instance
return cls(manifests=manifests)
convert = ManifestParser.from_directories
class TestManifest(ManifestParser):
"""
@@ -757,17 +759,17 @@ class TestManifest(ManifestParser):
Run all applied filters on the set of tests.
:param exists: filter out non-existing tests (default True)
:param disabled: whether to return disabled tests (default True)
:param values: keys and values to filter on (e.g. `os = linux mac`)
:param filters: list of filters to apply to the tests
:returns: list of test objects that were not filtered out
"""
- tests = [i.copy() for i in self.tests] # shallow copy
+ tests = [i.copy() for i in self.tests] # shallow copy
# mark all tests as passing
for test in tests:
test['expected'] = test.get('expected', 'pass')
# make a copy so original doesn't get modified
fltrs = self.filters[:]
if exists:
--- a/testing/mozbase/manifestparser/setup.py
+++ b/testing/mozbase/manifestparser/setup.py
@@ -6,22 +6,22 @@ from setuptools import setup
PACKAGE_NAME = "manifestparser"
PACKAGE_VERSION = '1.1'
setup(name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description="Library to create and manage test manifests",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla manifests',
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
zip_safe=False,
packages=['manifestparser'],
install_requires=[],
entry_points="""
[console_scripts]
manifestparser = manifestparser.cli:main
""",
- )
+ )
--- a/testing/mozbase/manifestparser/tests/test_chunking.py
+++ b/testing/mozbase/manifestparser/tests/test_chunking.py
@@ -28,17 +28,17 @@ class ChunkBySlice(TestCase):
return tests
def run_all_combos(self, num_tests, disabled=None):
tests = self.generate_tests(num_tests, disabled=disabled)
for total in range(1, num_tests + 1):
res = []
res_disabled = []
- for chunk in range(1, total+1):
+ for chunk in range(1, total + 1):
f = chunk_by_slice(chunk, total)
res.append(list(f(tests, {})))
if disabled:
f.disabled = True
res_disabled.append(list(f(tests, {})))
lengths = [len([t for t in c if 'disabled' not in t]) for c in res]
# the chunk with the most tests should have at most one more test
@@ -91,30 +91,30 @@ class ChunkByDir(TestCase):
name = 'test%i' % i
test = {'name': name,
'relpath': os.path.join(d, name)}
yield test
def run_all_combos(self, dirs):
tests = list(self.generate_tests(dirs))
- deepest = max(len(t['relpath'].split(os.sep))-1 for t in tests)
- for depth in range(1, deepest+1):
+ deepest = max(len(t['relpath'].split(os.sep)) - 1 for t in tests)
+ for depth in range(1, deepest + 1):
def num_groups(tests):
unique = set()
for p in [t['relpath'] for t in tests]:
p = p.split(os.sep)
- p = p[:min(depth, len(p)-1)]
+ p = p[:min(depth, len(p) - 1)]
unique.add(os.sep.join(p))
return len(unique)
- for total in range(1, num_groups(tests)+1):
+ for total in range(1, num_groups(tests) + 1):
res = []
- for this in range(1, total+1):
+ for this in range(1, total + 1):
f = chunk_by_dir(this, total, depth)
res.append(list(f(tests, {})))
lengths = map(num_groups, res)
# the chunk with the most dirs should have at most one more
# dir than the chunk with the least dirs
self.assertLessEqual(max(lengths) - min(lengths), 1)
@@ -203,36 +203,36 @@ class ChunkByRuntime(TestCase):
chunks = [[] for i in range(total)]
d = 1 # direction
i = 0
for runtime, batch in tests_by_manifest:
chunks[i].extend(batch)
# "draft" style (last pick goes first in the next round)
- if (i == 0 and d == -1) or (i == total-1 and d == 1):
+ if (i == 0 and d == -1) or (i == total - 1 and d == 1):
d = -d
else:
i += d
# make sure this test algorithm is valid
all_chunks = list(chain.from_iterable(chunks))
self.assertEqual(len(all_chunks), len(tests))
for t in tests:
self.assertIn(t, all_chunks)
return chunks
def run_all_combos(self, dirs):
tests = list(self.generate_tests(dirs))
runtimes = self.get_runtimes(tests)
- for total in range(1, len(dirs)+1):
+ for total in range(1, len(dirs) + 1):
chunks = []
- for this in range(1, total+1):
+ for this in range(1, total + 1):
f = chunk_by_runtime(this, total, runtimes)
ret = list(f(tests, {}))
chunks.append(ret)
# chunk_by_runtime will mess up order, but chained chunks should
# contain all of the original tests and be the same length
all_chunks = list(chain.from_iterable(chunks))
self.assertEqual(len(all_chunks), len(tests))
--- a/testing/mozbase/manifestparser/tests/test_convert_directory.py
+++ b/testing/mozbase/manifestparser/tests/test_convert_directory.py
@@ -15,22 +15,25 @@ from manifestparser import ManifestParse
here = os.path.dirname(os.path.abspath(__file__))
# In some cases tempfile.mkdtemp() may returns a path which contains
# symlinks. Some tests here will then break, as the manifestparser.convert
# function returns paths that does not contains symlinks.
#
# Workaround is to use the following function, if absolute path of temp dir
# must be compared.
+
+
def create_realpath_tempdir():
"""
Create a tempdir without symlinks.
"""
return os.path.realpath(tempfile.mkdtemp())
+
class TestDirectoryConversion(unittest.TestCase):
"""test conversion of a directory tree to a manifest structure"""
def create_stub(self, directory=None):
"""stub out a directory with files in it"""
files = ('foo', 'bar', 'fleem')
if directory is None:
@@ -51,34 +54,34 @@ class TestDirectoryConversion(unittest.T
# create a stub directory
stub = self.create_stub()
try:
stub = stub.replace(os.path.sep, "/")
self.assertTrue(os.path.exists(stub) and os.path.isdir(stub))
# Make a manifest for it
manifest = convert([stub])
- self.assertEqual(str(manifest),
-"""[%(stub)s/bar]
+ out_tmpl = """[%(stub)s/bar]
subsuite =
[%(stub)s/fleem]
subsuite =
[%(stub)s/foo]
subsuite =
[%(stub)s/subdir/subfile]
subsuite =
-""" % dict(stub=stub))
+""" # noqa
+ self.assertEqual(str(manifest), out_tmpl % dict(stub=stub))
except:
raise
finally:
- shutil.rmtree(stub) # cleanup
+ shutil.rmtree(stub) # cleanup
def test_convert_directory_manifests_in_place(self):
"""
keep the manifests in place
"""
stub = self.create_stub()
try:
@@ -98,17 +101,18 @@ subsuite =
finally:
shutil.rmtree(stub)
def test_manifest_ignore(self):
"""test manifest `ignore` parameter for ignoring directories"""
stub = self.create_stub()
try:
- ManifestParser.populate_directory_manifests([stub], filename='manifest.ini', ignore=('subdir',))
+ ManifestParser.populate_directory_manifests(
+ [stub], filename='manifest.ini', ignore=('subdir',))
parser = ManifestParser()
parser.read(os.path.join(stub, 'manifest.ini'))
self.assertEqual([i['name'] for i in parser.tests],
['bar', 'fleem', 'foo'])
self.assertFalse(os.path.exists(os.path.join(stub, 'subdir', 'manifest.ini')))
except:
raise
finally:
@@ -157,25 +161,25 @@ subsuite =
paths = [str(i) for i in range(10)]
self.assertEqual([i['name'] for i in manifest.missing()],
paths)
# But then we copy one over:
self.assertEqual(manifest.get('name', name='1'), ['1'])
manifest.update(tempdir, name='1')
self.assertEqual(sorted(os.listdir(newtempdir)),
- ['1', 'manifest.ini'])
+ ['1', 'manifest.ini'])
# Update that one file and copy all the "tests":
file(os.path.join(tempdir, '1'), 'w').write('secret door')
manifest.update(tempdir)
self.assertEqual(sorted(os.listdir(newtempdir)),
- ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'manifest.ini'])
+ ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'manifest.ini'])
self.assertEqual(file(os.path.join(newtempdir, '1')).read().strip(),
- 'secret door')
+ 'secret door')
# clean up:
shutil.rmtree(tempdir)
shutil.rmtree(newtempdir)
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/manifestparser/tests/test_convert_symlinks.py
+++ b/testing/mozbase/manifestparser/tests/test_convert_symlinks.py
@@ -6,16 +6,17 @@
import os
import shutil
import tempfile
import unittest
from manifestparser import convert, ManifestParser
+
class TestSymlinkConversion(unittest.TestCase):
"""
test conversion of a directory tree with symlinks to a manifest structure
"""
def create_stub(self, directory=None):
"""stub out a directory with files in it"""
@@ -120,16 +121,17 @@ class TestSymlinkConversion(unittest.Tes
os.symlink(os.path.join('..', 'dir2'),
os.path.join(workspace, 'dir1', 'ldir2'))
# create one file in each dir
open(os.path.join(workspace, 'dir1', 'f1.txt'), 'a').close()
open(os.path.join(workspace, 'dir1', 'ldir2', 'f2.txt'), 'a').close()
data = []
+
def callback(rootdirectory, directory, subdirs, files):
for f in files:
data.append(f)
ManifestParser._walk_directories([workspace], callback)
self.assertEqual(sorted(data), ['f1.txt', 'f2.txt'])
--- a/testing/mozbase/manifestparser/tests/test_default_overrides.py
+++ b/testing/mozbase/manifestparser/tests/test_default_overrides.py
@@ -5,38 +5,40 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
from manifestparser import ManifestParser
here = os.path.dirname(os.path.abspath(__file__))
+
class TestDefaultSkipif(unittest.TestCase):
"""Tests applying a skip-if condition in [DEFAULT] and || with the value for the test"""
-
def test_defaults(self):
default = os.path.join(here, 'default-skipif.ini')
parser = ManifestParser(manifests=(default,))
for test in parser.tests:
if test['name'] == 'test1':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (debug)")
elif test['name'] == 'test2':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (os == 'linux')")
elif test['name'] == 'test3':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (os == 'win')")
elif test['name'] == 'test4':
- self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (os == 'win' && debug)")
+ self.assertEqual(
+ test['skip-if'], "(os == 'win' && debug ) || (os == 'win' && debug)")
elif test['name'] == 'test5':
self.assertEqual(test['skip-if'], "os == 'win' && debug # a pesky comment")
elif test['name'] == 'test6':
self.assertEqual(test['skip-if'], "(os == 'win' && debug ) || (debug )")
+
class TestDefaultSupportFiles(unittest.TestCase):
"""Tests combining support-files field in [DEFAULT] with the value for a test"""
def test_defaults(self):
default = os.path.join(here, 'default-suppfiles.ini')
parser = ManifestParser(manifests=(default,))
expected_supp_files = {
--- a/testing/mozbase/manifestparser/tests/test_expressionparser.py
+++ b/testing/mozbase/manifestparser/tests/test_expressionparser.py
@@ -1,13 +1,14 @@
#!/usr/bin/env python
import unittest
from manifestparser import parse
+
class ExpressionParserTest(unittest.TestCase):
"""Test the conditional expression parser."""
def test_basic(self):
self.assertEqual(parse("1"), 1)
self.assertEqual(parse("100"), 100)
self.assertEqual(parse("true"), True)
@@ -59,17 +60,16 @@ class ExpressionParserTest(unittest.Test
self.assertTrue(parse("(true || false)"))
self.assertTrue(parse("(true && true || false)"))
self.assertFalse(parse("(true || false) && false"))
self.assertTrue(parse("(true || false) && true"))
self.assertTrue(parse("true && (true || false)"))
self.assertTrue(parse("true && (true || false)"))
self.assertTrue(parse("(true && false) || (true && (true || false))"))
-
def test_comments(self):
# comments in expressions work accidentally, via an implementation
# detail - the '#' character doesn't match any of the regular
# expressions we specify as tokens, and thus are ignored.
# However, having explicit tests for them means that should the
# implementation ever change, comments continue to work, even if that
# means a new implementation must handle them explicitly.
self.assertTrue(parse("true == true # it does!"))
--- a/testing/mozbase/manifestparser/tests/test_filters.py
+++ b/testing/mozbase/manifestparser/tests/test_filters.py
@@ -1,9 +1,10 @@
#!/usr/bin/env python
+# flake8: noqa
from copy import deepcopy
import os
import unittest
from manifestparser.filters import (
subsuite,
tags,
@@ -139,17 +140,17 @@ class BuiltinFilters(unittest.TestCase):
def test_subsuite(self):
sub1 = subsuite()
sub2 = subsuite('baz')
tests = deepcopy(self.tests)
tests = list(sub1(tests, {}))
self.assertNotIn(self.tests[5], tests)
- self.assertEquals(len(tests), len(self.tests)-1)
+ self.assertEquals(len(tests), len(self.tests) - 1)
tests = deepcopy(self.tests)
tests = list(sub2(tests, {}))
self.assertEquals(len(tests), 1)
self.assertIn(self.tests[5], tests)
def test_subsuite_condition(self):
sub1 = subsuite()
--- a/testing/mozbase/manifestparser/tests/test_manifestparser.py
+++ b/testing/mozbase/manifestparser/tests/test_manifestparser.py
@@ -8,16 +8,17 @@ import os
import shutil
import tempfile
import unittest
from manifestparser import ManifestParser
from StringIO import StringIO
here = os.path.dirname(os.path.abspath(__file__))
+
class TestManifestParser(unittest.TestCase):
"""
Test the manifest parser
You must have manifestparser installed before running these tests.
Run ``python manifestparser.py setup develop`` with setuptools installed.
"""
@@ -36,57 +37,59 @@ class TestManifestParser(unittest.TestCa
# Show how you select subsets of tests:
mozmill_restart_example = os.path.join(here, 'mozmill-restart-example.ini')
parser.read(mozmill_restart_example)
restart_tests = parser.get(type='restart')
self.assertTrue(len(restart_tests) < len(parser.tests))
self.assertEqual(len(restart_tests), len(parser.get(manifest=mozmill_restart_example)))
self.assertFalse([test for test in restart_tests
- if test['manifest'] != os.path.join(here, 'mozmill-restart-example.ini')])
+ if test['manifest'] != os.path.join(here,
+ 'mozmill-restart-example.ini')])
self.assertEqual(parser.get('name', tags=['foo']),
['restartTests/testExtensionInstallUninstall/test2.js',
'restartTests/testExtensionInstallUninstall/test1.js'])
self.assertEqual(parser.get('name', foo='bar'),
['restartTests/testExtensionInstallUninstall/test2.js'])
def test_include(self):
"""Illustrate how include works"""
include_example = os.path.join(here, 'include-example.ini')
parser = ManifestParser(manifests=(include_example,))
# All of the tests should be included, in order:
self.assertEqual(parser.get('name'),
['crash-handling', 'fleem', 'flowers'])
- self.assertEqual([(test['name'], os.path.basename(test['manifest'])) for test in parser.tests],
- [('crash-handling', 'bar.ini'), ('fleem', 'include-example.ini'), ('flowers', 'foo.ini')])
+ self.assertEqual([(test['name'], os.path.basename(test['manifest']))
+ for test in parser.tests],
+ [('crash-handling', 'bar.ini'),
+ ('fleem', 'include-example.ini'),
+ ('flowers', 'foo.ini')])
# The including manifest is always reported as a part of the generated test object.
self.assertTrue(all([t['ancestor-manifest'] == include_example
for t in parser.tests if t['name'] != 'fleem']))
-
# The manifests should be there too:
self.assertEqual(len(parser.manifests()), 3)
# We already have the root directory:
self.assertEqual(here, parser.rootdir)
-
# DEFAULT values should persist across includes, unless they're
# overwritten. In this example, include-example.ini sets foo=bar, but
# it's overridden to fleem in bar.ini
self.assertEqual(parser.get('name', foo='bar'),
['fleem', 'flowers'])
self.assertEqual(parser.get('name', foo='fleem'),
['crash-handling'])
# Passing parameters in the include section allows defining variables in
- #the submodule scope:
+ # the submodule scope:
self.assertEqual(parser.get('name', tags=['red']),
['flowers'])
# However, this should be overridable from the DEFAULT section in the
# included file and that overridable via the key directly connected to
# the test:
self.assertEqual(parser.get(name='flowers')[0]['blue'],
'ocean')
@@ -102,40 +105,53 @@ class TestManifestParser(unittest.TestCa
['crash-handling', 'fleem'])
# All of the included tests actually exist:
self.assertEqual([i['name'] for i in parser.missing()], [])
# Write the output to a manifest:
buffer = StringIO()
parser.write(fp=buffer, global_kwargs={'foo': 'bar'})
+ expected_output = """[DEFAULT]
+foo = bar
+
+[fleem]
+subsuite =
+
+[include/flowers]
+blue = ocean
+red = roses
+subsuite =
+yellow = submarine""" # noqa
+
self.assertEqual(buffer.getvalue().strip(),
- '[DEFAULT]\nfoo = bar\n\n[fleem]\nsubsuite = \n\n[include/flowers]\nblue = ocean\nred = roses\nsubsuite = \nyellow = submarine')
+ expected_output)
def test_invalid_path(self):
"""
Test invalid path should not throw when not strict
"""
manifest = os.path.join(here, 'include-invalid.ini')
- parser = ManifestParser(manifests=(manifest,), strict=False)
+ ManifestParser(manifests=(manifest,), strict=False)
def test_parent_inheritance(self):
"""
Test parent manifest variable inheritance
Specifically tests that inherited variables from parent includes
properly propagate downstream
"""
parent_example = os.path.join(here, 'parent', 'level_1', 'level_2',
'level_3', 'level_3.ini')
parser = ManifestParser(manifests=(parent_example,))
# Parent manifest test should not be included
self.assertEqual(parser.get('name'),
['test_3'])
- self.assertEqual([(test['name'], os.path.basename(test['manifest'])) for test in parser.tests],
+ self.assertEqual([(test['name'], os.path.basename(test['manifest']))
+ for test in parser.tests],
[('test_3', 'level_3.ini')])
# DEFAULT values should be the ones from level 1
self.assertEqual(parser.get('name', x='level_1'),
['test_3'])
# Write the output to a manifest:
buffer = StringIO()
@@ -149,17 +165,18 @@ class TestManifestParser(unittest.TestCa
"""
parent_example = os.path.join(here, 'parent', 'level_1', 'level_2',
'level_3', 'level_3_default.ini')
parser = ManifestParser(manifests=(parent_example,))
# Parent manifest test should not be included
self.assertEqual(parser.get('name'),
['test_3'])
- self.assertEqual([(test['name'], os.path.basename(test['manifest'])) for test in parser.tests],
+ self.assertEqual([(test['name'], os.path.basename(test['manifest']))
+ for test in parser.tests],
[('test_3', 'level_3_default.ini')])
# DEFAULT values should be the ones from level 3
self.assertEqual(parser.get('name', x='level_3'),
['test_3'])
# Write the output to a manifest:
buffer = StringIO()
@@ -186,17 +203,17 @@ class TestManifestParser(unittest.TestCa
Test server_root properly expands as an absolute path
"""
server_example = os.path.join(here, 'parent', 'level_1', 'level_2',
'level_3', 'level_3_server-root.ini')
parser = ManifestParser(manifests=(server_example,))
# A regular variable will inherit its value directly
self.assertEqual(parser.get('name', **{'other-root': '../root'}),
- ['test_3'])
+ ['test_3'])
# server-root will expand its value as an absolute path
# we will not find anything for the original value
self.assertEqual(parser.get('name', **{'server-root': '../root'}), [])
# check that the path has expanded
self.assertEqual(parser.get('server-root')[0],
os.path.join(here, 'parent', 'root'))
--- a/testing/mozbase/manifestparser/tests/test_read_ini.py
+++ b/testing/mozbase/manifestparser/tests/test_read_ini.py
@@ -10,16 +10,17 @@ is the default:
http://docs.python.org/2/library/configparser.html
"""
import unittest
from manifestparser import read_ini
from ConfigParser import ConfigParser
from StringIO import StringIO
+
class IniParserTest(unittest.TestCase):
def test_inline_comments(self):
"""
We have no inline comments; so we're testing to ensure we don't:
https://bugzilla.mozilla.org/show_bug.cgi?id=855288
"""
--- a/testing/mozbase/manifestparser/tests/test_testmanifest.py
+++ b/testing/mozbase/manifestparser/tests/test_testmanifest.py
@@ -13,19 +13,21 @@ here = os.path.dirname(os.path.abspath(_
class TestTestManifest(unittest.TestCase):
"""Test the Test Manifest"""
def test_testmanifest(self):
# Test filtering based on platform:
filter_example = os.path.join(here, 'filter-example.ini')
manifest = TestManifest(manifests=(filter_example,), strict=False)
- self.assertEqual([i['name'] for i in manifest.active_tests(os='win', disabled=False, exists=False)],
+ self.assertEqual([i['name'] for i in manifest.active_tests(os='win', disabled=False,
+ exists=False)],
['windowstest', 'fleem'])
- self.assertEqual([i['name'] for i in manifest.active_tests(os='linux', disabled=False, exists=False)],
+ self.assertEqual([i['name'] for i in manifest.active_tests(os='linux', disabled=False,
+ exists=False)],
['fleem', 'linuxtest'])
# Look for existing tests. There is only one:
self.assertEqual([i['name'] for i in manifest.active_tests()],
['fleem'])
# You should be able to expect failures:
last = manifest.active_tests(exists=False, toolkit='gtk2')[-1]
--- a/testing/mozbase/mozcrash/mozcrash/__init__.py
+++ b/testing/mozbase/mozcrash/mozcrash/__init__.py
@@ -1,8 +1,10 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
-mozcrash is a library for getting a stack trace out of processes that have crashed and left behind a minidump file using the Google Breakpad library.
+mozcrash is a library for getting a stack trace out of processes that have crashed
+and left behind a minidump file using the Google Breakpad library.
"""
from mozcrash import *
--- a/testing/mozbase/mozcrash/mozcrash/mozcrash.py
+++ b/testing/mozbase/mozcrash/mozcrash/mozcrash.py
@@ -1,36 +1,36 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
-__all__ = [
- 'check_for_crashes',
- 'check_for_java_exception',
- 'kill_and_get_minidump',
- 'log_crashes',
- 'cleanup_pending_crash_reports',
-]
-
import glob
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import urllib2
import zipfile
from collections import namedtuple
import mozfile
import mozinfo
import mozlog
+__all__ = [
+ 'check_for_crashes',
+ 'check_for_java_exception',
+ 'kill_and_get_minidump',
+ 'log_crashes',
+ 'cleanup_pending_crash_reports',
+]
+
StackInfo = namedtuple("StackInfo",
["minidump_path",
"signature",
"stackwalk_stdout",
"stackwalk_stderr",
"stackwalk_retcode",
"stackwalk_errors",
@@ -188,17 +188,18 @@ class CrashInfo(object):
def dump_files(self):
"""List of tuple (path_to_dump_file, path_to_extra_file) for each dump
file in self.dump_directory. The extra files may not exist."""
if self._dump_files is None:
self._dump_files = [(path, os.path.splitext(path)[0] + '.extra') for path in
glob.glob(os.path.join(self.dump_directory, '*.dmp'))]
max_dumps = 10
if len(self._dump_files) > max_dumps:
- self.logger.warning("Found %d dump files -- limited to %d!" % (len(self._dump_files), max_dumps))
+ self.logger.warning("Found %d dump files -- limited to %d!" %
+ (len(self._dump_files), max_dumps))
del self._dump_files[max_dumps:]
return self._dump_files
@property
def has_dumps(self):
"""Boolean indicating whether any crash dump files were found in the
current directory"""
@@ -233,17 +234,17 @@ class CrashInfo(object):
errors = []
signature = None
include_stderr = False
out = None
err = None
retcode = None
if (self.symbols_path and self.stackwalk_binary and
os.path.exists(self.stackwalk_binary) and
- os.access(self.stackwalk_binary, os.X_OK)):
+ os.access(self.stackwalk_binary, os.X_OK)):
command = [
self.stackwalk_binary,
path,
self.symbols_path
]
self.logger.info('Copy/paste: ' + ' '.join(command))
# run minidump_stackwalk
@@ -257,22 +258,23 @@ class CrashInfo(object):
if len(out) > 3:
# minidump_stackwalk is chatty,
# so ignore stderr when it succeeds.
# The top frame of the crash is always the line after "Thread N (crashed)"
# Examples:
# 0 libc.so + 0xa888
# 0 libnss3.so!nssCertificate_Destroy [certificate.c : 102 + 0x0]
- # 0 mozjs.dll!js::GlobalObject::getDebuggers() [GlobalObject.cpp:89df18f9b6da : 580 + 0x0]
- # 0 libxul.so!void js::gc::MarkInternal<JSObject>(JSTracer*, JSObject**) [Marking.cpp : 92 + 0x28]
+ # 0 mozjs.dll!js::GlobalObject::getDebuggers() [GlobalObject.cpp:89df18f9b6da : 580 + 0x0] # noqa
+ # 0 libxul.so!void js::gc::MarkInternal<JSObject>(JSTracer*, JSObject**)
+ # [Marking.cpp : 92 + 0x28]
lines = out.splitlines()
for i, line in enumerate(lines):
if "(crashed)" in line:
- match = re.search(r"^ 0 (?:.*!)?(?:void )?([^\[]+)", lines[i+1])
+ match = re.search(r"^ 0 (?:.*!)?(?:void )?([^\[]+)", lines[i + 1])
if match:
signature = "@ %s" % match.group(1).strip()
break
else:
include_stderr = True
else:
if not self.symbols_path:
@@ -320,50 +322,52 @@ class CrashInfo(object):
def check_for_java_exception(logcat, quiet=False):
"""
Print a summary of a fatal Java exception, if present in the provided
logcat output.
Example:
- PROCESS-CRASH | java-exception | java.lang.NullPointerException at org.mozilla.gecko.GeckoApp$21.run(GeckoApp.java:1833)
+ PROCESS-CRASH | java-exception | java.lang.NullPointerException at org.mozilla.gecko.GeckoApp$21.run(GeckoApp.java:1833) # noqa
`logcat` should be a list of strings.
If `quiet` is set, no PROCESS-CRASH message will be printed to stdout if a
crash is detected.
Returns True if a fatal Java exception was found, False otherwise.
"""
found_exception = False
for i, line in enumerate(logcat):
# Logs will be of form:
#
- # 01-30 20:15:41.937 E/GeckoAppShell( 1703): >>> REPORTING UNCAUGHT EXCEPTION FROM THREAD 9 ("GeckoBackgroundThread")
+ # 01-30 20:15:41.937 E/GeckoAppShell( 1703): >>> REPORTING UNCAUGHT EXCEPTION FROM THREAD 9 ("GeckoBackgroundThread") # noqa
# 01-30 20:15:41.937 E/GeckoAppShell( 1703): java.lang.NullPointerException
- # 01-30 20:15:41.937 E/GeckoAppShell( 1703): at org.mozilla.gecko.GeckoApp$21.run(GeckoApp.java:1833)
- # 01-30 20:15:41.937 E/GeckoAppShell( 1703): at android.os.Handler.handleCallback(Handler.java:587)
+ # 01-30 20:15:41.937 E/GeckoAppShell( 1703): at org.mozilla.gecko.GeckoApp$21.run(GeckoApp.java:1833) # noqa
+ # 01-30 20:15:41.937 E/GeckoAppShell( 1703): at android.os.Handler.handleCallback(Handler.java:587) # noqa
if "REPORTING UNCAUGHT EXCEPTION" in line or "FATAL EXCEPTION" in line:
# Strip away the date, time, logcat tag and pid from the next two lines and
# concatenate the remainder to form a concise summary of the exception.
found_exception = True
if len(logcat) >= i + 3:
logre = re.compile(r".*\): \t?(.*)")
- m = logre.search(logcat[i+1])
+ m = logre.search(logcat[i + 1])
if m and m.group(1):
exception_type = m.group(1)
- m = logre.search(logcat[i+2])
+ m = logre.search(logcat[i + 2])
if m and m.group(1):
exception_location = m.group(1)
if not quiet:
- print "PROCESS-CRASH | java-exception | %s %s" % (exception_type, exception_location)
+ print "PROCESS-CRASH | java-exception | %s %s" % (exception_type,
+ exception_location)
else:
- print "Automation Error: java exception in logcat at line %d of %d: %s" % (i, len(logcat), line)
+ print "Automation Error: java exception in logcat at line " \
+ "%d of %d: %s" % (i, len(logcat), line)
break
return found_exception
if mozinfo.isWin:
import ctypes
import uuid
@@ -385,17 +389,17 @@ if mozinfo.isWin:
CREATE_ALWAYS = 2
FILE_ATTRIBUTE_NORMAL = 0x80
INVALID_HANDLE_VALUE = -1
file_name = os.path.join(dump_directory,
str(uuid.uuid4()) + ".dmp")
if (mozinfo.info['bits'] != ctypes.sizeof(ctypes.c_voidp) * 8 and
- utility_path):
+ utility_path):
# We're not going to be able to write a minidump with ctypes if our
# python process was compiled for a different architecture than
# firefox, so we invoke the minidumpwriter utility program.
log = get_logger()
minidumpwriter = os.path.normpath(os.path.join(utility_path,
"minidumpwriter.exe"))
log.info("Using %s to write a dump to %s for [%d]" %
@@ -460,16 +464,17 @@ else:
def kill_pid(pid):
"""
Terminate a process with extreme prejudice.
:param pid: PID of the process to terminate.
"""
os.kill(pid, signal.SIGKILL)
+
def kill_and_get_minidump(pid, dump_directory, utility_path=None):
"""
Attempt to kill a process and leave behind a minidump describing its
execution state.
:param pid: The PID of the process to kill.
:param dump_directory: The directory where a minidump should be written on
Windows, where the dump will be written from outside the process.
@@ -487,16 +492,17 @@ def kill_and_get_minidump(pid, dump_dire
if mozinfo.isWin:
write_minidump(pid, dump_directory, utility_path)
elif mozinfo.isLinux or mozinfo.isMac:
os.kill(pid, signal.SIGABRT)
needs_killing = False
if needs_killing:
kill_pid(pid)
+
def cleanup_pending_crash_reports():
"""
Delete any pending crash reports.
The presence of pending crash reports may be reported by the browser,
affecting test results; it is best to ensure that these are removed
before starting any browser tests.
--- a/testing/mozbase/mozcrash/setup.py
+++ b/testing/mozbase/mozcrash/setup.py
@@ -8,19 +8,20 @@ PACKAGE_NAME = 'mozcrash'
PACKAGE_VERSION = '0.17'
# dependencies
deps = ['mozfile >= 1.0',
'mozlog >= 3.0']
setup(name=PACKAGE_NAME,
version=PACKAGE_VERSION,
- description="Library for printing stack traces from minidumps left behind by crashed processes",
+ description="Library for printing stack traces from minidumps "
+ "left behind by crashed processes",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozcrash'],
include_package_data=True,
zip_safe=False,
--- a/testing/mozbase/mozcrash/tests/test.py
+++ b/testing/mozbase/mozcrash/tests/test.py
@@ -1,41 +1,52 @@
#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
-import os, unittest, subprocess, tempfile, shutil, urlparse, zipfile, StringIO
+import os
+import unittest
+import subprocess
+import tempfile
+import shutil
+import urlparse
+import zipfile
+import StringIO
import mozcrash
import mozhttpd
import mozlog.unstructured as mozlog
# Make logs go away
log = mozlog.getLogger("mozcrash", handler=mozlog.FileHandler(os.devnull))
+
def popen_factory(stdouts):
"""
Generate a class that can mock subprocess.Popen. |stdouts| is an iterable that
should return an iterable for the stdout of each process in turn.
"""
class mock_popen(object):
+
def __init__(self, args, *args_rest, **kwargs):
self.stdout = stdouts.next()
self.returncode = 0
def wait(self):
return 0
def communicate(self):
return (self.stdout.next(), "")
return mock_popen
+
class TestCrash(unittest.TestCase):
+
def setUp(self):
self.tempdir = tempfile.mkdtemp()
# a fake file to use as a stackwalk binary
self.stackwalk = os.path.join(self.tempdir, "stackwalk")
open(self.stackwalk, "w").write("fake binary")
self._subprocess_popen = subprocess.Popen
subprocess.Popen = popen_factory(self.next_mock_stdout())
self.stdouts = []
@@ -168,61 +179,72 @@ class TestCrash(unittest.TestCase):
self.stdouts.append(["this is some output"])
def make_zipfile():
data = StringIO.StringIO()
z = zipfile.ZipFile(data, 'w')
z.writestr("symbols.txt", "abc/xyz")
z.close()
return data.getvalue()
+
def get_symbols(req):
headers = {}
return (200, headers, make_zipfile())
httpd = mozhttpd.MozHttpd(port=0,
- urlhandlers=[{'method':'GET', 'path':'/symbols', 'function':get_symbols}])
+ urlhandlers=[{'method': 'GET',
+ 'path': '/symbols',
+ 'function': get_symbols}])
httpd.start()
symbol_url = urlparse.urlunsplit(('http', '%s:%d' % httpd.httpd.server_address,
- '/symbols','',''))
+ '/symbols', '', ''))
self.assert_(mozcrash.check_for_crashes(self.tempdir,
symbol_url,
stackwalk_binary=self.stackwalk,
quiet=True))
+
class TestJavaException(unittest.TestCase):
- def setUp(self):
- self.test_log = ["01-30 20:15:41.937 E/GeckoAppShell( 1703): >>> REPORTING UNCAUGHT EXCEPTION FROM THREAD 9 (\"GeckoBackgroundThread\")",
- "01-30 20:15:41.937 E/GeckoAppShell( 1703): java.lang.NullPointerException",
- "01-30 20:15:41.937 E/GeckoAppShell( 1703): at org.mozilla.gecko.GeckoApp$21.run(GeckoApp.java:1833)",
- "01-30 20:15:41.937 E/GeckoAppShell( 1703): at android.os.Handler.handleCallback(Handler.java:587)"]
- def test_uncaught_exception(self):
- """
- Test for an exception which should be caught
- """
- self.assert_(mozcrash.check_for_java_exception(self.test_log, quiet=True))
+ def setUp(self):
+ self.test_log = [
+ "01-30 20:15:41.937 E/GeckoAppShell( 1703): >>> "
+ "REPORTING UNCAUGHT EXCEPTION FROM THREAD 9 (\"GeckoBackgroundThread\")",
+ "01-30 20:15:41.937 E/GeckoAppShell( 1703): java.lang.NullPointerException",
+ "01-30 20:15:41.937 E/GeckoAppShell( 1703):"
+ " at org.mozilla.gecko.GeckoApp$21.run(GeckoApp.java:1833)",
+ "01-30 20:15:41.937 E/GeckoAppShell( 1703):"
+ " at android.os.Handler.handleCallback(Handler.java:587)"]
+
+ def test_uncaught_exception(self):
+ """
+ Test for an exception which should be caught
+ """
+ self.assert_(mozcrash.check_for_java_exception(self.test_log, quiet=True))
- def test_fatal_exception(self):
- """
- Test for an exception which should be caught
- """
- fatal_log = list(self.test_log)
- fatal_log[0] = "01-30 20:15:41.937 E/GeckoAppShell( 1703): >>> FATAL EXCEPTION FROM THREAD 9 (\"GeckoBackgroundThread\")"
- self.assert_(mozcrash.check_for_java_exception(fatal_log, quiet=True))
+ def test_fatal_exception(self):
+ """
+        Test for a fatal exception which should be caught
+ """
+ fatal_log = list(self.test_log)
+ fatal_log[0] = "01-30 20:15:41.937 E/GeckoAppShell( 1703):" \
+ " >>> FATAL EXCEPTION FROM THREAD 9 (\"GeckoBackgroundThread\")"
+ self.assert_(mozcrash.check_for_java_exception(fatal_log, quiet=True))
- def test_truncated_exception(self):
- """
- Test for an exception which should be caught which
- was truncated
- """
- truncated_log = list(self.test_log)
- truncated_log[0], truncated_log[1] = truncated_log[1], truncated_log[0]
- self.assert_(mozcrash.check_for_java_exception(truncated_log, quiet=True))
+ def test_truncated_exception(self):
+ """
+        Test for a truncated exception which
+        should still be caught
+ """
+ truncated_log = list(self.test_log)
+ truncated_log[0], truncated_log[1] = truncated_log[1], truncated_log[0]
+ self.assert_(mozcrash.check_for_java_exception(truncated_log, quiet=True))
- def test_unchecked_exception(self):
- """
- Test for an exception which should not be caught
- """
- passable_log = list(self.test_log)
- passable_log[0] = "01-30 20:15:41.937 E/GeckoAppShell( 1703): >>> NOT-SO-BAD EXCEPTION FROM THREAD 9 (\"GeckoBackgroundThread\")"
- self.assert_(not mozcrash.check_for_java_exception(passable_log, quiet=True))
+ def test_unchecked_exception(self):
+ """
+ Test for an exception which should not be caught
+ """
+ passable_log = list(self.test_log)
+ passable_log[0] = "01-30 20:15:41.937 E/GeckoAppShell( 1703):" \
+ " >>> NOT-SO-BAD EXCEPTION FROM THREAD 9 (\"GeckoBackgroundThread\")"
+ self.assert_(not mozcrash.check_for_java_exception(passable_log, quiet=True))
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozdebug/mozdebug/__init__.py
+++ b/testing/mozbase/mozdebug/mozdebug/__init__.py
@@ -1,8 +1,9 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
This module contains a set of function to gather information about the
debugging capabilities of the platform. It allows to look for a specific
debugger or to query the system for a compatible/default debugger.
--- a/testing/mozbase/mozdebug/mozdebug/mozdebug.py
+++ b/testing/mozbase/mozdebug/mozdebug/mozdebug.py
@@ -56,36 +56,38 @@ To add support for a new debugger, simpl
# Windows Development Kit super-debugger.
'windbg.exe': {
'interactive': True,
},
}
# Maps each OS platform to the preferred debugger programs found in _DEBUGGER_INFO.
_DEBUGGER_PRIORITIES = {
- 'win': ['devenv.exe', 'wdexpress.exe'],
- 'linux': ['gdb', 'cgdb', 'lldb'],
- 'mac': ['lldb', 'gdb'],
- 'android': ['gdb'],
- 'unknown': ['gdb']
+ 'win': ['devenv.exe', 'wdexpress.exe'],
+ 'linux': ['gdb', 'cgdb', 'lldb'],
+ 'mac': ['lldb', 'gdb'],
+ 'android': ['gdb'],
+ 'unknown': ['gdb']
}
+
def _windbg_installation_paths():
programFilesSuffixes = ['', ' (x86)']
programFiles = "C:/Program Files"
# Try the most recent versions first.
windowsKitsVersions = ['10', '8.1', '8']
for suffix in programFilesSuffixes:
windowsKitsPrefix = os.path.join(programFiles + suffix,
'Windows Kits')
for version in windowsKitsVersions:
yield os.path.join(windowsKitsPrefix, version,
'Debuggers', 'x86', 'windbg.exe')
+
def get_debugger_path(debugger):
'''
Get the full path of the debugger.
:param debugger: The name of the debugger.
'''
if mozinfo.os == 'mac' and debugger == 'lldb':
@@ -100,17 +102,18 @@ def get_debugger_path(debugger):
if path:
return path
except:
# Just default to find_executable instead.
pass
return find_executable(debugger)
-def get_debugger_info(debugger, debuggerArgs = None, debuggerInteractive = False):
+
+def get_debugger_info(debugger, debuggerArgs=None, debuggerInteractive=False):
'''
Get the information about the requested debugger.
Returns a dictionary containing the |path| of the debugger executable,
if it will run in |interactive| mode, its arguments and whether it needs
to escape arguments it passes to the debugged program (|requiresEscapedArgs|).
If the debugger cannot be found in the system, returns |None|.
@@ -122,17 +125,17 @@ def get_debugger_info(debugger, debugger
'''
debuggerPath = None
if debugger:
# Append '.exe' to the debugger on Windows if it's not present,
# so things like '--debugger=devenv' work.
if (os.name == 'nt'
- and not debugger.lower().endswith('.exe')):
+ and not debugger.lower().endswith('.exe')):
debugger += '.exe'
debuggerPath = get_debugger_path(debugger)
if not debuggerPath:
# windbg is not installed with the standard set of tools, and it's
# entirely possible that the user hasn't added the install location to
# PATH, so we have to be a little more clever than normal to locate it.
@@ -181,19 +184,22 @@ def get_debugger_info(debugger, debugger
debugger_interactive,
debugger_arguments,
get_debugger_info('requiresEscapedArgs', False)
)
return d
# Defines the search policies to use in get_default_debugger_name.
+
+
class DebuggerSearch:
- OnlyFirst = 1
- KeepLooking = 2
+ OnlyFirst = 1
+ KeepLooking = 2
+
def get_default_debugger_name(search=DebuggerSearch.OnlyFirst):
'''
Get the debugger name for the default debugger on current platform.
:param search: If specified, stops looking for the debugger if the
default one is not found (|DebuggerSearch.OnlyFirst|) or keeps
looking for other compatible debuggers (|DebuggerSearch.KeepLooking|).
@@ -254,28 +260,32 @@ def get_default_debugger_name(search=Deb
# TODO: pass in the path to the Valgrind to be used (--valgrind=), and
# check what flags it accepts. Possible args that might be beneficial:
#
# --num-transtab-sectors=24 [reduces re-jitting overheads in long runs]
# --px-default=allregs-at-mem-access
# --px-file-backed=unwindregs-at-mem-access
# [these reduce PX overheads as described above]
#
+
+
def get_default_valgrind_args():
return (['--fair-sched=yes',
'--smc-check=all-non-file',
'--vex-iropt-register-updates=allregs-at-mem-access',
'--trace-children=yes',
'--child-silent-after-fork=yes',
('--trace-children-skip='
+ '/usr/bin/hg,/bin/rm,*/bin/certutil,*/bin/pk12util,'
+ '*/bin/ssltunnel,*/bin/uname,*/bin/which,*/bin/ps,'
+ '*/bin/grep,*/bin/java'),
- ]
+ ]
+ get_default_valgrind_tool_specific_args())
# The default tool is Memcheck. Feeding these arguments to a different
# Valgrind tool will cause it to fail at startup, so don't do that!
+
+
def get_default_valgrind_tool_specific_args():
return ['--partial-loads-ok=yes',
'--leak-check=full',
'--show-possibly-lost=no',
- ]
+ ]
--- a/testing/mozbase/mozdebug/setup.py
+++ b/testing/mozbase/mozdebug/setup.py
@@ -3,25 +3,25 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
from setuptools import setup
PACKAGE_VERSION = '0.1'
setup(name='mozdebug',
version=PACKAGE_VERSION,
- description="Utilities for running applications under native code debuggers intended for use in Mozilla testing",
+ description="Utilities for running applications under native code debuggers "
+ "intended for use in Mozilla testing",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozdebug'],
include_package_data=True,
zip_safe=False,
install_requires=['mozinfo'],
entry_points="""
# -*- Entry points: -*-
""",
)
-
--- a/testing/mozbase/mozdevice/adb_tests/test_device_running_adb_as_root.py
+++ b/testing/mozbase/mozdevice/adb_tests/test_device_running_adb_as_root.py
@@ -5,17 +5,19 @@
Running this test case requires various reboots which makes it a
very slow test case to run.
"""
import unittest
import sys
from mozdevice import DeviceManagerADB
+
class TestFileOperations(unittest.TestCase):
+
def setUp(self):
dm = DeviceManagerADB()
dm.reboot(wait=True)
def test_run_adb_as_root_parameter(self):
dm = DeviceManagerADB()
self.assertTrue(dm.processInfo("adbd")[2] != "root")
dm = DeviceManagerADB(runAdbAsRoot=True)
@@ -29,18 +31,18 @@ class TestFileOperations(unittest.TestCa
def tearDown(self):
dm = DeviceManagerADB()
dm.reboot()
if __name__ == "__main__":
dm = DeviceManagerADB()
if not dm.devices():
- print "There are no connected adb devices"
- sys.exit(1)
+ print "There are no connected adb devices"
+ sys.exit(1)
else:
- if not (int(dm._runCmd(["shell", "getprop", "ro.secure"]).output[0]) and \
+ if not (int(dm._runCmd(["shell", "getprop", "ro.secure"]).output[0]) and
int(dm._runCmd(["shell", "getprop", "ro.debuggable"]).output[0])):
print "This test case is meant for devices with devices that start " \
- "adbd as non-root and allows for adbd to be restarted as root."
+ "adbd as non-root and allows for adbd to be restarted as root."
sys.exit(1)
unittest.main()
--- a/testing/mozbase/mozdevice/adb_tests/test_devicemanagerADB.py
+++ b/testing/mozbase/mozdevice/adb_tests/test_devicemanagerADB.py
@@ -39,21 +39,23 @@ import re
import socket
import sys
import tempfile
import unittest
from StringIO import StringIO
from mozdevice import DeviceManagerADB, DMError
+
def find_mount_permissions(dm, mount_path):
for mount_point in dm._runCmd(["shell", "mount"]).output:
if mount_point.find(mount_path) > 0:
return re.search('(ro|rw)(?=,)', mount_point).group(0)
+
class DeviceManagerADBTestCase(unittest.TestCase):
tempLocalDir = "tempDir"
tempLocalFile = os.path.join(tempLocalDir, "tempfile.txt")
tempRemoteDir = None
tempRemoteFile = None
tempRemoteSystemFile = None
def setUp(self):
@@ -76,32 +78,33 @@ class DeviceManagerADBTestCase(unittest.
self.dm = DeviceManagerADB()
if not os.path.exists(self.tempLocalDir):
os.mkdir(self.tempLocalDir)
if not os.path.exists(self.tempLocalFile):
# Create empty file
open(self.tempLocalFile, 'w').close()
self.tempRemoteDir = self.dm.getTempDir()
self.tempRemoteFile = os.path.join(self.tempRemoteDir,
- os.path.basename(self.tempLocalFile))
+ os.path.basename(self.tempLocalFile))
self.tempRemoteSystemFile = \
os.path.join("/system", os.path.basename(self.tempLocalFile))
@classmethod
def tearDownClass(self):
os.remove(self.tempLocalFile)
os.rmdir(self.tempLocalDir)
if self.dm.dirExists(self.tempRemoteDir):
# self.tempRemoteFile will get deleted with it
self.dm.removeDir(self.tempRemoteDir)
if self.dm.fileExists(self.tempRemoteSystemFile):
self.dm.removeFile(self.tempRemoteSystemFile)
class TestFileOperations(DeviceManagerADBTestCase):
+
def test_make_and_remove_directory(self):
dir1 = os.path.join(self.tempRemoteDir, "dir1")
self.assertFalse(self.dm.dirExists(dir1))
self.dm.mkDir(dir1)
self.assertTrue(self.dm.dirExists(dir1))
self.dm.removeDir(dir1)
self.assertFalse(self.dm.dirExists(dir1))
@@ -163,32 +166,33 @@ class TestFileOperations(DeviceManagerAD
self.dm.removeFile(self.tempRemoteSystemFile)
self.assertFalse(self.dm.fileExists(self.tempRemoteSystemFile))
self.dm.shell(['mount', '-r', '-o', 'remount', '/system'], out)
out.close()
self.assertTrue(find_mount_permissions(self.dm, "/system") == "ro")
class TestOther(DeviceManagerADBTestCase):
+
def test_get_list_of_processes(self):
self.assertEquals(type(self.dm.getProcessList()), list)
def test_get_current_time(self):
self.assertEquals(type(self.dm.getCurrentTime()), int)
def test_get_info(self):
self.assertEquals(type(self.dm.getInfo()), dict)
def test_list_devices(self):
self.assertEquals(len(list(self.dm.devices())), 1)
def test_shell(self):
out = StringIO()
self.dm.shell(["echo", "$COMPANY", ";", "pwd"], out,
- env={"COMPANY":"Mozilla"}, cwd="/", timeout=4, root=True)
+ env={"COMPANY": "Mozilla"}, cwd="/", timeout=4, root=True)
output = str(out.getvalue()).rstrip().splitlines()
out.close()
self.assertEquals(output, ['Mozilla', '/'])
def test_port_forwarding(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
@@ -198,18 +202,18 @@ class TestOther(DeviceManagerADBTestCase
def test_port_forwarding_error(self):
self.assertRaises(DMError, self.dm.forward, "", "")
if __name__ == '__main__':
dm = DeviceManagerADB()
if not dm.devices():
- print "There are no connected adb devices"
- sys.exit(1)
+ print "There are no connected adb devices"
+ sys.exit(1)
if find_mount_permissions(dm, "/system") == "rw":
print "We've found out that /system is mounted as 'rw'. This is because the command " \
- "'adb remount' has been run before running this test case. Please reboot the device " \
- "and try again."
+ "'adb remount' has been run before running this test case. Please reboot the device " \
+ "and try again."
sys.exit(1)
unittest.main()
--- a/testing/mozbase/mozdevice/mozdevice/__init__.py
+++ b/testing/mozbase/mozdevice/mozdevice/__init__.py
@@ -4,8 +4,12 @@
from adb import ADBError, ADBRootError, ADBTimeoutError, ADBProcess, ADBCommand, ADBHost, ADBDevice
from adb_android import ADBAndroid
from adb_b2g import ADBB2G
from devicemanager import DeviceManager, DMError, ZeroconfListener
from devicemanagerADB import DeviceManagerADB
from devicemanagerSUT import DeviceManagerSUT
from droid import DroidADB, DroidSUT, DroidConnectByHWID
+
+__all__ = ['ADBError', 'ADBRootError', 'ADBTimeoutError', 'ADBProcess', 'ADBCommand', 'ADBHost',
+ 'ADBDevice', 'ADBAndroid', 'ADBB2G', 'DeviceManager', 'DMError', 'ZeroconfListener',
+ 'DeviceManagerADB', 'DeviceManagerSUT', 'DroidADB', 'DroidSUT', 'DroidConnectByHWID']
--- a/testing/mozbase/mozdevice/mozdevice/adb.py
+++ b/testing/mozbase/mozdevice/mozdevice/adb.py
@@ -10,16 +10,17 @@ import tempfile
import time
import traceback
from abc import ABCMeta, abstractmethod
class ADBProcess(object):
"""ADBProcess encapsulates the data related to executing the adb process."""
+
def __init__(self, args):
#: command argument argument list.
self.args = args
#: Temporary file handle to be used for stdout.
self.stdout_file = tempfile.TemporaryFile()
#: Temporary file handle to be used for stderr.
self.stderr_file = tempfile.TemporaryFile()
#: boolean indicating if the command timed out.
@@ -54,42 +55,47 @@ class ADBProcess(object):
def __str__(self):
return ('args: %s, exitcode: %s, stdout: %s, stderr: %s' % (
' '.join(self.args), self.exitcode, self.stdout, self.stderr))
# ADBError, ADBRootError, and ADBTimeoutError are treated
# differently in order that unhandled ADBRootErrors and
# ADBTimeoutErrors can be handled distinctly from ADBErrors.
+
class ADBError(Exception):
"""ADBError is raised in situations where a command executed on a
device either exited with a non-zero exitcode or when an
unexpected error condition has occurred. Generally, ADBErrors can
be handled and the device can continue to be used.
"""
pass
+
class ADBListDevicesError(ADBError):
"""ADBListDevicesError is raised when errors are found listing the
devices, typically not any permissions.
The devices information is stocked with the *devices* member.
"""
+
def __init__(self, msg, devices):
ADBError.__init__(self, msg)
self.devices = devices
+
class ADBRootError(Exception):
"""ADBRootError is raised when a shell command is to be executed as
root but the device does not support it. This error is fatal since
there is no recovery possible by the script. You must either root
your device or change your scripts to not require running as root.
"""
pass
+
class ADBTimeoutError(Exception):
"""ADBTimeoutError is raised when either a host command or shell
command takes longer than the specified timeout to execute. The
timeout value is set in the ADBCommand constructor and is 300 seconds by
default. This error is typically fatal since the host is having
problems communicating with the device. You may be able to recover
by rebooting, but this is not guaranteed.
@@ -229,20 +235,20 @@ class ADBCommand(object):
adb_process = ADBProcess(args)
if timeout is None:
timeout = self._timeout
start_time = time.time()
adb_process.exitcode = adb_process.proc.poll()
while ((time.time() - start_time) <= timeout and
- adb_process.exitcode == None):
+ adb_process.exitcode is None):
time.sleep(self._polling_interval)
adb_process.exitcode = adb_process.proc.poll()
- if adb_process.exitcode == None:
+ if adb_process.exitcode is None:
adb_process.proc.kill()
adb_process.timedout = True
adb_process.exitcode = adb_process.proc.poll()
adb_process.stdout_file.seek(0, os.SEEK_SET)
adb_process.stderr_file.seek(0, os.SEEK_SET)
return adb_process
@@ -305,16 +311,17 @@ class ADBHost(ADBCommand):
::
from mozdevice import ADBHost
adbhost = ADBHost()
adbhost.start_server()
"""
+
def __init__(self,
adb='adb',
adb_host=None,
adb_port=None,
logger_name='adb',
timeout=300,
verbose=False):
"""Initializes the ADBHost object.
@@ -453,17 +460,19 @@ class ADBHost(ADBCommand):
is parsed and placed into an object as in
[{'device_serial': 'b313b945', 'state': 'device', 'product': 'd2vzw',
'usb': '1-7', 'device': 'd2vzw', 'model': 'SCH_I535' }]
"""
# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw
# from Android system/core/adb/transport.c statename()
- re_device_info = re.compile(r'([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)')
+ re_device_info = re.compile(
+ r"([^\s]+)\s+(offline|bootloader|device|host|recovery|sideload|"
+ "no permissions|unauthorized|unknown)")
devices = []
lines = self.command_output(["devices", "-l"], timeout=timeout).split('\n')
for line in lines:
if line == 'List of devices attached ':
continue
match = re_device_info.match(line)
if match:
device = {
@@ -660,17 +669,16 @@ class ADBDevice(ADBCommand):
self.command_output(
["root"],
timeout=timeout).find("cannot run as root") == -1):
self._have_root_shell = True
self._logger.info("adbd restarted as root")
except ADBError:
self._logger.debug("Check for root adbd failed")
-
@staticmethod
def _escape_command_line(cmd):
"""Utility function to return escaped and quoted version of command
line.
"""
quoted_cmd = []
for arg in cmd:
@@ -1034,20 +1042,20 @@ class ADBDevice(ADBCommand):
args.extend(["wait-for-device", "shell", cmd])
adb_process = ADBProcess(args)
if timeout is None:
timeout = self._timeout
start_time = time.time()
exitcode = adb_process.proc.poll()
- while ((time.time() - start_time) <= timeout) and exitcode == None:
+ while ((time.time() - start_time) <= timeout) and exitcode is None:
time.sleep(self._polling_interval)
exitcode = adb_process.proc.poll()
- if exitcode == None:
+ if exitcode is None:
adb_process.proc.kill()
adb_process.timedout = True
adb_process.exitcode = adb_process.proc.poll()
elif exitcode == 0:
adb_process.exitcode = self._get_exitcode(adb_process.stdout_file)
else:
adb_process.exitcode = exitcode
@@ -1125,22 +1133,22 @@ class ADBDevice(ADBCommand):
output = adb_process.stdout_file.read().rstrip()
if self._verbose:
self._logger.debug('shell_output: %s, '
'timeout: %s, '
'root: %s, '
'timedout: %s, '
'exitcode: %s, '
'output: %s' %
- (' '.join(adb_process.args),
- timeout,
+ (' '.join(adb_process.args),
+ timeout,
root,
- adb_process.timedout,
- adb_process.exitcode,
- output))
+ adb_process.timedout,
+ adb_process.exitcode,
+ output))
return output
finally:
if adb_process and isinstance(adb_process.stdout_file, file):
adb_process.stdout_file.close()
adb_process.stderr_file.close()
# Informational methods
@@ -1693,17 +1701,17 @@ class ADBDevice(ADBCommand):
raise
pid_set = set(pid_list)
current_pid_set = set([str(proc[0]) for proc in
self.get_process_list(timeout=timeout)])
pid_list = list(pid_set.intersection(current_pid_set))
if not pid_list:
break
self._logger.debug("Attempt %d of %d to kill processes %s failed" %
- (attempt+1, attempts, pid_list))
+ (attempt + 1, attempts, pid_list))
time.sleep(wait)
if pid_list:
raise ADBError('kill: processes %s not killed' % pid_list)
def pkill(self, appname, sig=None, attempts=3, wait=5,
timeout=None, root=False):
"""Kills a processes on the device matching a name.
--- a/testing/mozbase/mozdevice/mozdevice/adb_android.py
+++ b/testing/mozbase/mozdevice/mozdevice/adb_android.py
@@ -148,17 +148,17 @@ class ADBAndroid(ADBDevice):
if match:
parameter = match.group(1)
value = match.group(2)
if parameter == 'level':
level = float(value)
elif parameter == 'scale':
scale = float(value)
if parameter is not None and scale is not None:
- percentage = 100.0*level/scale
+ percentage = 100.0 * level / scale
break
return percentage
# System control methods
def is_device_ready(self, timeout=None):
"""Checks if a device is ready for testing.
@@ -214,17 +214,17 @@ class ADBAndroid(ADBDevice):
success = False
break
except ADBError as e:
success = False
failure = e.message
if not success:
self._logger.debug('Attempt %s of %s device not ready: %s' % (
- attempt+1, self._device_ready_retry_attempts,
+ attempt + 1, self._device_ready_retry_attempts,
failure))
time.sleep(self._device_ready_retry_wait)
return success
def power_on(self, timeout=None):
"""Sets the device's power stayon value.
@@ -288,18 +288,18 @@ class ADBAndroid(ADBDevice):
data = self.shell_output("pm list package %s" % app_name, timeout=timeout)
if pm_error_string in data:
raise ADBError(pm_error_string)
if app_name not in data:
return False
return True
def launch_application(self, app_name, activity_name, intent, url=None,
- extras=None, wait=True, fail_if_running=True,
- timeout=None):
+ extras=None, wait=True, fail_if_running=True,
+ timeout=None):
"""Launches an Android application
:param str app_name: Name of application (e.g. `com.android.chrome`)
:param str activity_name: Name of activity to launch (e.g. `.Main`)
:param str intent: Intent to launch application with
:param url: URL to open
:type url: str or None
:param extras: Extra arguments for application.
@@ -321,17 +321,17 @@ class ADBAndroid(ADBDevice):
# If fail_if_running is True, we throw an exception here. Only one
# instance of an application can be running at once on Android,
# starting a new instance may not be what we want depending on what
# we want to do
if fail_if_running and self.process_exist(app_name, timeout=timeout):
raise ADBError("Only one instance of an application may be running "
"at once")
- acmd = [ "am", "start" ] + \
+ acmd = ["am", "start"] + \
["-W" if wait else '', "-n", "%s/%s" % (app_name, activity_name)]
if intent:
acmd.extend(["-a", intent])
if extras:
for (key, val) in extras.iteritems():
if type(val) is int:
@@ -344,18 +344,18 @@ class ADBAndroid(ADBDevice):
if url:
acmd.extend(["-d", url])
cmd = self._escape_command_line(acmd)
self.shell_output(cmd, timeout=timeout)
def launch_fennec(self, app_name, intent="android.intent.action.VIEW",
- moz_env=None, extra_args=None, url=None, wait=True,
- fail_if_running=True, timeout=None):
+ moz_env=None, extra_args=None, url=None, wait=True,
+ fail_if_running=True, timeout=None):
"""Convenience method to launch Fennec on Android with various
debugging arguments
:param str app_name: Name of fennec application (e.g.
`org.mozilla.fennec`)
:param str intent: Intent to launch application.
:param moz_env: Mozilla specific environment to pass into
application.
@@ -386,19 +386,20 @@ class ADBAndroid(ADBDevice):
for (env_count, (env_key, env_val)) in enumerate(moz_env.iteritems()):
extras["env" + str(env_count)] = env_key + "=" + env_val
# Additional command line arguments that fennec will read and use (e.g.
# with a custom profile)
if extra_args:
extras['args'] = " ".join(extra_args)
- self.launch_application(app_name, "org.mozilla.gecko.BrowserApp", intent, url=url, extras=extras,
- wait=wait, fail_if_running=fail_if_running,
- timeout=timeout)
+ self.launch_application(app_name, "org.mozilla.gecko.BrowserApp", intent, url=url,
+ extras=extras,
+ wait=wait, fail_if_running=fail_if_running,
+ timeout=timeout)
def stop_application(self, app_name, timeout=None, root=False):
"""Stops the specified application
For Android 3.0+, we use the "am force-stop" to do this, which
is reliable and does not require root. For earlier versions of
Android, we simply try to manually kill the processes started
by the app repeatedly until none is around any more. This is
@@ -423,17 +424,17 @@ class ADBAndroid(ADBDevice):
self.shell_output("am force-stop %s" % app_name,
timeout=timeout, root=root)
else:
num_tries = 0
max_tries = 5
while self.process_exist(app_name, timeout=timeout):
if num_tries > max_tries:
raise ADBError("Couldn't successfully kill %s after %s "
- "tries" % (app_name, max_tries))
+ "tries" % (app_name, max_tries))
self.pkill(app_name, timeout=timeout, root=root)
num_tries += 1
# sleep for a short duration to make sure there are no
# additional processes in the process of being launched
# (this is not 100% guaranteed to work since it is inherently
# racey, but it's the best we can do)
time.sleep(1)
--- a/testing/mozbase/mozdevice/mozdevice/devicemanager.py
+++ b/testing/mozbase/mozdevice/mozdevice/devicemanager.py
@@ -9,36 +9,40 @@ import os
import posixpath
import re
import struct
import StringIO
import zlib
from functools import wraps
+
class DMError(Exception):
"generic devicemanager exception."
- def __init__(self, msg= '', fatal = False):
+ def __init__(self, msg='', fatal=False):
self.msg = msg
self.fatal = fatal
def __str__(self):
return self.msg
+
def abstractmethod(method):
line = method.func_code.co_firstlineno
filename = method.func_code.co_filename
+
@wraps(method)
def not_implemented(*args, **kwargs):
raise NotImplementedError('Abstract method %s at File "%s", line %s '
- 'should be implemented by a concrete class' %
- (repr(method), filename, line))
+ 'should be implemented by a concrete class' %
+ (repr(method), filename, line))
return not_implemented
+
class DeviceManager(object):
"""
Represents a connection to a device. Once an implementation of this class
is successfully instantiated, you may do things like list/copy files to
the device, launch processes on the device, and install or remove
applications from the device.
Never instantiate this class directly! Instead, instantiate an
@@ -47,17 +51,17 @@ class DeviceManager(object):
_logcatNeedsRoot = True
default_timeout = 300
short_timeout = 30
def __init__(self, logLevel=None, deviceRoot=None):
try:
self._logger = mozlog.get_default_logger(component="mozdevice")
- if not self._logger: # no global structured logger, fall back to reg logging
+ if not self._logger: # no global structured logger, fall back to reg logging
self._logger = mozlog.unstructured.getLogger("mozdevice")
if logLevel is not None:
self._logger.setLevel(logLevel)
except AttributeError:
# Structured logging doesn't work on Python 2.6
self._logger = None
self._logLevel = logLevel
self._remoteIsWin = None
@@ -91,32 +95,33 @@ class DeviceManager(object):
self._logger.warning("dm.debug is deprecated. Use logLevel.")
levels = {logging.DEBUG: 5, logging.INFO: 3, logging.WARNING: 2,
logging.ERROR: 1, logging.CRITICAL: 0}
return levels[self.logLevel]
@debug.setter
def debug_setter(self, newDebug):
self._logger.warning("dm.debug is deprecated. Use logLevel.")
- newDebug = 5 if newDebug > 5 else newDebug # truncate >=5 to 5
+ newDebug = 5 if newDebug > 5 else newDebug # truncate >=5 to 5
levels = {5: logging.DEBUG, 3: logging.INFO, 2: logging.WARNING,
1: logging.ERROR, 0: logging.CRITICAL}
self.logLevel = levels[newDebug]
@abstractmethod
def getInfo(self, directive=None):
"""
Returns a dictionary of information strings about the device.
:param directive: information you want to get. Options are:
- `os` - name of the os
- `id` - unique id of the device
- `uptime` - uptime of the device
- - `uptimemillis` - uptime of the device in milliseconds (NOT supported on all implementations)
+ - `uptimemillis` - uptime of the device in milliseconds
+ (NOT supported on all implementations)
- `systime` - system time of the device
- `screen` - screen resolution
- `memory` - memory stats
- `memtotal` - total memory available on the device, for example 927208 kB
- `process` - list of running processes (same as ps)
- `disk` - total, free, available bytes on disk
- `power` - power status (charge, battery temp)
- `temperature` - device temperature
@@ -132,43 +137,45 @@ class DeviceManager(object):
def getIP(self, interfaces=['eth0', 'wlan0']):
"""
Returns the IP of the device, or None if no connection exists.
"""
for interface in interfaces:
match = re.match(r"%s: ip (\S+)" % interface,
self.shellCheckOutput(['ifconfig', interface],
- timeout=self.short_timeout))
+ timeout=self.short_timeout))
if match:
return match.group(1)
def recordLogcat(self):
"""
Clears the logcat file making it easier to view specific events.
"""
- #TODO: spawn this off in a separate thread/process so we can collect all the logcat information
+ # TODO: spawn this off in a separate thread/process so we can collect all
+ # the logcat information
- # Right now this is just clearing the logcat so we can only see what happens after this call.
+ # Right now this is just clearing the logcat so we can only see what
+ # happens after this call.
self.shellCheckOutput(['/system/bin/logcat', '-c'], root=self._logcatNeedsRoot,
timeout=self.short_timeout)
def getLogcat(self, filterSpecs=["dalvikvm:I", "ConnectivityService:S",
- "WifiMonitor:S", "WifiStateTracker:S",
- "wpa_supplicant:S", "NetworkStateTracker:S"],
+ "WifiMonitor:S", "WifiStateTracker:S",
+ "wpa_supplicant:S", "NetworkStateTracker:S"],
format="time",
filterOutRegexps=[]):
"""
Returns the contents of the logcat file as a list of
'\n' terminated strings
"""
cmdline = ["/system/bin/logcat", "-v", format, "-d"] + filterSpecs
output = self.shellCheckOutput(cmdline,
- root=self._logcatNeedsRoot,
- timeout=self.short_timeout)
+ root=self._logcatNeedsRoot,
+ timeout=self.short_timeout)
lines = output.replace('\r\n', '\n').splitlines(True)
for regex in filterOutRegexps:
lines = [line for line in lines if not re.search(regex, line)]
return lines
def saveScreenshot(self, filename):
@@ -247,18 +254,18 @@ class DeviceManager(object):
for root, dirs, files in os.walk(localDirname):
parts = root.split(localDirname)
for f in files:
remoteRoot = remoteDirname + '/' + parts[1]
remoteRoot = remoteRoot.replace('/', '/')
if (parts[1] == ""):
remoteRoot = remoteDirname
remoteName = remoteRoot + '/' + f
- if (self.validateFile(remoteName, os.path.join(root, f)) <> True):
- return False
+ if (self.validateFile(remoteName, os.path.join(root, f)) is not True):
+ return False
return True
@abstractmethod
def mkDir(self, remoteDirname):
"""
Creates a single directory on the device file system.
"""
@@ -272,17 +279,17 @@ class DeviceManager(object):
filename = posixpath.normpath(filename)
containing = posixpath.dirname(filename)
if not self.dirExists(containing):
parts = filename.split('/')
name = "/" if not self.remoteIsWin else parts.pop(0)
for part in parts[:-1]:
if part != "":
name = posixpath.join(name, part)
- self.mkDir(name) # mkDir will check previous existence
+ self.mkDir(name) # mkDir will check previous existence
@abstractmethod
def dirExists(self, dirpath):
"""
Returns whether dirpath exists and is a directory on the device file system.
"""
@abstractmethod
@@ -309,31 +316,31 @@ class DeviceManager(object):
@abstractmethod
def removeDir(self, remoteDirname):
"""
Does a recursive delete of directory on the device: rm -Rf remoteDirname.
"""
@abstractmethod
def moveTree(self, source, destination):
- """
- Does a move of the file or directory on the device.
+ """
+ Does a move of the file or directory on the device.
- :param source: Path to the original file or directory
- :param destination: Path to the destination file or directory
- """
+ :param source: Path to the original file or directory
+ :param destination: Path to the destination file or directory
+ """
@abstractmethod
def copyTree(self, source, destination):
- """
- Does a copy of the file or directory on the device.
+ """
+ Does a copy of the file or directory on the device.
- :param source: Path to the original file or directory
- :param destination: Path to the destination file or directory
- """
+ :param source: Path to the original file or directory
+ :param destination: Path to the destination file or directory
+ """
@abstractmethod
def chmodDir(self, remoteDirname, mask="777"):
"""
Recursively changes file permissions in a directory.
"""
@property
@@ -397,17 +404,19 @@ class DeviceManager(object):
:param root: Specifies whether command requires root privileges
:raises: DMError
"""
buf = StringIO.StringIO()
retval = self.shell(cmd, buf, env=env, cwd=cwd, timeout=timeout, root=root)
output = str(buf.getvalue()[0:-1]).rstrip()
buf.close()
if retval != 0:
- raise DMError("Non-zero return code for command: %s (output: '%s', retval: '%s')" % (cmd, output, retval))
+ raise DMError(
+ "Non-zero return code for command: %s "
+ "(output: '%s', retval: '%s')" % (cmd, output, retval))
return output
@abstractmethod
def getProcessList(self):
"""
Returns array of tuples representing running processes on the device.
Format of tuples is (processId, processName, userId)
@@ -419,22 +428,22 @@ class DeviceManager(object):
Information on process is in tuple format: (pid, process path, user)
If a process with the specified name does not exist this function will return None.
"""
if not isinstance(processName, basestring):
raise TypeError("Process name %s is not a string" % processName)
processInfo = None
- #filter out extra spaces
+ # filter out extra spaces
parts = filter(lambda x: x != '', processName.split(' '))
processName = ' '.join(parts)
- #filter out the quoted env string if it exists
- #ex: '"name=value;name2=value2;etc=..." process args' -> 'process args'
+ # filter out the quoted env string if it exists
+ # ex: '"name=value;name2=value2;etc=..." process args' -> 'process args'
parts = processName.split('"')
if (len(parts) > 2):
processName = ' '.join(parts[2:]).strip()
pieces = processName.split(' ')
parts = pieces[0].split('/')
app = parts[-1]
@@ -480,17 +489,18 @@ class DeviceManager(object):
"""
@abstractmethod
def installApp(self, appBundlePath, destPath=None):
"""
Installs an application onto the device.
:param appBundlePath: path to the application bundle on the device
- :param destPath: destination directory of where application should be installed to (optional)
+ :param destPath: destination directory of where application should be
+ installed to (optional)
"""
@abstractmethod
def uninstallApp(self, appName, installPath=None):
"""
Uninstalls the named application from device and DOES NOT cause a reboot.
:param appName: the name of the application (e.g org.mozilla.fennec)
@@ -526,39 +536,43 @@ class DeviceManager(object):
@staticmethod
def _writePNG(buf, width, height):
"""
Method for writing a PNG from a buffer, used by getScreenshot on older devices,
"""
# Based on: http://code.activestate.com/recipes/577443-write-a-png-image-in-native-python/
width_byte_4 = width * 4
- raw_data = b"".join(b'\x00' + buf[span:span + width_byte_4] for span in range(0, (height - 1) * width * 4, width_byte_4))
+ raw_data = b"".join(b'\x00' + buf[span:span + width_byte_4]
+ for span in range(0, (height - 1) * width * 4, width_byte_4))
+
def png_pack(png_tag, data):
chunk_head = png_tag + data
- return struct.pack("!I", len(data)) + chunk_head + struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head))
+ return struct.pack("!I", len(data)) \
+ + chunk_head \
+ + struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head))
return b"".join([
- b'\x89PNG\r\n\x1a\n',
- png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
- png_pack(b'IDAT', zlib.compress(raw_data, 9)),
- png_pack(b'IEND', b'')])
+ b'\x89PNG\r\n\x1a\n',
+ png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
+ png_pack(b'IDAT', zlib.compress(raw_data, 9)),
+ png_pack(b'IEND', b'')])
@abstractmethod
def _getRemoteHash(self, filename):
"""
Return the md5 sum of a file on the device.
"""
@staticmethod
def _getLocalHash(filename):
"""
Return the MD5 sum of a file on the host.
"""
f = open(filename, 'rb')
- if (f == None):
+ if f is None:
return None
try:
mdsum = hashlib.md5()
except:
return None
while 1:
@@ -577,67 +591,70 @@ class DeviceManager(object):
Utility function to return escaped and quoted version of command line.
"""
quotedCmd = []
for arg in cmd:
arg.replace('&', '\&')
needsQuoting = False
- for char in [ ' ', '(', ')', '"', '&' ]:
+ for char in [' ', '(', ')', '"', '&']:
if arg.find(char) >= 0:
needsQuoting = True
break
if needsQuoting:
arg = '\'%s\'' % arg
quotedCmd.append(arg)
return " ".join(quotedCmd)
+
def _pop_last_line(file_obj):
"""
Utility function to get the last line from a file (shared between ADB and
SUT device managers). Function also removes it from the file. Intended to
strip off the return code from a shell command.
"""
bytes_from_end = 1
file_obj.seek(0, 2)
length = file_obj.tell() + 1
while bytes_from_end < length:
- file_obj.seek((-1)*bytes_from_end, 2)
+ file_obj.seek((-1) * bytes_from_end, 2)
data = file_obj.read()
- if bytes_from_end == length-1 and len(data) == 0: # no data, return None
+ if bytes_from_end == length - 1 and len(data) == 0: # no data, return None
return None
- if data[0] == '\n' or bytes_from_end == length-1:
+ if data[0] == '\n' or bytes_from_end == length - 1:
# found the last line, which should have the return value
if data[0] == '\n':
data = data[1:]
# truncate off the return code line
file_obj.truncate(length - bytes_from_end)
- file_obj.seek(0,2)
+ file_obj.seek(0, 2)
file_obj.write('\0')
return data
bytes_from_end += 1
return None
+
class ZeroconfListener(object):
+
def __init__(self, hwid, evt):
self.hwid = hwid
self.evt = evt
# Format is 'SUTAgent [hwid:015d2bc2825ff206] [ip:10_242_29_221]._sutagent._tcp.local.'
def addService(self, zeroconf, type, name):
- #print "Found _sutagent service broadcast:", name
+ # print "Found _sutagent service broadcast:", name
if not name.startswith("SUTAgent"):
return
sutname = name.split('.')[0]
m = re.search('\[hwid:([^\]]*)\]', sutname)
if m is None:
return
--- a/testing/mozbase/mozdevice/mozdevice/devicemanagerADB.py
+++ b/testing/mozbase/mozdevice/mozdevice/devicemanagerADB.py
@@ -126,50 +126,51 @@ class DeviceManagerADB(DeviceManager):
# prepend cwd and env to command if necessary
if cwd:
cmdline = "cd %s; %s" % (cwd, cmdline)
if env:
envstr = '; '.join(map(lambda x: 'export %s=%s' % (x[0], x[1]), env.iteritems()))
cmdline = envstr + "; " + cmdline
# all output should be in stdout
- args=[self._adbPath]
+ args = [self._adbPath]
if self._serverHost is not None:
args.extend(['-H', self._serverHost])
if self._serverPort is not None:
args.extend(['-P', str(self._serverPort)])
if self._deviceSerial:
args.extend(['-s', self._deviceSerial])
args.extend(["shell", cmdline])
def _timeout():
self._logger.error("Timeout exceeded for shell call '%s'" % ' '.join(args))
self._logger.debug("shell - command: %s" % ' '.join(args))
proc = ProcessHandler(args, processOutputLine=self._log, onTimeout=_timeout)
if not timeout:
- # We are asserting that all commands will complete in this time unless otherwise specified
+ # We are asserting that all commands will complete in this time unless
+ # otherwise specified
timeout = self.default_timeout
timeout = int(timeout)
proc.run(timeout)
proc.wait()
output = proc.output
if output:
lastline = output[-1]
if lastline:
m = re.search('([0-9]+)', lastline)
if m:
return_code = m.group(1)
for line in output:
outputfile.write(line + '\n')
outputfile.seek(-2, 2)
- outputfile.truncate() # truncate off the return code
+ outputfile.truncate() # truncate off the return code
return int(return_code)
return None
def forward(self, local, remote):
"""
Forward socket connections.
@@ -191,25 +192,24 @@ class DeviceManagerADB(DeviceManager):
cmd = ['forward']
if local is None:
cmd.extend(['--remove-all'])
else:
cmd.extend(['--remove', local])
if not self._checkCmd(cmd, timeout=self.short_timeout) == 0:
raise DMError("Failed to remove connection forwarding.")
-
def remount(self):
"Remounts the /system partition on the device read-write."
return self._checkCmd(['remount'], timeout=self.short_timeout)
def devices(self):
"Return a list of connected devices as (serial, status) tuples."
proc = self._runCmd(['devices'])
- proc.output.pop(0) # ignore first line of output
+ proc.output.pop(0) # ignore first line of output
devices = []
for line in proc.output:
result = re.match('(.*?)\t(.*)', line)
if result:
devices.append((result.group(1), result.group(2)))
return devices
def _connectRemoteADB(self):
@@ -226,39 +226,40 @@ class DeviceManagerADB(DeviceManager):
retryLimit = retryLimit or self.retryLimit
if self.dirExists(destname):
raise DMError("Attempted to push a file (%s) to a directory (%s)!" %
(localname, destname))
if not os.access(localname, os.F_OK):
raise DMError("File not found: %s" % localname)
proc = self._runCmd(["push", os.path.realpath(localname), destname],
- retryLimit=retryLimit)
+ retryLimit=retryLimit)
if proc.returncode != 0:
- raise DMError("Error pushing file %s -> %s; output: %s" % (localname, destname, proc.output))
+ raise DMError("Error pushing file %s -> %s; output: %s" %
+ (localname, destname, proc.output))
def mkDir(self, name):
result = self._runCmd(["shell", "mkdir", name], timeout=self.short_timeout).output
if len(result) and 'read-only file system' in result[0].lower():
raise DMError("Error creating directory: read only file system")
def pushDir(self, localDir, remoteDir, retryLimit=None, timeout=None):
# adb "push" accepts a directory as an argument, but if the directory
# contains symbolic links, the links are pushed, rather than the linked
# files; we either zip/unzip or re-copy the directory into a temporary
# one to get around this limitation
retryLimit = retryLimit or self.retryLimit
if self._useZip:
self.removeDir(remoteDir)
- self.mkDirs(remoteDir+"/x")
+ self.mkDirs(remoteDir + "/x")
try:
localZip = tempfile.mktemp() + ".zip"
remoteZip = remoteDir + "/adbdmtmp.zip"
proc = ProcessHandler(["zip", "-r", localZip, '.'], cwd=localDir,
- processOutputLine=self._log)
+ processOutputLine=self._log)
proc.run()
proc.wait()
self.pushFile(localZip, remoteZip, retryLimit=retryLimit, createDir=False)
mozfile.remove(localZip)
data = self._runCmd(["shell", "unzip", "-o", remoteZip,
"-d", remoteDir]).output[0]
self._checkCmd(["shell", "rm", remoteZip],
retryLimit=retryLimit, timeout=self.short_timeout)
@@ -268,17 +269,17 @@ class DeviceManagerADB(DeviceManager):
self._logger.warning(traceback.format_exc())
self._logger.warning("zip/unzip failure: falling back to normal push")
self._useZip = False
self.pushDir(localDir, remoteDir, retryLimit=retryLimit, timeout=timeout)
else:
# If the remote directory exists, newer implementations of
# "adb push" will create a sub-directory, while older versions
# will not! Bug 1285040
- self.mkDirs(remoteDir+"/x")
+ self.mkDirs(remoteDir + "/x")
self.removeDir(remoteDir)
tmpDir = tempfile.mkdtemp()
# copytree's target dir must not already exist, so create a subdir
tmpDirTarget = os.path.join(tmpDir, "tmp")
shutil.copytree(localDir, tmpDirTarget)
self._checkCmd(["push", tmpDirTarget, remoteDir],
retryLimit=retryLimit, timeout=timeout)
mozfile.remove(tmpDir)
@@ -360,23 +361,23 @@ class DeviceManagerADB(DeviceManager):
def fireProcess(self, appname, failIfRunning=False):
"""
Starts a process
returns: pid
DEPRECATED: Use shell() or launchApplication() for new code
"""
- #strip out env vars
- parts = appname.split('"');
+ # strip out env vars
+ parts = appname.split('"')
if (len(parts) > 2):
parts = parts[2:]
return self.launchProcess(parts, failIfRunning)
- def launchProcess(self, cmd, outputFile = "process.txt", cwd = '', env = '', failIfRunning=False):
+ def launchProcess(self, cmd, outputFile="process.txt", cwd='', env='', failIfRunning=False):
"""
Launches a process, redirecting output to standard out
WARNING: Does not work how you expect on Android! The application's
own output will be flushed elsewhere.
DEPRECATED: Use shell() or launchApplication() for new code
"""
@@ -398,23 +399,23 @@ class DeviceManagerADB(DeviceManager):
else:
args = cmd[i:].strip()
acmd.append("-n")
acmd.append(cmd[0:i] + "/org.mozilla.gecko.BrowserApp")
if args != "":
acmd.append("--es")
acmd.append("args")
acmd.append(args)
- if env != '' and env != None:
+ if env != '' and env is not None:
envCnt = 0
# env is expected to be a dict of environment variables
for envkey, envval in env.iteritems():
acmd.append("--es")
acmd.append("env" + str(envCnt))
- acmd.append(envkey + "=" + envval);
+ acmd.append(envkey + "=" + envval)
envCnt += 1
if uri != "":
acmd.append("-d")
acmd.append(uri)
acmd = ["shell", ' '.join(map(lambda x: '"' + x + '"', ["am", "start"] + acmd))]
self._logger.info(acmd)
self._checkCmd(acmd)
@@ -499,51 +500,52 @@ class DeviceManagerADB(DeviceManager):
root = os.path.join(basePath, subPath)
try:
self.mkDir(root)
return root
except:
pass
raise DMError("Unable to set up device root using paths: [%s]"
- % ", ".join(["'%s'" % os.path.join(b, s) for b, s in paths]))
+ % ", ".join(["'%s'" % os.path.join(b, s) for b, s in paths]))
def getTempDir(self):
# Cache result to speed up operations depending
# on the temporary directory.
if not self._tempDir:
self._tempDir = "%s/tmp" % self.deviceRoot
self.mkDir(self._tempDir)
return self._tempDir
- def reboot(self, wait = False, **kwargs):
+ def reboot(self, wait=False, **kwargs):
self._checkCmd(["reboot"])
if wait:
self._checkCmd(["wait-for-device"])
if self._runAdbAsRoot:
self._adb_root()
self._checkCmd(["shell", "ls", "/sbin"], timeout=self.short_timeout)
def updateApp(self, appBundlePath, **kwargs):
return self._runCmd(["install", "-r", appBundlePath]).output
def getCurrentTime(self):
timestr = str(self._runCmd(["shell", "date", "+%s"], timeout=self.short_timeout).output[0])
if (not timestr or not timestr.isdigit()):
raise DMError("Unable to get current time using date (got: '%s')" % timestr)
- return int(timestr)*1000
+ return int(timestr) * 1000
def getInfo(self, directive=None):
directive = directive or "all"
ret = {}
if directive == "id" or directive == "all":
ret["id"] = self._runCmd(["get-serialno"], timeout=self.short_timeout).output[0]
if directive == "os" or directive == "all":
- ret["os"] = self.shellCheckOutput(["getprop", "ro.build.display.id"], timeout=self.short_timeout)
+ ret["os"] = self.shellCheckOutput(
+ ["getprop", "ro.build.display.id"], timeout=self.short_timeout)
if directive == "uptime" or directive == "all":
uptime = self.shellCheckOutput(["uptime"], timeout=self.short_timeout)
if not uptime:
raise DMError("error getting uptime")
m = re.match("up time: ((\d+) days, )*(\d{2}):(\d{2}):(\d{2})", uptime)
if m:
uptime = "%d days %d hours %d minutes %d seconds" % tuple(
[int(g or 0) for g in m.groups()[1:]])
@@ -555,17 +557,18 @@ class DeviceManagerADB(DeviceManager):
ret["systime"] = self.shellCheckOutput(["date"], timeout=self.short_timeout)
if directive == "memtotal" or directive == "all":
meminfo = {}
for line in self.pullFile("/proc/meminfo").splitlines():
key, value = line.split(":")
meminfo[key] = value.strip()
ret["memtotal"] = meminfo["MemTotal"]
if directive == "disk" or directive == "all":
- data = self.shellCheckOutput(["df", "/data", "/system", "/sdcard"], timeout=self.short_timeout)
+ data = self.shellCheckOutput(
+ ["df", "/data", "/system", "/sdcard"], timeout=self.short_timeout)
ret["disk"] = data.split('\n')
self._logger.debug("getInfo: %s" % ret)
return ret
def uninstallApp(self, appName, installPath=None):
status = self._runCmd(["uninstall", appName]).output[0].strip()
if status != 'Success':
raise DMError("uninstall failed for %s. Got: %s" % (appName, status))
@@ -595,20 +598,20 @@ class DeviceManagerADB(DeviceManager):
timeout = self.default_timeout
def _timeout():
self._logger.error("Timeout exceeded for _runCmd call '%s'" % ' '.join(finalArgs))
retries = 0
while retries < retryLimit:
proc = ProcessHandler(finalArgs, storeOutput=True,
- processOutputLine=self._log, onTimeout=_timeout)
+ processOutputLine=self._log, onTimeout=_timeout)
proc.run(timeout=timeout)
proc.returncode = proc.wait()
- if proc.returncode == None:
+ if proc.returncode is None:
proc.kill()
retries += 1
else:
return proc
# timeout is specified in seconds, and if no timeout is given,
# we will run until we hit the default_timeout specified in the __init__
def _checkCmd(self, args, timeout=None, retryLimit=None):
@@ -637,17 +640,17 @@ class DeviceManagerADB(DeviceManager):
self._logger.error("Timeout exceeded for _checkCmd call '%s'" % ' '.join(finalArgs))
timeout = int(timeout)
retries = 0
while retries < retryLimit:
proc = ProcessHandler(finalArgs, processOutputLine=self._log, onTimeout=_timeout)
proc.run(timeout=timeout)
ret_code = proc.wait()
- if ret_code == None:
+ if ret_code is None:
proc.kill()
retries += 1
else:
return ret_code
raise DMError("Timeout exceeded for _checkCmd call after %d retries." % retries)
def chmodDir(self, remoteDir, mask="777"):
@@ -656,17 +659,18 @@ class DeviceManagerADB(DeviceManager):
self._logger.debug("chmod %s -- skipped (/sdcard)" % remoteDir)
else:
files = self.listFiles(remoteDir.strip())
for f in files:
remoteEntry = remoteDir.strip() + "/" + f.strip()
if (self.dirExists(remoteEntry)):
self.chmodDir(remoteEntry)
else:
- self._checkCmd(["shell", "chmod", mask, remoteEntry], timeout=self.short_timeout)
+ self._checkCmd(["shell", "chmod", mask, remoteEntry],
+ timeout=self.short_timeout)
self._logger.info("chmod %s" % remoteEntry)
self._checkCmd(["shell", "chmod", mask, remoteDir], timeout=self.short_timeout)
self._logger.debug("chmod %s" % remoteDir)
else:
self._checkCmd(["shell", "chmod", mask, remoteDir.strip()], timeout=self.short_timeout)
self._logger.debug("chmod %s" % remoteDir.strip())
def _verifyADB(self):
@@ -675,28 +679,30 @@ class DeviceManagerADB(DeviceManager):
"""
if self._adbPath != 'adb':
if not os.access(self._adbPath, os.X_OK):
raise DMError("invalid adb path, or adb not executable: %s" % self._adbPath)
try:
self._checkCmd(["version"], timeout=self.short_timeout)
except os.error as err:
- raise DMError("unable to execute ADB (%s): ensure Android SDK is installed and adb is in your $PATH" % err)
+ raise DMError(
+ "unable to execute ADB (%s): ensure Android SDK is installed "
+ "and adb is in your $PATH" % err)
def _verifyDevice(self):
# If there is a device serial number, see if adb is connected to it
if self._deviceSerial:
deviceStatus = None
for line in self._runCmd(["devices"]).output:
m = re.match('(.+)?\s+(.+)$', line)
if m:
if self._deviceSerial == m.group(1):
deviceStatus = m.group(2)
- if deviceStatus == None:
+ if deviceStatus is None:
raise DMError("device not found: %s" % self._deviceSerial)
elif deviceStatus != "device":
raise DMError("bad status for device %s: %s" % (self._deviceSerial, deviceStatus))
# Check to see if we can connect to device and run a simple command
if not self._checkCmd(["shell", "echo"], timeout=self.short_timeout) == 0:
raise DMError("unable to connect to device")
@@ -725,17 +731,17 @@ class DeviceManagerADB(DeviceManager):
# wait for response for maximum of 15 seconds, in case su
# prompts for a password or triggers the Android SuperUser
# prompt
start_time = time.time()
retcode = None
while (time.time() - start_time) <= 15 and retcode is None:
retcode = proc.poll()
- if retcode is None: # still not terminated, kill
+ if retcode is None: # still not terminated, kill
proc.kill()
if proc.output and 'uid=0(root)' in proc.output[0]:
return True
return False
if su_id('0', self.short_timeout):
self._haveSu = True
@@ -784,19 +790,18 @@ class DeviceManagerADB(DeviceManager):
if self.processInfo("adbd")[2] != "root":
raise DMError("We tried rebooting adbd as root, however, it failed.")
def _detectLsModifier(self):
if self._lsModifier is None:
# Check if busybox -1A is required in order to get one
# file per line.
output = self._runCmd(["shell", "ls", "-1A", "/"],
- timeout=self.short_timeout).output
+ timeout=self.short_timeout).output
output = ' '.join(output)
if 'error: device not found' in output:
raise DMError(output)
if "Unknown option '-1'. Aborting." in output:
self._lsModifier = "-a"
elif "No such file or directory" in output:
self._lsModifier = "-a"
else:
self._lsModifier = "-1A"
-
--- a/testing/mozbase/mozdevice/mozdevice/devicemanagerSUT.py
+++ b/testing/mozbase/mozdevice/mozdevice/devicemanagerSUT.py
@@ -12,16 +12,17 @@ import os
import re
import posixpath
import subprocess
import StringIO
from devicemanager import DeviceManager, DMError, _pop_last_line
import errno
from distutils.version import StrictVersion
+
class DeviceManagerSUT(DeviceManager):
"""
Implementation of DeviceManager interface that speaks to a device over
TCP/IP using the "system under test" protocol. A software agent such as
Negatus (http://github.com/mozilla/Negatus) or the Mozilla Android SUTAgent
app must be present and listening for connections for this to work.
"""
@@ -40,17 +41,17 @@ class DeviceManagerSUT(DeviceManager):
deviceRoot=deviceRoot)
self.host = host
self.port = port
self.retryLimit = retryLimit
self._sock = None
self._everConnected = False
# Get version
- verstring = self._runCmds([{ 'cmd': 'ver' }])
+ verstring = self._runCmds([{'cmd': 'ver'}])
ver_re = re.match('(\S+) Version (\S+)', verstring)
self.agentProductName = ver_re.group(1)
self.agentVersion = ver_re.group(2)
def _cmdNeedsResponse(self, cmd):
""" Not all commands need a response from the agent:
* rebt obviously doesn't get a response
* uninstall performs a reboot to ensure starting in a clean state and
@@ -104,17 +105,17 @@ class DeviceManagerSUT(DeviceManager):
re.compile('^rebt.*'),
re.compile('^uninst .*$')]
for c in socketClosingCmds:
if (c.match(cmd)):
return True
return False
- def _sendCmds(self, cmdlist, outputfile, timeout = None, retryLimit = None):
+ def _sendCmds(self, cmdlist, outputfile, timeout=None, retryLimit=None):
"""
Wrapper for _doCmds that loops up to retryLimit iterations
"""
# this allows us to move the retry logic outside of the _doCmds() to make it
# easier for debugging in the future.
# note that since cmdlist is a list of commands, they will all be retried if
# one fails. this is necessary in particular for pushFile(), where we don't want
# to accidentally send extra data if a failure occurs during data transmission.
@@ -133,61 +134,65 @@ class DeviceManagerSUT(DeviceManager):
self._logger.debug(err)
retries += 1
# if we lost the connection or failed to establish one, wait a bit
if retries < retryLimit and not self._sock:
sleep_time = 5 * retries
self._logger.info('Could not connect; sleeping for %d seconds.' % sleep_time)
time.sleep(sleep_time)
- raise DMError("Remote Device Error: unable to connect to %s after %s attempts" % (self.host, retryLimit))
+ raise DMError("Remote Device Error: unable to connect to %s after %s attempts" %
+ (self.host, retryLimit))
- def _runCmds(self, cmdlist, timeout = None, retryLimit = None):
+ def _runCmds(self, cmdlist, timeout=None, retryLimit=None):
"""
Similar to _sendCmds, but just returns any output as a string instead of
writing to a file
"""
retryLimit = retryLimit or self.retryLimit
outputfile = StringIO.StringIO()
self._sendCmds(cmdlist, outputfile, timeout, retryLimit=retryLimit)
outputfile.seek(0)
return outputfile.read()
def _doCmds(self, cmdlist, outputfile, timeout):
promptre = re.compile(self._prompt_regex + '$')
shouldCloseSocket = False
if not timeout:
- # We are asserting that all commands will complete in this time unless otherwise specified
+ # We are asserting that all commands will complete in this time unless
+ # otherwise specified
timeout = self.default_timeout
if not self._sock:
try:
if self._everConnected:
self._logger.info("reconnecting socket")
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as msg:
self._sock = None
- raise DMError("Automation Error: unable to create socket: "+str(msg))
+ raise DMError("Automation Error: unable to create socket: " + str(msg))
try:
self._sock.settimeout(float(timeout))
self._sock.connect((self.host, int(self.port)))
self._everConnected = True
except socket.error as msg:
self._sock = None
- raise DMError("Remote Device Error: Unable to connect socket: "+str(msg))
+ raise DMError("Remote Device Error: Unable to connect socket: " + str(msg))
# consume prompt
try:
self._sock.recv(1024)
except socket.error as msg:
self._sock.close()
self._sock = None
- raise DMError("Remote Device Error: Did not get prompt after connecting: " + str(msg), fatal=True)
+ raise DMError(
+ "Remote Device Error: Did not get prompt after connecting: " + str(msg),
+ fatal=True)
# future recv() timeouts are handled by select() calls
self._sock.settimeout(None)
for cmd in cmdlist:
cmdline = '%s\r\n' % cmd['cmd']
try:
@@ -203,18 +208,18 @@ class DeviceManagerSUT(DeviceManager):
if sent == 0:
raise DMError("Socket connection broken when sending data")
totalsent += sent
self._logger.debug("sent cmd: %s" % cmd['cmd'])
except socket.error as msg:
self._sock.close()
self._sock = None
- self._logger.error("Remote Device Error: Error sending data"\
- " to socket. cmd=%s; err=%s" % (cmd['cmd'], msg))
+ self._logger.error("Remote Device Error: Error sending data"
+ " to socket. cmd=%s; err=%s" % (cmd['cmd'], msg))
return False
# Check if the command should close the socket
shouldCloseSocket = self._shouldCmdCloseSocket(cmd['cmd'])
# Handle responses from commands
if self._cmdNeedsResponse(cmd['cmd']):
foundPrompt = False
@@ -238,28 +243,31 @@ class DeviceManagerSUT(DeviceManager):
timer = 0
if not temp:
socketClosed = True
errStr = 'connection closed'
timer += select_timeout
if timer > timeout:
self._sock.close()
self._sock = None
- raise DMError("Automation Error: Timeout in command %s" % cmd['cmd'], fatal=True)
+ raise DMError("Automation Error: Timeout in command %s" %
+ cmd['cmd'], fatal=True)
except socket.error as err:
socketClosed = True
errStr = str(err)
# This error shows up with we have our tegra rebooted.
if err[0] == errno.ECONNRESET:
errStr += ' - possible reboot'
if socketClosed:
self._sock.close()
self._sock = None
- raise DMError("Automation Error: Error receiving data from socket. cmd=%s; err=%s" % (cmd, errStr))
+ raise DMError(
+ "Automation Error: Error receiving data from socket. "
+ "cmd=%s; err=%s" % (cmd, errStr))
data += temp
# If something goes wrong in the agent it will send back a string that
# starts with '##AGENT-WARNING##'
if not commandFailed:
errorMatch = self._agentErrorRE.match(data)
if errorMatch:
@@ -271,18 +279,18 @@ class DeviceManagerSUT(DeviceManager):
if promptre.match(line):
foundPrompt = True
data = self._stripPrompt(data)
break
# periodically flush data to output file to make sure it doesn't get
# too big/unwieldly
if len(data) > 1024:
- outputfile.write(data[0:1024])
- data = data[1024:]
+ outputfile.write(data[0:1024])
+ data = data[1024:]
if commandFailed:
raise DMError("Automation Error: Error processing command '%s'; err='%s'" %
(cmd['cmd'], errorMatch.group(1)), fatal=True)
# Write any remaining data to outputfile
outputfile.write(data)
@@ -292,17 +300,17 @@ class DeviceManagerSUT(DeviceManager):
self._sock = None
except:
self._sock = None
raise DMError("Automation Error: Error closing socket")
def _setupDeviceRoot(self, deviceRoot):
if not deviceRoot:
deviceRoot = "%s/tests" % self._runCmds(
- [{ 'cmd': 'testroot' }]).strip()
+ [{'cmd': 'testroot'}]).strip()
self.mkDir(deviceRoot)
return deviceRoot
def shell(self, cmd, outputfile, env=None, cwd=None, timeout=None, root=False):
cmdline = self._escapedCommandLine(cmd)
if env:
cmdline = '%s %s' % (self._formatEnvString(env), cmdline)
@@ -322,62 +330,63 @@ class DeviceManagerSUT(DeviceManager):
cmd = "exec"
if cwd:
cmd += "cwd"
if root and haveExecSu:
cmd += "su"
if cwd:
- self._sendCmds([{ 'cmd': '%s %s %s' % (cmd, cwd, cmdline) }], outputfile, timeout)
+ self._sendCmds([{'cmd': '%s %s %s' % (cmd, cwd, cmdline)}], outputfile, timeout)
else:
if (not root) or haveExecSu:
- self._sendCmds([{ 'cmd': '%s %s' % (cmd, cmdline) }], outputfile, timeout)
+ self._sendCmds([{'cmd': '%s %s' % (cmd, cmdline)}], outputfile, timeout)
else:
# need to manually inject su -c for backwards compatibility (this may
# not work on ICS or above!!)
# (FIXME: this backwards compatibility code is really ugly and should
# be deprecated at some point in the future)
- self._sendCmds([ { 'cmd': '%s su -c "%s"' % (cmd, cmdline) }], outputfile,
+ self._sendCmds([{'cmd': '%s su -c "%s"' % (cmd, cmdline)}], outputfile,
timeout)
# dig through the output to get the return code
lastline = _pop_last_line(outputfile)
if lastline:
m = re.search('return code \[([0-9]+)\]', lastline)
if m:
return int(m.group(1))
# woops, we couldn't find an end of line/return value
- raise DMError("Automation Error: Error finding end of line/return value when running '%s'" % cmdline)
+ raise DMError(
+ "Automation Error: Error finding end of line/return value when running '%s'" % cmdline)
def pushFile(self, localname, destname, retryLimit=None, createDir=True):
retryLimit = retryLimit or self.retryLimit
if createDir:
self.mkDirs(destname)
try:
filesize = os.path.getsize(localname)
with open(localname, 'rb') as f:
- remoteHash = self._runCmds([{ 'cmd': 'push ' + destname + ' ' + str(filesize),
- 'data': f.read() }], retryLimit=retryLimit).strip()
+ remoteHash = self._runCmds([{'cmd': 'push ' + destname + ' ' + str(filesize),
+ 'data': f.read()}], retryLimit=retryLimit).strip()
except OSError:
raise DMError("DeviceManager: Error reading file to push")
self._logger.debug("push returned: %s" % remoteHash)
localHash = self._getLocalHash(localname)
if localHash != remoteHash:
raise DMError("Automation Error: Push File failed to Validate! (localhash: %s, "
"remotehash: %s)" % (localHash, remoteHash))
def mkDir(self, name):
if not self.dirExists(name):
- self._runCmds([{ 'cmd': 'mkdr ' + name }])
+ self._runCmds([{'cmd': 'mkdr ' + name}])
def pushDir(self, localDir, remoteDir, retryLimit=None, timeout=None):
retryLimit = retryLimit or self.retryLimit
self._logger.info("pushing directory: %s to %s" % (localDir, remoteDir))
existentDirectories = []
for root, dirs, files in os.walk(localDir, followlinks=True):
_, subpath = root.split(localDir)
@@ -389,20 +398,21 @@ class DeviceManagerSUT(DeviceManager):
if subpath == "":
remoteRoot = remoteDir
parent = os.path.dirname(remoteName)
if parent not in existentDirectories:
self.mkDirs(remoteName)
existentDirectories.append(parent)
- self.pushFile(os.path.join(root, f), remoteName, retryLimit=retryLimit, createDir=False)
+ self.pushFile(os.path.join(root, f), remoteName,
+ retryLimit=retryLimit, createDir=False)
def dirExists(self, remotePath):
- ret = self._runCmds([{ 'cmd': 'isdir ' + remotePath }]).strip()
+ ret = self._runCmds([{'cmd': 'isdir ' + remotePath}]).strip()
if not ret:
raise DMError('Automation Error: DeviceManager isdir returned null')
return ret == 'TRUE'
def fileExists(self, filepath):
# Because we always have / style paths we make this a lot easier with some
# assumptions
@@ -413,42 +423,42 @@ class DeviceManagerSUT(DeviceManager):
return self.dirExists(filepath)
(containingpath, filename) = posixpath.split(filepath)
return filename in self.listFiles(containingpath)
def listFiles(self, rootdir):
rootdir = posixpath.normpath(rootdir)
if not self.dirExists(rootdir):
return []
- data = self._runCmds([{ 'cmd': 'cd ' + rootdir }, { 'cmd': 'ls' }])
+ data = self._runCmds([{'cmd': 'cd ' + rootdir}, {'cmd': 'ls'}])
files = filter(lambda x: x, data.splitlines())
if len(files) == 1 and files[0] == '<empty>':
# special case on the agent: empty directories return just the
# string "<empty>"
return []
return files
def removeFile(self, filename):
self._logger.info("removing file: " + filename)
if self.fileExists(filename):
- self._runCmds([{ 'cmd': 'rm ' + filename }])
+ self._runCmds([{'cmd': 'rm ' + filename}])
def removeDir(self, remoteDir):
if self.dirExists(remoteDir):
- self._runCmds([{ 'cmd': 'rmdr ' + remoteDir }])
+ self._runCmds([{'cmd': 'rmdr ' + remoteDir}])
def moveTree(self, source, destination):
- self._runCmds([{ 'cmd': 'mv %s %s' % (source, destination) }])
+ self._runCmds([{'cmd': 'mv %s %s' % (source, destination)}])
def copyTree(self, source, destination):
- self._runCmds([{ 'cmd': 'dd if=%s of=%s' % (source, destination) }])
+ self._runCmds([{'cmd': 'dd if=%s of=%s' % (source, destination)}])
def getProcessList(self):
- data = self._runCmds([{ 'cmd': 'ps' }])
+ data = self._runCmds([{'cmd': 'ps'}])
processTuples = []
for line in data.splitlines():
if line:
pidproc = line.strip().split()
try:
if (len(pidproc) == 2):
processTuples += [[pidproc[0], pidproc[1]]]
@@ -473,22 +483,22 @@ class DeviceManagerSUT(DeviceManager):
DEPRECATED: Use shell() or launchApplication() for new code
"""
if not appname:
raise DMError("Automation Error: fireProcess called with no command to run")
self._logger.info("FIRE PROC: '%s'" % appname)
-        if (self.processExist(appname) != None):
+        if (self.processExist(appname) is not None):
self._logger.warning("process %s appears to be running already\n" % appname)
if (failIfRunning):
raise DMError("Automation Error: Process is already running")
- self._runCmds([{ 'cmd': 'exec ' + appname }])
+ self._runCmds([{'cmd': 'exec ' + appname}])
# The 'exec' command may wait for the process to start and end, so checking
# for the process here may result in process = None.
# The normal case is to launch the process and return right away
# There is one case with robotium (am instrument) where exec returns at the end
pid = None
waited = 0
while pid is None and waited < maxWaitTime:
@@ -496,17 +506,17 @@ class DeviceManagerSUT(DeviceManager):
if pid:
break
time.sleep(1)
waited += 1
self._logger.debug("got pid: %s for process: %s" % (pid, appname))
return pid
- def launchProcess(self, cmd, outputFile = "process.txt", cwd = '', env = '', failIfRunning=False):
+ def launchProcess(self, cmd, outputFile="process.txt", cwd='', env='', failIfRunning=False):
"""
Launches a process, redirecting output to standard out
Returns output filename
WARNING: Does not work how you expect on Android! The application's
own output will be flushed elsewhere.
@@ -540,42 +550,42 @@ class DeviceManagerSUT(DeviceManager):
return outputFile
def killProcess(self, appname, sig=None):
if sig:
pid = self.processExist(appname)
if pid and pid > 0:
try:
self.shellCheckOutput(['kill', '-%d' % sig, str(pid)],
- root=True)
+ root=True)
except DMError as err:
self._logger.warning("unable to kill -%d %s (pid %s)" %
- (sig, appname, str(pid)))
+ (sig, appname, str(pid)))
self._logger.debug(err)
raise err
else:
self._logger.warning("unable to kill -%d %s -- not running?" %
- (sig, appname))
+ (sig, appname))
else:
retries = 0
while retries < self.retryLimit:
try:
if self.processExist(appname):
- self._runCmds([{ 'cmd': 'kill ' + appname }])
+ self._runCmds([{'cmd': 'kill ' + appname}])
return
except DMError as err:
retries += 1
self._logger.warning("try %d of %d failed to kill %s" %
- (retries, self.retryLimit, appname))
+ (retries, self.retryLimit, appname))
self._logger.debug(err)
if retries >= self.retryLimit:
raise err
def getTempDir(self):
- return self._runCmds([{ 'cmd': 'tmpd' }]).strip()
+ return self._runCmds([{'cmd': 'tmpd'}]).strip()
def pullFile(self, remoteFile, offset=None, length=None):
# The "pull" command is different from other commands in that DeviceManager
# has to read a certain number of bytes instead of just reading to the
# next prompt. This is more robust than the "cat" command, which will be
# confused if the prompt string exists within the file being catted.
# However it means we can't use the response-handling logic in sendCMD().
@@ -600,17 +610,17 @@ class DeviceManagerSUT(DeviceManager):
err(error_msg)
return data
except:
err(error_msg)
def read_until_char(c, buf, error_msg):
""" read until 'c' is found; buffer rest """
- while not c in buf:
+ while c not in buf:
data = uread(1024, error_msg)
buf += data
return buf.partition(c)
def read_exact(total_to_recv, buf, error_msg):
""" read exact number of 'total_to_recv' bytes """
while len(buf) < total_to_recv:
to_recv = min(total_to_recv - len(buf), 1024)
@@ -626,20 +636,20 @@ class DeviceManagerSUT(DeviceManager):
# or, if error,
# <filename>,-1\n<error message>
# just send the command first, we read the response inline below
if offset is not None and length is not None:
cmd = 'pull %s %d %d' % (remoteFile, offset, length)
elif offset is not None:
cmd = 'pull %s %d' % (remoteFile, offset)
- else:
+ else:
cmd = 'pull %s' % remoteFile
- self._runCmds([{ 'cmd': cmd }])
+ self._runCmds([{'cmd': cmd}])
# read metadata; buffer the rest
metadata, sep, buf = read_until_char('\n', buf, 'could not find metadata')
if not metadata:
return None
self._logger.debug('metadata: %s' % metadata)
filename, sep, filesizestr = metadata.partition(',')
@@ -653,17 +663,18 @@ class DeviceManagerSUT(DeviceManager):
if filesize == -1:
# read error message
error_str, sep, buf = read_until_char('\n', buf, 'could not find error message')
if not error_str:
err("blank error message")
# prompt should follow
read_exact(len(prompt), buf, 'could not find prompt')
# failures are expected, so don't use "Remote Device Error" or we'll RETRY
- raise DMError("DeviceManager: pulling file '%s' unsuccessful: %s" % (remoteFile, error_str))
+ raise DMError("DeviceManager: pulling file '%s' unsuccessful: %s" %
+ (remoteFile, error_str))
# read file data
total_to_recv = filesize + len(prompt)
buf = read_exact(total_to_recv, buf, 'could not get all file data')
if buf[-len(prompt):] != prompt:
err('no prompt found after file data--DeviceManager may be out of sync with agent')
return buf
return buf[:-len(prompt)]
@@ -698,43 +709,43 @@ class DeviceManagerSUT(DeviceManager):
self.getDirectory(remotePath, localPath, False)
else:
self.getFile(remotePath, localPath)
def validateFile(self, remoteFile, localFile):
remoteHash = self._getRemoteHash(remoteFile)
localHash = self._getLocalHash(localFile)
- if (remoteHash == None):
+ if (remoteHash is None):
return False
if (remoteHash == localHash):
return True
return False
def _getRemoteHash(self, filename):
- data = self._runCmds([{ 'cmd': 'hash ' + filename }]).strip()
+ data = self._runCmds([{'cmd': 'hash ' + filename}]).strip()
self._logger.debug("remote hash returned: '%s'" % data)
return data
def unpackFile(self, filePath, destDir=None):
"""
Unzips a bundle to a location on the device
If destDir is not specified, the bundle is extracted in the same directory
"""
# if no destDir is passed in just set it to filePath's folder
if not destDir:
destDir = posixpath.dirname(filePath)
if destDir[-1] != '/':
destDir += '/'
- self._runCmds([{ 'cmd': 'unzp %s %s' % (filePath, destDir)}])
+ self._runCmds([{'cmd': 'unzp %s %s' % (filePath, destDir)}])
def _getRebootServerSocket(self, ipAddr):
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.settimeout(60.0)
serverSocket.bind((ipAddr, 0))
serverSocket.listen(1)
self._logger.debug('Created reboot callback server at %s:%d' %
@@ -765,17 +776,16 @@ class DeviceManagerSUT(DeviceManager):
if not data:
raise DMError('Timed out waiting for reboot callback.')
self._logger.info("Sleeping for %s seconds to wait for device "
"to 'settle'" % self.reboot_settling_time)
time.sleep(self.reboot_settling_time)
-
def reboot(self, ipAddr=None, port=30000, wait=False):
# port ^^^ is here for backwards compatibility only, we now
# determine a port automatically and safely
wait = (wait or ipAddr)
cmd = 'rebt'
self._logger.info("Rebooting device")
@@ -804,24 +814,24 @@ class DeviceManagerSUT(DeviceManager):
if wait:
self._waitForRebootPing(serverSocket)
def getInfo(self, directive=None):
data = None
result = {}
collapseSpaces = re.compile(' +')
- directives = ['os','id','uptime','uptimemillis','systime','screen',
- 'rotation','memory','process','disk','power','sutuserinfo',
+ directives = ['os', 'id', 'uptime', 'uptimemillis', 'systime', 'screen',
+ 'rotation', 'memory', 'process', 'disk', 'power', 'sutuserinfo',
'temperature']
if (directive in directives):
directives = [directive]
for d in directives:
- data = self._runCmds([{ 'cmd': 'info ' + d }])
+ data = self._runCmds([{'cmd': 'info ' + d}])
data = collapseSpaces.sub(' ', data)
result[d] = data.split('\n')
# Get rid of any 0 length members of the arrays
for k, v in result.iteritems():
result[k] = filter(lambda x: x != '', result[k])
@@ -836,38 +846,38 @@ class DeviceManagerSUT(DeviceManager):
self._logger.debug("results: %s" % result)
return result
def installApp(self, appBundlePath, destPath=None):
cmd = 'inst ' + appBundlePath
if destPath:
cmd += ' ' + destPath
- data = self._runCmds([{ 'cmd': cmd }])
+ data = self._runCmds([{'cmd': cmd}])
if 'installation complete [0]' not in data:
raise DMError("Remove Device Error: Error installing app. Error message: %s" % data)
def uninstallApp(self, appName, installPath=None):
cmd = 'uninstall ' + appName
if installPath:
cmd += ' ' + installPath
- data = self._runCmds([{ 'cmd': cmd }])
+ data = self._runCmds([{'cmd': cmd}])
status = data.split('\n')[0].strip()
self._logger.debug("uninstallApp: '%s'" % status)
if status == 'Success':
return
raise DMError("Remote Device Error: uninstall failed for %s" % appName)
def uninstallAppAndReboot(self, appName, installPath=None):
cmd = 'uninst ' + appName
if installPath:
cmd += ' ' + installPath
- data = self._runCmds([{ 'cmd': cmd }])
+ data = self._runCmds([{'cmd': cmd}])
self._logger.debug("uninstallAppAndReboot: %s" % data)
return
def updateApp(self, appBundlePath, processName=None, destPath=None,
ipAddr=None, port=30000, wait=False):
# port ^^^ is here for backwards compatibility only, we now
# determine a port automatically and safely
@@ -892,69 +902,74 @@ class DeviceManagerSUT(DeviceManager):
self._logger.debug("updateApp using command: " % cmd)
self._runCmds([{'cmd': cmd}])
if wait:
self._waitForRebootPing(serverSocket)
def getCurrentTime(self):
- return int(self._runCmds([{ 'cmd': 'clok' }]).strip())
+ return int(self._runCmds([{'cmd': 'clok'}]).strip())
def _formatEnvString(self, env):
"""
Returns a properly formatted env string for the agent.
Input - env, which is either None, '', or a dict
Output - a quoted string of the form: '"envvar1=val1,envvar2=val2..."'
If env is None or '' return '' (empty quoted string)
"""
- if (env == None or env == ''):
+ if (env is None or env == ''):
return ''
retVal = '"%s"' % ','.join(map(lambda x: '%s=%s' % (x[0], x[1]), env.iteritems()))
if (retVal == '""'):
return ''
return retVal
def adjustResolution(self, width=1680, height=1050, type='hdmi'):
"""
Adjust the screen resolution on the device, REBOOT REQUIRED
NOTE: this only works on a tegra ATM
- supported resolutions: 640x480, 800x600, 1024x768, 1152x864, 1200x1024, 1440x900, 1680x1050, 1920x1080
+ supported resolutions: 640x480, 800x600, 1024x768, 1152x864, 1200x1024, 1440x900,
+ 1680x1050, 1920x1080
"""
if self.getInfo('os')['os'][0].split()[0] != 'harmony-eng':
self._logger.warning("unable to adjust screen resolution on non Tegra device")
return False
results = self.getInfo('screen')
parts = results['screen'][0].split(':')
- self._logger.debug("we have a current resolution of %s, %s" % (parts[1].split()[0], parts[2].split()[0]))
+ self._logger.debug("we have a current resolution of %s, %s" %
+ (parts[1].split()[0], parts[2].split()[0]))
- #verify screen type is valid, and set it to the proper value (https://bugzilla.mozilla.org/show_bug.cgi?id=632895#c4)
+ # verify screen type is valid, and set it to the proper value
+ # (https://bugzilla.mozilla.org/show_bug.cgi?id=632895#c4)
screentype = -1
if (type == 'hdmi'):
screentype = 5
elif (type == 'vga' or type == 'crt'):
screentype = 3
else:
return False
- #verify we have numbers
+ # verify we have numbers
if not (isinstance(width, int) and isinstance(height, int)):
return False
if (width < 100 or width > 9999):
return False
if (height < 100 or height > 9999):
return False
self._logger.debug("adjusting screen resolution to %s, %s and rebooting" % (width, height))
- self._runCmds([{ 'cmd': "exec setprop persist.tegra.dpy%s.mode.width %s" % (screentype, width) }])
- self._runCmds([{ 'cmd': "exec setprop persist.tegra.dpy%s.mode.height %s" % (screentype, height) }])
+ self._runCmds(
+ [{'cmd': "exec setprop persist.tegra.dpy%s.mode.width %s" % (screentype, width)}])
+ self._runCmds(
+ [{'cmd': "exec setprop persist.tegra.dpy%s.mode.height %s" % (screentype, height)}])
def chmodDir(self, remoteDir, **kwargs):
- self._runCmds([{ 'cmd': "chmod "+remoteDir }])
+ self._runCmds([{'cmd': "chmod " + remoteDir}])
--- a/testing/mozbase/mozdevice/mozdevice/dmcli.py
+++ b/testing/mozbase/mozdevice/mozdevice/dmcli.py
@@ -11,131 +11,138 @@ import logging
import os
import posixpath
import StringIO
import sys
import mozdevice
import mozlog
import argparse
+
class DMCli(object):
def __init__(self):
- self.commands = { 'deviceroot': { 'function': self.deviceroot,
- 'help': 'get device root directory for storing temporary files' },
- 'install': { 'function': self.install,
- 'args': [ { 'name': 'file' } ],
- 'help': 'push this package file to the device and install it' },
- 'uninstall': { 'function': self.uninstall,
- 'args': [ { 'name': 'packagename' } ],
- 'help': 'uninstall the named app from the device' },
- 'killapp': { 'function': self.kill,
- 'args': [ { 'name': 'process_name', 'nargs': '*' } ],
- 'help': 'kills any processes with name(s) on device' },
- 'launchapp': { 'function': self.launchapp,
- 'args': [ { 'name': 'appname' },
- { 'name': 'activity_name' },
- { 'name': '--intent',
- 'action': 'store',
- 'default': 'android.intent.action.VIEW' },
- { 'name': '--url',
- 'action': 'store' },
- { 'name': '--no-fail-if-running',
- 'action': 'store_true',
- 'help': 'Don\'t fail if application is already running' }
+ self.commands = {'deviceroot': {'function': self.deviceroot,
+ 'help': 'get device root directory for storing temporary '
+ 'files'},
+ 'install': {'function': self.install,
+ 'args': [{'name': 'file'}],
+ 'help': 'push this package file to the device'
+ ' and install it'},
+ 'uninstall': {'function': self.uninstall,
+ 'args': [{'name': 'packagename'}],
+ 'help': 'uninstall the named app from the device'},
+ 'killapp': {'function': self.kill,
+ 'args': [{'name': 'process_name', 'nargs': '*'}],
+ 'help': 'kills any processes with name(s) on device'},
+ 'launchapp': {'function': self.launchapp,
+ 'args': [{'name': 'appname'},
+ {'name': 'activity_name'},
+ {'name': '--intent',
+ 'action': 'store',
+ 'default': 'android.intent.action.VIEW'},
+ {'name': '--url',
+ 'action': 'store'},
+ {'name': '--no-fail-if-running',
+ 'action': 'store_true',
+ 'help': 'Don\'t fail if application is'
+ ' already running'}
],
- 'help': 'launches application on device' },
- 'listapps': { 'function': self.listapps,
- 'help': 'list applications on device' },
- 'push': { 'function': self.push,
- 'args': [ { 'name': 'local_file' },
- { 'name': 'remote_file' }
- ],
- 'help': 'copy file/dir to device' },
- 'pull': { 'function': self.pull,
- 'args': [ { 'name': 'local_file' },
- { 'name': 'remote_file', 'nargs': '?' } ],
- 'help': 'copy file/dir from device' },
- 'shell': { 'function': self.shell,
- 'args': [ { 'name': 'command', 'nargs': argparse.REMAINDER },
- { 'name': '--root', 'action': 'store_true',
- 'help': 'Run command as root' }],
- 'help': 'run shell command on device' },
- 'info': { 'function': self.getinfo,
- 'args': [ { 'name': 'directive', 'nargs': '?' } ],
- 'help': 'get information on specified '
- 'aspect of the device (if no argument '
- 'given, print all available information)'
+ 'help': 'launches application on device'},
+ 'listapps': {'function': self.listapps,
+ 'help': 'list applications on device'},
+ 'push': {'function': self.push,
+ 'args': [{'name': 'local_file'},
+ {'name': 'remote_file'}
+ ],
+ 'help': 'copy file/dir to device'},
+ 'pull': {'function': self.pull,
+ 'args': [{'name': 'local_file'},
+ {'name': 'remote_file', 'nargs': '?'}],
+ 'help': 'copy file/dir from device'},
+ 'shell': {'function': self.shell,
+ 'args': [{'name': 'command', 'nargs': argparse.REMAINDER},
+ {'name': '--root', 'action': 'store_true',
+ 'help': 'Run command as root'}],
+ 'help': 'run shell command on device'},
+ 'info': {'function': self.getinfo,
+ 'args': [{'name': 'directive', 'nargs': '?'}],
+ 'help': 'get information on specified '
+ 'aspect of the device (if no argument '
+ 'given, print all available information)'
+ },
+ 'ps': {'function': self.processlist,
+ 'help': 'get information on running processes on device'
+ },
+ 'logcat': {'function': self.logcat,
+ 'help': 'get logcat from device'
},
- 'ps': { 'function': self.processlist,
- 'help': 'get information on running processes on device'
- },
- 'logcat' : { 'function': self.logcat,
- 'help': 'get logcat from device'
- },
- 'ls': { 'function': self.listfiles,
- 'args': [ { 'name': 'remote_dir' } ],
- 'help': 'list files on device'
+ 'ls': {'function': self.listfiles,
+ 'args': [{'name': 'remote_dir'}],
+ 'help': 'list files on device'
},
- 'rm': { 'function': self.removefile,
- 'args': [ { 'name': 'remote_file' } ],
- 'help': 'remove file from device'
- },
- 'isdir': { 'function': self.isdir,
- 'args': [ { 'name': 'remote_dir' } ],
- 'help': 'print if remote file is a directory'
- },
- 'mkdir': { 'function': self.mkdir,
- 'args': [ { 'name': 'remote_dir' } ],
- 'help': 'makes a directory on device'
+ 'rm': {'function': self.removefile,
+ 'args': [{'name': 'remote_file'}],
+ 'help': 'remove file from device'
},
- 'rmdir': { 'function': self.rmdir,
- 'args': [ { 'name': 'remote_dir' } ],
- 'help': 'recursively remove directory from device'
- },
- 'screencap': { 'function': self.screencap,
- 'args': [ { 'name': 'png_file' } ],
- 'help': 'capture screenshot of device in action'
- },
- 'sutver': { 'function': self.sutver,
- 'help': 'SUTAgent\'s product name and version (SUT only)'
+ 'isdir': {'function': self.isdir,
+ 'args': [{'name': 'remote_dir'}],
+ 'help': 'print if remote file is a directory'
+ },
+ 'mkdir': {'function': self.mkdir,
+ 'args': [{'name': 'remote_dir'}],
+ 'help': 'makes a directory on device'
},
- 'clearlogcat': { 'function': self.clearlogcat,
- 'help': 'clear the logcat'
+ 'rmdir': {'function': self.rmdir,
+ 'args': [{'name': 'remote_dir'}],
+ 'help': 'recursively remove directory from device'
+ },
+ 'screencap': {'function': self.screencap,
+ 'args': [{'name': 'png_file'}],
+ 'help': 'capture screenshot of device in action'
+ },
+ 'sutver': {'function': self.sutver,
+ 'help': 'SUTAgent\'s product name and version (SUT only)'
+ },
+ 'clearlogcat': {'function': self.clearlogcat,
+ 'help': 'clear the logcat'
},
- 'reboot': { 'function': self.reboot,
- 'help': 'reboot the device',
- 'args': [ { 'name': '--wait',
- 'action': 'store_true',
- 'help': 'Wait for device to come back up before exiting' } ]
+ 'reboot': {'function': self.reboot,
+ 'help': 'reboot the device',
+ 'args': [{'name': '--wait',
+ 'action': 'store_true',
+ 'help': 'Wait for device to come back up'
+ ' before exiting'}]
- },
- 'isfile': { 'function': self.isfile,
- 'args': [ { 'name': 'remote_file' } ],
- 'help': 'check whether a file exists on the device'
- },
- 'launchfennec': { 'function': self.launchfennec,
- 'args': [ { 'name': 'appname' },
- { 'name': '--intent', 'action': 'store',
- 'default': 'android.intent.action.VIEW' },
- { 'name': '--url', 'action': 'store' },
- { 'name': '--extra-args', 'action': 'store' },
- { 'name': '--mozenv', 'action': 'store',
- 'help': 'Gecko environment variables to set in "KEY1=VAL1 KEY2=VAL2" format' },
- { 'name': '--no-fail-if-running',
- 'action': 'store_true',
- 'help': 'Don\'t fail if application is already running' }
- ],
- 'help': 'launch fennec'
- },
- 'getip': { 'function': self.getip,
- 'args': [ { 'name': 'interface', 'nargs': '*' } ],
- 'help': 'get the ip address of the device'
+ },
+ 'isfile': {'function': self.isfile,
+ 'args': [{'name': 'remote_file'}],
+ 'help': 'check whether a file exists on the device'
+ },
+ 'launchfennec': {'function': self.launchfennec,
+ 'args': [{'name': 'appname'},
+ {'name': '--intent', 'action': 'store',
+ 'default': 'android.intent.action.VIEW'},
+ {'name': '--url', 'action': 'store'},
+ {'name': '--extra-args', 'action': 'store'},
+ {'name': '--mozenv', 'action': 'store',
+ 'help': 'Gecko environment variables to set'
+ ' in "KEY1=VAL1 KEY2=VAL2" format'},
+ {'name': '--no-fail-if-running',
+ 'action': 'store_true',
+ 'help': 'Don\'t fail if application is '
+ 'already running'}
+ ],
+ 'help': 'launch fennec'
+ },
+ 'getip': {'function': self.getip,
+ 'args': [{'name': 'interface', 'nargs': '*'}],
+ 'help': 'get the ip address of the device'
}
- }
+ }
self.parser = argparse.ArgumentParser()
self.add_options(self.parser)
self.add_commands(self.parser)
mozlog.commandline.add_logging_group(self.parser)
def run(self, args=sys.argv[1:]):
args = self.parser.parse_args()
@@ -157,28 +164,28 @@ class DMCli(object):
sys.exit(ret)
def add_options(self, parser):
parser.add_argument("-v", "--verbose", action="store_true",
help="Verbose output from DeviceManager",
default=bool(os.environ.get('VERBOSE')))
parser.add_argument("--host", action="store",
- help="Device hostname (only if using TCP/IP, " \
- "defaults to TEST_DEVICE environment " \
- "variable if present)",
+ help="Device hostname (only if using TCP/IP, "
+ "defaults to TEST_DEVICE environment "
+ "variable if present)",
default=os.environ.get('TEST_DEVICE'))
parser.add_argument("-p", "--port", action="store",
type=int,
help="Custom device port (if using SUTAgent or "
"adb-over-tcp)", default=None)
parser.add_argument("-m", "--dmtype", action="store",
- help="DeviceManager type (adb or sut, defaults " \
- "to DM_TRANS environment variable, if " \
- "present, or adb)",
+ help="DeviceManager type (adb or sut, defaults "
+ "to DM_TRANS environment variable, if "
+ "present, or adb)",
default=os.environ.get('DM_TRANS', 'adb'))
parser.add_argument("-d", "--hwid", action="store",
help="HWID", default=None)
parser.add_argument("--package-name", action="store",
help="Packagename (if using DeviceManagerADB)",
default=None)
def add_commands(self, parser):
@@ -360,15 +367,16 @@ class DMCli(object):
failIfRunning=(not args.no_fail_if_running))
def getip(self, args):
if args.interface:
print(self.dm.getIP(args.interface))
else:
print(self.dm.getIP())
+
def cli(args=sys.argv[1:]):
# process the command line
cli = DMCli()
cli.run(args)
if __name__ == '__main__':
cli()
--- a/testing/mozbase/mozdevice/mozdevice/droid.py
+++ b/testing/mozbase/mozdevice/mozdevice/droid.py
@@ -11,16 +11,17 @@ import time
import version_codes
from Zeroconf import Zeroconf, ServiceBrowser
from devicemanager import ZeroconfListener
from devicemanagerADB import DeviceManagerADB
from devicemanagerSUT import DeviceManagerSUT
from devicemanager import DMError
+
class DroidMixin(object):
"""Mixin to extend DeviceManager with Android-specific functionality"""
_stopApplicationNeedsRoot = True
def _getExtraAmStartArgs(self):
return []
@@ -41,17 +42,17 @@ class DroidMixin(object):
# If failIfRunning is True, we throw an exception here. Only one
# instance of an application can be running at once on Android,
# starting a new instance may not be what we want depending on what
# we want to do
if failIfRunning and self.processExist(appName):
raise DMError("Only one instance of an application may be running "
"at once")
- acmd = [ "am", "start" ] + self._getExtraAmStartArgs() + \
+ acmd = ["am", "start"] + self._getExtraAmStartArgs() + \
["-W" if wait else '', "-n", "%s/%s" % (appName, activityName)]
if intent:
acmd.extend(["-a", intent])
if extras:
for (key, val) in extras.iteritems():
if type(val) is int:
@@ -98,17 +99,18 @@ class DroidMixin(object):
for (envCnt, (envkey, envval)) in enumerate(mozEnv.iteritems()):
extras["env" + str(envCnt)] = envkey + "=" + envval
# Additional command line arguments that fennec will read and use (e.g.
# with a custom profile)
if extraArgs:
extras['args'] = " ".join(extraArgs)
- self.launchApplication(appName, "org.mozilla.gecko.BrowserApp", intent, url=url, extras=extras,
+ self.launchApplication(appName, "org.mozilla.gecko.BrowserApp", intent, url=url,
+ extras=extras,
wait=wait, failIfRunning=failIfRunning)
def getInstalledApps(self):
"""
Lists applications installed on this Android device
Returns a list of application names in the form [ 'org.mozilla.fennec', ... ]
"""
@@ -129,51 +131,54 @@ class DroidMixin(object):
we simply try to manually kill the processes started by the app
repeatedly until none is around any more. This is less reliable and
does require root.
:param appName: Name of application (e.g. `com.android.chrome`)
"""
version = self.shellCheckOutput(["getprop", "ro.build.version.sdk"])
if int(version) >= version_codes.HONEYCOMB:
- self.shellCheckOutput([ "am", "force-stop", appName ], root=self._stopApplicationNeedsRoot)
+ self.shellCheckOutput(["am", "force-stop", appName],
+ root=self._stopApplicationNeedsRoot)
else:
num_tries = 0
max_tries = 5
while self.processExist(appName):
if num_tries > max_tries:
raise DMError("Couldn't successfully kill %s after %s "
"tries" % (appName, max_tries))
self.killProcess(appName)
num_tries += 1
# sleep for a short duration to make sure there are no
# additional processes in the process of being launched
# (this is not 100% guaranteed to work since it is inherently
# racey, but it's the best we can do)
time.sleep(1)
+
class DroidADB(DeviceManagerADB, DroidMixin):
_stopApplicationNeedsRoot = False
def getTopActivity(self):
package = None
data = None
try:
- data = self.shellCheckOutput(["dumpsys", "window", "windows"], timeout=self.short_timeout)
+ data = self.shellCheckOutput(
+ ["dumpsys", "window", "windows"], timeout=self.short_timeout)
except:
# dumpsys seems to intermittently fail (seen on 4.3 emulator), producing
# no output.
return ""
# "dumpsys window windows" produces many lines of input. The top/foreground
# activity is indicated by something like:
- # mFocusedApp=AppWindowToken{483e6db0 token=HistoryRecord{484dcad8 com.mozilla.SUTAgentAndroid/.SUTAgentAndroid}}
+ # mFocusedApp=AppWindowToken{483e6db0 token=HistoryRecord{484dcad8 com.mozilla.SUTAgentAndroid/.SUTAgentAndroid}} # noqa
# or, on other devices:
- # FocusedApplication: name='AppWindowToken{41a65340 token=ActivityRecord{418fbd68 org.mozilla.fennec_mozdev/org.mozilla.gecko.BrowserApp}}', dispatchingTimeout=5000.000ms
+ # FocusedApplication: name='AppWindowToken{41a65340 token=ActivityRecord{418fbd68 org.mozilla.fennec_mozdev/org.mozilla.gecko.BrowserApp}}', dispatchingTimeout=5000.000ms # noqa
# Extract this line, ending in the forward slash:
m = re.search('mFocusedApp(.+)/', data)
if not m:
m = re.search('FocusedApplication(.+)/', data)
if m:
line = m.group(0)
# Extract package name: string of non-whitespace ending in forward slash
m = re.search('(\S+)/$', line)
@@ -189,50 +194,54 @@ class DroidADB(DeviceManagerADB, DroidMi
def getAppRoot(self, packageName):
"""
Returns the root directory for the specified android application
"""
# relying on convention
return '/data/data/%s' % packageName
+
class DroidSUT(DeviceManagerSUT, DroidMixin):
def _getExtraAmStartArgs(self):
# in versions of android in jellybean and beyond, the agent may run as
# a different process than the one that started the app. In this case,
# we need to get back the original user serial number and then pass
# that to the 'am start' command line
if not hasattr(self, '_userSerial'):
infoDict = self.getInfo(directive="sutuserinfo")
if infoDict.get('sutuserinfo') and \
len(infoDict['sutuserinfo']) > 0:
- userSerialString = infoDict['sutuserinfo'][0]
- # user serial always an integer, see: http://developer.android.com/reference/android/os/UserManager.html#getSerialNumberForUser%28android.os.UserHandle%29
- m = re.match('User Serial:([0-9]+)', userSerialString)
- if m:
- self._userSerial = m.group(1)
- else:
- self._userSerial = None
+ userSerialString = infoDict['sutuserinfo'][0]
+ # user serial always an integer, see:
+ # http://developer.android.com/reference/android/os/UserManager.html#getSerialNumberForUser%28android.os.UserHandle%29
+ m = re.match('User Serial:([0-9]+)', userSerialString)
+ if m:
+ self._userSerial = m.group(1)
+ else:
+ self._userSerial = None
else:
self._userSerial = None
if self._userSerial is not None:
- return [ "--user", self._userSerial ]
+ return ["--user", self._userSerial]
return []
def getTopActivity(self):
- return self._runCmds([{ 'cmd': "activity" }]).strip()
+ return self._runCmds([{'cmd': "activity"}]).strip()
def getAppRoot(self, packageName):
- return self._runCmds([{ 'cmd': 'getapproot %s' % packageName }]).strip()
+ return self._runCmds([{'cmd': 'getapproot %s' % packageName}]).strip()
+
def DroidConnectByHWID(hwid, timeout=30, **kwargs):
- """Try to connect to the given device by waiting for it to show up using mDNS with the given timeout."""
+ """Try to connect to the given device by waiting for it to show up using
+ mDNS with the given timeout."""
zc = Zeroconf(moznetwork.get_ip())
evt = threading.Event()
listener = ZeroconfListener(hwid, evt)
sb = ServiceBrowser(zc, "_sutagent._tcp.local.", listener)
foundIP = None
if evt.wait(timeout):
# we found the hwid
--- a/testing/mozbase/mozdevice/mozdevice/sutini.py
+++ b/testing/mozbase/mozdevice/mozdevice/sutini.py
@@ -18,16 +18,17 @@ SCHEMA = {'Registration Server': (('IPAd
('PORT', '28001'),
('HARDWARE', ''),
('POOL', '')),
'Network Settings': (('SSID', ''),
('AUTH', ''),
('ENCR', ''),
('EAP', ''))}
+
def get_cfg(d, ini_path):
cfg = ConfigParser.RawConfigParser()
try:
cfg.readfp(StringIO.StringIO(d.pullFile(ini_path)), 'SUTAgent.ini')
except DMError:
# assume this is due to a missing file...
pass
return cfg
--- a/testing/mozbase/mozdevice/mozdevice/version_codes.py
+++ b/testing/mozbase/mozdevice/mozdevice/version_codes.py
@@ -12,19 +12,19 @@ See http://developer.android.com/referen
# not yet turned into an official release.
CUR_DEVELOPMENT = 10000
# October 2008: The original, first, version of Android
BASE = 1
# February 2009: First Android update, officially called 1.1
BASE_1_1 = 2
# May 2009: Android 1.5
-CUPCAKE = 3
+CUPCAKE = 3
# September 2009: Android 1.6
-DONUT = 4
+DONUT = 4
# November 2009: Android 2.0
ECLAIR = 5
# December 2009: Android 2.0.1
ECLAIR_0_1 = 6
# January 2010: Android 2.1
ECLAIR_MR1 = 7
# June 2010: Android 2.2
FROYO = 8
--- a/testing/mozbase/mozdevice/setup.py
+++ b/testing/mozbase/mozdevice/setup.py
@@ -6,23 +6,23 @@ from setuptools import setup
PACKAGE_NAME = 'mozdevice'
PACKAGE_VERSION = '0.48'
deps = ['mozfile >= 1.0',
'mozlog >= 3.0',
'moznetwork >= 0.24',
'mozprocess >= 0.19',
- ]
+ ]
setup(name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description="Mozilla-authored device management",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozdevice'],
include_package_data=True,
zip_safe=False,
--- a/testing/mozbase/mozdevice/sut_tests/dmunit.py
+++ b/testing/mozbase/mozdevice/sut_tests/dmunit.py
@@ -9,16 +9,17 @@ import unittest
from mozdevice import devicemanager
from mozdevice import devicemanagerSUT
ip = ''
port = 0
heartbeat_port = 0
log_level = logging.ERROR
+
class DeviceManagerTestCase(unittest.TestCase):
"""DeviceManager tests should subclass this.
"""
"""Set to False in your derived class if this test
should not be run on the Python agent.
"""
runs_on_test_device = True
@@ -43,12 +44,12 @@ class DeviceManagerTestLoader(unittest.T
"""Loads tests from modules unless the SUT is a test device and
the test case has runs_on_test_device set to False
"""
tests = []
module = __import__(module_name)
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, (type, types.ClassType)) and
- issubclass(obj, unittest.TestCase)) and \
- (not self.isTestDevice or obj.runs_on_test_device):
+ issubclass(obj, unittest.TestCase)) and \
+ (not self.isTestDevice or obj.runs_on_test_device):
tests.append(self.loadTestsFromTestCase(obj))
return self.suiteClass(tests)
--- a/testing/mozbase/mozdevice/sut_tests/runtests.py
+++ b/testing/mozbase/mozdevice/sut_tests/runtests.py
@@ -41,17 +41,17 @@ def main(ip, port, heartbeat_port, scrip
testLoader = dmunit.DeviceManagerTestLoader(isTestDevice)
for s in scripts:
suite.addTest(testLoader.loadTestsFromModuleName(s))
unittest.TextTestRunner(verbosity=2).run(suite)
genfiles.clean_test_files()
-if __name__ == "__main__":
+if __name__ == "__main__":
default_ip = '127.0.0.1'
default_port = 20701
env_ip, _, env_port = os.getenv('TEST_DEVICE', '').partition(':')
if env_port:
try:
env_port = int(env_port)
--- a/testing/mozbase/mozdevice/sut_tests/test_datachannel.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_datachannel.py
@@ -3,16 +3,17 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import socket
from time import strptime
from dmunit import DeviceManagerTestCase, heartbeat_port
+
class DataChannelTestCase(DeviceManagerTestCase):
runs_on_test_device = False
def runTest(self):
"""This tests the heartbeat and the data channel.
"""
ip = self.dm.host
@@ -31,22 +32,22 @@ class DataChannelTestCase(DeviceManagerT
data = self._datasock.recv(1024)
print data
self.assertNotEqual(len(data), 0)
# Check for the header
if not capturedHeader:
m = re.match(r"(.*?) trace output", data)
self.assertNotEqual(m, None,
- 'trace output line does not match. The line: ' + str(data))
+ 'trace output line does not match. The line: ' + str(data))
capturedHeader = True
# Check for standard heartbeat messsage
m = re.match(r"(.*?) Thump thump - (.*)", data)
- if m == None:
+ if m is None:
# This isn't an error, it usually means we've obtained some
# unexpected data from the device
continue
# Ensure it matches our format
mHeartbeatTime = m.group(1)
mHeartbeatTime = strptime(mHeartbeatTime, "%Y%m%d-%H:%M:%S")
numbeats = numbeats + 1
--- a/testing/mozbase/mozdevice/sut_tests/test_exec.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_exec.py
@@ -2,16 +2,17 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import posixpath
from StringIO import StringIO
from dmunit import DeviceManagerTestCase
+
class ExecTestCase(DeviceManagerTestCase):
def runTest(self):
"""Simple exec test, does not use env vars."""
out = StringIO()
filename = posixpath.join(self.dm.deviceRoot, 'test_exec_file')
# Make sure the file was not already there
self.dm.removeFile(filename)
--- a/testing/mozbase/mozdevice/sut_tests/test_exec_env.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_exec_env.py
@@ -3,16 +3,17 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import posixpath
from StringIO import StringIO
from dmunit import DeviceManagerTestCase
+
class ExecEnvTestCase(DeviceManagerTestCase):
def runTest(self):
"""Exec test with env vars."""
# Push the file
localfile = os.path.join('test-files', 'test_script.sh')
remotefile = posixpath.join(self.dm.deviceRoot, 'test_script.sh')
self.dm.pushFile(localfile, remotefile)
--- a/testing/mozbase/mozdevice/sut_tests/test_fileExists.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_fileExists.py
@@ -2,16 +2,17 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import tempfile
import posixpath
from dmunit import DeviceManagerTestCase
+
class FileExistsTestCase(DeviceManagerTestCase):
"""This tests the "fileExists" command.
"""
def testOnRoot(self):
self.assertTrue(self.dm.fileExists('/'))
def testOnNonexistent(self):
@@ -29,9 +30,8 @@ class FileExistsTestCase(DeviceManagerTe
remote_path = posixpath.join(self.dm.deviceRoot, 'testDir')
remote_path_file = posixpath.join(remote_path, 'testFile')
self.assertFalse(self.dm.fileExists(remote_path))
with tempfile.NamedTemporaryFile() as f:
self.dm.pushFile(f.name, remote_path_file)
self.assertTrue(self.dm.fileExists(remote_path))
self.dm.removeFile(remote_path_file)
self.dm.removeDir(remote_path)
-
--- a/testing/mozbase/mozdevice/sut_tests/test_getdir.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_getdir.py
@@ -5,16 +5,17 @@
import os
import posixpath
import shutil
import tempfile
from mozdevice.devicemanager import DMError
from dmunit import DeviceManagerTestCase
+
class GetDirectoryTestCase(DeviceManagerTestCase):
def _setUp(self):
self.localsrcdir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.localsrcdir, 'push1', 'sub.1', 'sub.2'))
path = os.path.join(self.localsrcdir,
'push1', 'sub.1', 'sub.2', 'testfile')
file(path, 'w').close()
@@ -41,10 +42,10 @@ class GetDirectoryTestCase(DeviceManager
self.dm.getDirectory(posixpath.join(testroot, 'push1'),
os.path.join(self.localdestdir, 'push1'))
self.assertTrue(os.path.exists(
os.path.join(self.localdestdir,
'push1', 'sub.1', 'sub.2', 'testfile')))
self.assertTrue(os.path.exists(
os.path.join(self.localdestdir, 'push1', 'emptysub')))
self.assertRaises(DMError, self.dm.getDirectory,
- '/dummy', os.path.join(self.localdestdir, '/none'))
+ '/dummy', os.path.join(self.localdestdir, '/none'))
self.assertFalse(os.path.exists(self.localdestdir + '/none'))
--- a/testing/mozbase/mozdevice/sut_tests/test_info.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_info.py
@@ -1,14 +1,15 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from dmunit import DeviceManagerTestCase
+
class InfoTestCase(DeviceManagerTestCase):
runs_on_test_device = False
def runTest(self):
"""This tests the "info" command.
"""
cmds = ('os', 'id', 'systime', 'uptime', 'screen', 'memory', 'power')
--- a/testing/mozbase/mozdevice/sut_tests/test_prompt.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_prompt.py
@@ -2,16 +2,17 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import socket
from dmunit import DeviceManagerTestCase
+
class PromptTestCase(DeviceManagerTestCase):
def tearDown(self):
if self.sock:
self.sock.close()
def runTest(self):
"""This tests getting a prompt from the device.
--- a/testing/mozbase/mozdevice/sut_tests/test_ps.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_ps.py
@@ -1,14 +1,15 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from dmunit import DeviceManagerTestCase
+
class ProcessListTestCase(DeviceManagerTestCase):
def runTest(self):
"""This tests getting a process list from the device.
"""
proclist = self.dm.getProcessList()
# This returns a process list of the form:
@@ -19,9 +20,8 @@ class ProcessListTestCase(DeviceManagerT
self.assertNotEqual(len(proclist), 0)
for item in proclist:
self.assertIsInstance(item[0], int)
self.assertIsInstance(item[1], str)
self.assertGreater(len(item[1]), 0)
if len(item) > 2:
self.assertIsInstance(item[2], int)
-
--- a/testing/mozbase/mozdevice/sut_tests/test_pull.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_pull.py
@@ -4,16 +4,17 @@
import hashlib
import os
import posixpath
from dmunit import DeviceManagerTestCase
from mozdevice.devicemanager import DMError
+
class PullTestCase(DeviceManagerTestCase):
def runTest(self):
"""Tests the "pull" command with a binary file.
"""
orig = hashlib.md5()
new = hashlib.md5()
local_test_file = os.path.join('test-files', 'mybinary.zip')
--- a/testing/mozbase/mozdevice/sut_tests/test_push1.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_push1.py
@@ -2,16 +2,17 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import posixpath
from dmunit import DeviceManagerTestCase
+
class Push1TestCase(DeviceManagerTestCase):
def runTest(self):
"""This tests copying a directory structure to the device.
"""
dvroot = self.dm.deviceRoot
dvpath = posixpath.join(dvroot, 'infratest')
self.dm.removeDir(dvpath)
--- a/testing/mozbase/mozdevice/sut_tests/test_push2.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_push2.py
@@ -2,16 +2,17 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import posixpath
from dmunit import DeviceManagerTestCase
+
class Push2TestCase(DeviceManagerTestCase):
def runTest(self):
"""This tests copying a directory structure with files to the device.
"""
testroot = posixpath.join(self.dm.deviceRoot, 'infratest')
self.dm.removeDir(testroot)
self.dm.mkDir(testroot)
--- a/testing/mozbase/mozdevice/sut_tests/test_pushbinary.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_pushbinary.py
@@ -2,16 +2,17 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import posixpath
from dmunit import DeviceManagerTestCase
+
class PushBinaryTestCase(DeviceManagerTestCase):
def runTest(self):
"""This tests copying a binary file.
"""
testroot = self.dm.deviceRoot
self.dm.removeFile(posixpath.join(testroot, 'mybinary.zip'))
self.dm.pushFile(os.path.join('test-files', 'mybinary.zip'),
--- a/testing/mozbase/mozdevice/sut_tests/test_pushsmalltext.py
+++ b/testing/mozbase/mozdevice/sut_tests/test_pushsmalltext.py
@@ -2,16 +2,17 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import posixpath
from dmunit import DeviceManagerTestCase
+
class PushSmallTextTestCase(DeviceManagerTestCase):
def runTest(self):
"""This tests copying a small text file.
"""
testroot = self.dm.deviceRoot
self.dm.removeFile(posixpath.join(testroot, 'smalltext.txt'))
self.dm.pushFile(os.path.join('test-files', 'smalltext.txt'),
--- a/testing/mozbase/mozdevice/tests/droidsut_launch.py
+++ b/testing/mozbase/mozdevice/tests/droidsut_launch.py
@@ -1,35 +1,36 @@
from sut import MockAgent
import mozdevice
import logging
import unittest
+
class LaunchTest(unittest.TestCase):
def test_nouserserial(self):
- a = MockAgent(self, commands = [("ps",
- "10029 549 com.android.launcher\n"
- "10066 1198 com.twitter.android"),
- ("info sutuserinfo", ""),
- ("exec am start -W -n "
- "org.mozilla.fennec/org.mozilla.gecko.BrowserApp -a "
- "android.intent.action.VIEW",
- "OK\nreturn code [0]")])
+ a = MockAgent(self, commands=[("ps",
+ "10029 549 com.android.launcher\n"
+ "10066 1198 com.twitter.android"),
+ ("info sutuserinfo", ""),
+ ("exec am start -W -n "
+ "org.mozilla.fennec/org.mozilla.gecko.BrowserApp -a "
+ "android.intent.action.VIEW",
+ "OK\nreturn code [0]")])
d = mozdevice.DroidSUT("127.0.0.1", port=a.port, logLevel=logging.DEBUG)
d.launchFennec("org.mozilla.fennec")
a.wait()
def test_userserial(self):
- a = MockAgent(self, commands = [("ps",
- "10029 549 com.android.launcher\n"
- "10066 1198 com.twitter.android"),
- ("info sutuserinfo", "User Serial:0"),
- ("exec am start --user 0 -W -n "
- "org.mozilla.fennec/org.mozilla.gecko.BrowserApp -a "
- "android.intent.action.VIEW",
- "OK\nreturn code [0]")])
+ a = MockAgent(self, commands=[("ps",
+ "10029 549 com.android.launcher\n"
+ "10066 1198 com.twitter.android"),
+ ("info sutuserinfo", "User Serial:0"),
+ ("exec am start --user 0 -W -n "
+ "org.mozilla.fennec/org.mozilla.gecko.BrowserApp -a "
+ "android.intent.action.VIEW",
+ "OK\nreturn code [0]")])
d = mozdevice.DroidSUT("127.0.0.1", port=a.port, logLevel=logging.DEBUG)
d.launchFennec("org.mozilla.fennec")
a.wait()
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozdevice/tests/sut.py
+++ b/testing/mozbase/mozdevice/tests/sut.py
@@ -4,22 +4,23 @@
# http://creativecommons.org/publicdomain/zero/1.0/
import datetime
import socket
import time
from threading import Thread
+
class MockAgent(object):
MAX_WAIT_TIME_SECONDS = 10
SOCKET_TIMEOUT_SECONDS = 5
- def __init__(self, tester, start_commands = None, commands = []):
+ def __init__(self, tester, start_commands=None, commands=[]):
if start_commands:
self.commands = start_commands
else:
self.commands = [("ver", "SUTAgentAndroid Version 1.14")]
self.commands = self.commands + commands
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.bind(("127.0.0.1", 0))
@@ -55,21 +56,21 @@ class MockAgent(object):
data += conn.recv(1024)
except socket.timeout:
# We handle timeouts in the main loop.
pass
self.tester.assertEqual(data.strip(), command)
# send response and prompt separately to test for bug 789496
# FIXME: Improve the mock agent, since overloading the meaning
# of 'response' is getting confusing.
- if response is None: # code for "shut down"
+ if response is None: # code for "shut down"
conn.shutdown(socket.SHUT_RDWR)
conn.close()
conn = None
- elif type(response) is int: # code for "time out"
+ elif type(response) is int: # code for "time out"
max_timeout = 15.0
timeout = 0.0
interval = 0.1
while not self.should_stop and timeout < max_timeout:
time.sleep(interval)
timeout += interval
if timeout >= max_timeout:
raise Exception("Maximum timeout reached! This should not "
--- a/testing/mozbase/mozdevice/tests/sut_app.py
+++ b/testing/mozbase/mozdevice/tests/sut_app.py
@@ -1,9 +1,9 @@
-#/usr/bin/env python
+#!/usr/bin/env python
import mozdevice
import logging
import unittest
from sut import MockAgent
class TestApp(unittest.TestCase):
--- a/testing/mozbase/mozdevice/tests/sut_basic.py
+++ b/testing/mozbase/mozdevice/tests/sut_basic.py
@@ -1,13 +1,14 @@
from sut import MockAgent
import mozdevice
import logging
import unittest
+
class BasicTest(unittest.TestCase):
def test_init(self):
"""Tests DeviceManager initialization."""
a = MockAgent(self)
mozdevice.DroidSUT("127.0.0.1", port=a.port, logLevel=logging.DEBUG)
# all testing done in device's constructor
@@ -19,47 +20,47 @@ class BasicTest(unittest.TestCase):
self.assertRaises(mozdevice.DMError,
lambda: mozdevice.DroidSUT("127.0.0.1",
port=a.port,
logLevel=logging.DEBUG))
a.wait()
def test_timeout_normal(self):
"""Tests DeviceManager timeout, normal case."""
- a = MockAgent(self, commands = [("isdir /mnt/sdcard/tests", "TRUE"),
- ("cd /mnt/sdcard/tests", ""),
- ("ls", "test.txt"),
- ("rm /mnt/sdcard/tests/test.txt",
- "Removed the file")])
+ a = MockAgent(self, commands=[("isdir /mnt/sdcard/tests", "TRUE"),
+ ("cd /mnt/sdcard/tests", ""),
+ ("ls", "test.txt"),
+ ("rm /mnt/sdcard/tests/test.txt",
+ "Removed the file")])
d = mozdevice.DroidSUT("127.0.0.1", port=a.port, logLevel=logging.DEBUG)
ret = d.removeFile('/mnt/sdcard/tests/test.txt')
- self.assertEqual(ret, None) # if we didn't throw an exception, we're ok
+ self.assertEqual(ret, None) # if we didn't throw an exception, we're ok
a.wait()
def test_timeout_timeout(self):
"""Tests DeviceManager timeout, timeout case."""
- a = MockAgent(self, commands = [("isdir /mnt/sdcard/tests", "TRUE"),
- ("cd /mnt/sdcard/tests", ""),
- ("ls", "test.txt"),
- ("rm /mnt/sdcard/tests/test.txt", 0)])
+ a = MockAgent(self, commands=[("isdir /mnt/sdcard/tests", "TRUE"),
+ ("cd /mnt/sdcard/tests", ""),
+ ("ls", "test.txt"),
+ ("rm /mnt/sdcard/tests/test.txt", 0)])
d = mozdevice.DroidSUT("127.0.0.1", port=a.port, logLevel=logging.DEBUG)
d.default_timeout = 1
exceptionThrown = False
try:
d.removeFile('/mnt/sdcard/tests/test.txt')
except mozdevice.DMError:
exceptionThrown = True
self.assertEqual(exceptionThrown, True)
a.should_stop = True
a.wait()
def test_shell(self):
"""Tests shell command"""
- for cmd in [ ("exec foobar", False), ("execsu foobar", True) ]:
- for retcode in [ 1, 2 ]:
+ for cmd in [("exec foobar", False), ("execsu foobar", True)]:
+ for retcode in [1, 2]:
a = MockAgent(self, commands=[(cmd[0],
"\nreturn code [%s]" % retcode)])
d = mozdevice.DroidSUT("127.0.0.1", port=a.port)
exceptionThrown = False
try:
d.shellCheckOutput(["foobar"], root=cmd[1])
except mozdevice.DMError:
exceptionThrown = True
--- a/testing/mozbase/mozdevice/tests/sut_chmod.py
+++ b/testing/mozbase/mozdevice/tests/sut_chmod.py
@@ -1,21 +1,22 @@
-#/usr/bin/env python
+#!/usr/bin/env python
import mozdevice
import logging
import unittest
from sut import MockAgent
class TestChmod(unittest.TestCase):
def test_chmod(self):
- command = [('chmod /mnt/sdcard/test', 'Changing permissions for /storage/emulated/legacy/Test\n'
- ' <empty>\n'
- 'chmod /storage/emulated/legacy/Test ok\n')]
+ command = [('chmod /mnt/sdcard/test',
+ 'Changing permissions for /storage/emulated/legacy/Test\n'
+ ' <empty>\n'
+ 'chmod /storage/emulated/legacy/Test ok\n')]
m = MockAgent(self, commands=command)
d = mozdevice.DroidSUT('127.0.0.1', port=m.port, logLevel=logging.DEBUG)
self.assertEqual(None, d.chmodDir('/mnt/sdcard/test'))
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozdevice/tests/sut_copytree.py
+++ b/testing/mozbase/mozdevice/tests/sut_copytree.py
@@ -5,61 +5,63 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import mozdevice
import logging
import unittest
from sut import MockAgent
+
class CopyTreeTest(unittest.TestCase):
+
def test_copyFile(self):
commands = [('dd if=/mnt/sdcard/tests/test.txt of=/mnt/sdcard/tests/test2.txt', ''),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'test.txt\ntest2.txt')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=logging.DEBUG)
self.assertEqual(None, d.copyTree('/mnt/sdcard/tests/test.txt',
- '/mnt/sdcard/tests/test2.txt'))
+ '/mnt/sdcard/tests/test2.txt'))
expected = (commands[3][1].strip()).split('\n')
self.assertEqual(expected, d.listFiles('/mnt/sdcard/tests'))
def test_copyDir(self):
commands = [('dd if=/mnt/sdcard/tests/foo of=/mnt/sdcard/tests/bar', ''),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'foo\nbar')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port,
- logLevel=logging.DEBUG)
+ logLevel=logging.DEBUG)
self.assertEqual(None, d.copyTree('/mnt/sdcard/tests/foo',
- '/mnt/sdcard/tests/bar'))
+ '/mnt/sdcard/tests/bar'))
expected = (commands[3][1].strip()).split('\n')
self.assertEqual(expected, d.listFiles('/mnt/sdcard/tests'))
def test_copyNonEmptyDir(self):
commands = [('isdir /mnt/sdcard/tests/foo/bar', 'TRUE'),
('dd if=/mnt/sdcard/tests/foo of=/mnt/sdcard/tests/foo2', ''),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'foo\nfoo2'),
('isdir /mnt/sdcard/tests/foo2', 'TRUE'),
('cd /mnt/sdcard/tests/foo2', ''),
('ls', 'bar')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port,
- logLevel=logging.DEBUG)
+ logLevel=logging.DEBUG)
self.assertTrue(d.dirExists('/mnt/sdcard/tests/foo/bar'))
self.assertEqual(None, d.copyTree('/mnt/sdcard/tests/foo',
- '/mnt/sdcard/tests/foo2'))
+ '/mnt/sdcard/tests/foo2'))
expected = (commands[4][1].strip()).split('\n')
self.assertEqual(expected, d.listFiles('/mnt/sdcard/tests'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/foo2/bar'))
if __name__ == "__main__":
unittest.main()
--- a/testing/mozbase/mozdevice/tests/sut_fileExists.py
+++ b/testing/mozbase/mozdevice/tests/sut_fileExists.py
@@ -1,12 +1,13 @@
from sut import MockAgent
import mozdevice
import unittest
+
class FileExistsTest(unittest.TestCase):
commands = [('isdir /', 'TRUE'),
('cd /', ''),
('ls', 'init')]
def test_onRoot(self):
root_commands = [('isdir /', 'TRUE')]
@@ -21,9 +22,8 @@ class FileExistsTest(unittest.TestCase):
def test_onRegularFile(self):
a = MockAgent(self, commands=self.commands)
d = mozdevice.DroidSUT("127.0.0.1", port=a.port)
self.assertTrue(d.fileExists('/init'))
if __name__ == '__main__':
unittest.main()
-
--- a/testing/mozbase/mozdevice/tests/sut_info.py
+++ b/testing/mozbase/mozdevice/tests/sut_info.py
@@ -1,9 +1,9 @@
-#/usr/bin/env python
+#!/usr/bin/env python
import mozdevice
import logging
import re
import unittest
from sut import MockAgent
class TestGetInfo(unittest.TestCase):
--- a/testing/mozbase/mozdevice/tests/sut_ip.py
+++ b/testing/mozbase/mozdevice/tests/sut_ip.py
@@ -1,9 +1,9 @@
-#/usr/bin/env python
+#!/usr/bin/env python
import mozdevice
import logging
import unittest
from sut import MockAgent
class TestGetIP(unittest.TestCase):
""" class to test IP methods """
--- a/testing/mozbase/mozdevice/tests/sut_list.py
+++ b/testing/mozbase/mozdevice/tests/sut_list.py
@@ -1,9 +1,9 @@
-#/usr/bin/env python
+#!/usr/bin/env python
import mozdevice
import logging
import unittest
from sut import MockAgent
class TestListFiles(unittest.TestCase):
commands = [("isdir /mnt/sdcard", "TRUE"),
--- a/testing/mozbase/mozdevice/tests/sut_logcat.py
+++ b/testing/mozbase/mozdevice/tests/sut_logcat.py
@@ -6,37 +6,38 @@ import unittest
from sut import MockAgent
class TestLogCat(unittest.TestCase):
""" Class to test methods associated with logcat """
def test_getLogcat(self):
- logcat_output = ("07-17 00:51:10.377 I/SUTAgentAndroid( 2933): onCreate\r\n"
- "07-17 00:51:10.457 D/dalvikvm( 2933): GC_CONCURRENT freed 351K, 17% free 2523K/3008K, paused 5ms+2ms, total 38ms\r\n"
- "07-17 00:51:10.497 I/SUTAgentAndroid( 2933): Caught exception creating file in /data/local/tmp: open failed: EACCES (Permission denied)\r\n"
- "07-17 00:51:10.507 E/SUTAgentAndroid( 2933): ERROR: Cannot access world writeable test root\r\n"
- "07-17 00:51:10.547 D/GeckoHealthRec( 3253): Initializing profile cache.\r\n"
- "07-17 00:51:10.607 D/GeckoHealthRec( 3253): Looking for /data/data/org.mozilla.fennec/files/mozilla/c09kfhne.default/times.json\r\n"
- "07-17 00:51:10.637 D/GeckoHealthRec( 3253): Using times.json for profile creation time.\r\n"
- "07-17 00:51:10.707 D/GeckoHealthRec( 3253): Incorporating environment: times.json profile creation = 1374026758604\r\n"
- "07-17 00:51:10.507 D/GeckoHealthRec( 3253): Requested prefs.\r\n"
- "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): \r\n"
- "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): Total Private Dirty Memory 3176 kb\r\n"
- "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): Total Proportional Set Size Memory 5679 kb\r\n"
- "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): Total Shared Dirty Memory 9216 kb\r\n"
- "07-17 06:55:21.627 I/SUTAgentAndroid( 3876): 127.0.0.1 : execsu /system/bin/logcat -v time -d dalvikvm:I "
- "ConnectivityService:S WifiMonitor:S WifiStateTracker:S wpa_supplicant:S NetworkStateTracker:S\r\n"
- "07-17 06:55:21.827 I/dalvikvm-heap( 3876): Grow heap (frag case) to 3.019MB for 102496-byte allocation\r\n"
- "return code [0]")
+ logcat_output = (
+ "07-17 00:51:10.377 I/SUTAgentAndroid( 2933): onCreate\r\n"
+ "07-17 00:51:10.457 D/dalvikvm( 2933): GC_CONCURRENT freed 351K, 17% free 2523K/3008K, paused 5ms+2ms, total 38ms\r\n" # noqa
+ "07-17 00:51:10.497 I/SUTAgentAndroid( 2933): Caught exception creating file in /data/local/tmp: open failed: EACCES (Permission denied)\r\n" # noqa
+ "07-17 00:51:10.507 E/SUTAgentAndroid( 2933): ERROR: Cannot access world writeable test root\r\n" # noqa
+ "07-17 00:51:10.547 D/GeckoHealthRec( 3253): Initializing profile cache.\r\n"
+ "07-17 00:51:10.607 D/GeckoHealthRec( 3253): Looking for /data/data/org.mozilla.fennec/files/mozilla/c09kfhne.default/times.json\r\n" # noqa
+ "07-17 00:51:10.637 D/GeckoHealthRec( 3253): Using times.json for profile creation time.\r\n" # noqa
+ "07-17 00:51:10.707 D/GeckoHealthRec( 3253): Incorporating environment: times.json profile creation = 1374026758604\r\n" # noqa
+ "07-17 00:51:10.507 D/GeckoHealthRec( 3253): Requested prefs.\r\n"
+ "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): \r\n"
+ "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): Total Private Dirty Memory 3176 kb\r\n" # noqa
+ "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): Total Proportional Set Size Memory 5679 kb\r\n" # noqa
+ "07-17 06:50:54.907 I/SUTAgentAndroid( 3876): Total Shared Dirty Memory 9216 kb\r\n" # noqa
+ "07-17 06:55:21.627 I/SUTAgentAndroid( 3876): 127.0.0.1 : execsu /system/bin/logcat -v time -d dalvikvm:I " # noqa
+ "ConnectivityService:S WifiMonitor:S WifiStateTracker:S wpa_supplicant:S NetworkStateTracker:S\r\n" # noqa
+ "07-17 06:55:21.827 I/dalvikvm-heap( 3876): Grow heap (frag case) to 3.019MB for 102496-byte allocation\r\n" # noqa
+ "return code [0]")
inp = ("execsu /system/bin/logcat -v time -d "
- "dalvikvm:I ConnectivityService:S WifiMonitor:S "
- "WifiStateTracker:S wpa_supplicant:S NetworkStateTracker:S")
+ "dalvikvm:I ConnectivityService:S WifiMonitor:S "
+ "WifiStateTracker:S wpa_supplicant:S NetworkStateTracker:S")
commands = [(inp, logcat_output)]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=logging.DEBUG)
self.assertEqual(logcat_output[:-17].replace('\r\n', '\n').splitlines(True), d.getLogcat())
def test_recordLogcat(self):
--- a/testing/mozbase/mozdevice/tests/sut_mkdir.py
+++ b/testing/mozbase/mozdevice/tests/sut_mkdir.py
@@ -1,16 +1,17 @@
# Any copyright is dedicated to the Public Domain.
# http://creativecommons.org/publicdomain/zero/1.0/
import mozdevice
import logging
import unittest
from sut import MockAgent
+
class MkDirsTest(unittest.TestCase):
def test_mkdirs(self):
subTests = [{'cmds': [('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('info os', 'android'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/baz', 'FALSE'),
@@ -21,19 +22,20 @@ class MkDirsTest(unittest.TestCase):
'/mnt/sdcard/baz/boop successfully created')],
'expectException': False},
{'cmds': [('isdir /mnt/sdcard/baz/boop', 'FALSE'),
('info os', 'android'),
('isdir /mnt', 'TRUE'),
('isdir /mnt/sdcard', 'TRUE'),
('isdir /mnt/sdcard/baz', 'FALSE'),
('mkdr /mnt/sdcard/baz',
- '##AGENT-WARNING## Could not create the directory /mnt/sdcard/baz')],
+ "##AGENT-WARNING## "
+ "Could not create the directory /mnt/sdcard/baz")],
'expectException': True},
- ]
+ ]
for subTest in subTests:
a = MockAgent(self, commands=subTest['cmds'])
exceptionThrown = False
try:
d = mozdevice.DroidSUT('127.0.0.1', port=a.port,
logLevel=logging.DEBUG)
d.mkDirs('/mnt/sdcard/baz/boop/bip')
--- a/testing/mozbase/mozdevice/tests/sut_movetree.py
+++ b/testing/mozbase/mozdevice/tests/sut_movetree.py
@@ -5,59 +5,61 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import mozdevice
import logging
import unittest
from sut import MockAgent
+
class MoveTreeTest(unittest.TestCase):
+
def test_moveFile(self):
commands = [('mv /mnt/sdcard/tests/test.txt /mnt/sdcard/tests/test1.txt', ''),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'test1.txt'),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'test1.txt')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=logging.DEBUG)
self.assertEqual(None, d.moveTree('/mnt/sdcard/tests/test.txt',
- '/mnt/sdcard/tests/test1.txt'))
+ '/mnt/sdcard/tests/test1.txt'))
self.assertFalse(d.fileExists('/mnt/sdcard/tests/test.txt'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/test1.txt'))
def test_moveDir(self):
commands = [("mv /mnt/sdcard/tests/foo /mnt/sdcard/tests/bar", ""),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'bar')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=logging.DEBUG)
self.assertEqual(None, d.moveTree('/mnt/sdcard/tests/foo',
- '/mnt/sdcard/tests/bar'))
+ '/mnt/sdcard/tests/bar'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/bar'))
def test_moveNonEmptyDir(self):
commands = [('isdir /mnt/sdcard/tests/foo/bar', 'TRUE'),
('mv /mnt/sdcard/tests/foo /mnt/sdcard/tests/foo2', ''),
('isdir /mnt/sdcard/tests', 'TRUE'),
('cd /mnt/sdcard/tests', ''),
('ls', 'foo2'),
('isdir /mnt/sdcard/tests/foo2', 'TRUE'),
('cd /mnt/sdcard/tests/foo2', ''),
('ls', 'bar')]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port,
- logLevel=logging.DEBUG)
+ logLevel=logging.DEBUG)
self.assertTrue(d.dirExists('/mnt/sdcard/tests/foo/bar'))
self.assertEqual(None, d.moveTree('/mnt/sdcard/tests/foo',
- '/mnt/sdcard/tests/foo2'))
+ '/mnt/sdcard/tests/foo2'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/foo2'))
self.assertTrue(d.fileExists('/mnt/sdcard/tests/foo2/bar'))
if __name__ == "__main__":
unittest.main()
--- a/testing/mozbase/mozdevice/tests/sut_ps.py
+++ b/testing/mozbase/mozdevice/tests/sut_ps.py
@@ -1,12 +1,13 @@
from sut import MockAgent
import mozdevice
import unittest
+
class PsTest(unittest.TestCase):
pscommands = [('ps',
"10029 549 com.android.launcher\n"
"10066 1198 com.twitter.android")]
bad_pscommands = [('ps',
"abcdef 549 com.android.launcher\n"
--- a/testing/mozbase/mozdevice/tests/sut_pull.py
+++ b/testing/mozbase/mozdevice/tests/sut_pull.py
@@ -1,48 +1,47 @@
from sut import MockAgent
import mozdevice
import logging
import unittest
+
class PullTest(unittest.TestCase):
def test_pull_success(self):
- for count in [ 1, 4, 1024, 2048 ]:
+ for count in [1, 4, 1024, 2048]:
cheeseburgers = ""
for i in range(count):
cheeseburgers += "cheeseburgers"
# pull file is kind of gross, make sure we can still execute commands after it's done
remoteName = "/mnt/sdcard/cheeseburgers"
- a = MockAgent(self, commands = [("pull %s" % remoteName,
- "%s,%s\n%s" % (remoteName,
- len(cheeseburgers),
- cheeseburgers)),
- ("isdir /mnt/sdcard", "TRUE")])
+ a = MockAgent(self, commands=[("pull %s" % remoteName,
+ "%s,%s\n%s" % (remoteName,
+ len(cheeseburgers),
+ cheeseburgers)),
+ ("isdir /mnt/sdcard", "TRUE")])
d = mozdevice.DroidSUT("127.0.0.1", port=a.port,
logLevel=logging.DEBUG)
pulledData = d.pullFile("/mnt/sdcard/cheeseburgers")
self.assertEqual(pulledData, cheeseburgers)
d.dirExists('/mnt/sdcard')
def test_pull_failure(self):
# this test simulates only receiving a few bytes of what we expect
# to be larger file
remoteName = "/mnt/sdcard/cheeseburgers"
- a = MockAgent(self, commands = [("pull %s" % remoteName,
- "%s,15\n%s" % (remoteName,
- "cheeseburgh"))])
+ a = MockAgent(self, commands=[("pull %s" % remoteName,
+ "%s,15\n%s" % (remoteName,
+ "cheeseburgh"))])
d = mozdevice.DroidSUT("127.0.0.1", port=a.port,
logLevel=logging.DEBUG)
exceptionThrown = False
try:
d.pullFile("/mnt/sdcard/cheeseburgers")
except mozdevice.DMError:
exceptionThrown = True
self.assertTrue(exceptionThrown)
if __name__ == '__main__':
unittest.main()
-
-
--- a/testing/mozbase/mozdevice/tests/sut_push.py
+++ b/testing/mozbase/mozdevice/tests/sut_push.py
@@ -2,29 +2,30 @@ from sut import MockAgent
import mozfile
import mozdevice
import logging
import unittest
import hashlib
import tempfile
import os
+
class PushTest(unittest.TestCase):
def test_push(self):
pushfile = "1234ABCD"
mdsum = hashlib.md5()
mdsum.update(pushfile)
expectedResponse = mdsum.hexdigest()
# (good response, no exception), (bad response, exception)
- for response in [ (expectedResponse, False), ("BADHASH", True) ]:
+ for response in [(expectedResponse, False), ("BADHASH", True)]:
cmd = "push /mnt/sdcard/foobar %s\r\n%s" % (len(pushfile), pushfile)
- a = MockAgent(self, commands = [("isdir /mnt/sdcard", "TRUE"),
- (cmd, response[0])])
+ a = MockAgent(self, commands=[("isdir /mnt/sdcard", "TRUE"),
+ (cmd, response[0])])
exceptionThrown = False
with tempfile.NamedTemporaryFile() as f:
try:
f.write(pushfile)
f.flush()
d = mozdevice.DroidSUT("127.0.0.1", port=a.port)
d.pushFile(f.name, '/mnt/sdcard/foobar')
except mozdevice.DMError:
@@ -41,39 +42,39 @@ class PushTest(unittest.TestCase):
tempdir = tempfile.mkdtemp()
self.addCleanup(mozfile.remove, tempdir)
complex_path = os.path.join(tempdir, "baz")
os.mkdir(complex_path)
f = tempfile.NamedTemporaryFile(dir=complex_path)
f.write(pushfile)
f.flush()
- subTests = [ { 'cmds': [ ("isdir /mnt/sdcard/baz", "TRUE"),
- ("push /mnt/sdcard/baz/%s %s\r\n%s" %
- (os.path.basename(f.name), len(pushfile),
- pushfile),
- expectedFileResponse) ],
- 'expectException': False },
- { 'cmds': [ ("isdir /mnt/sdcard/baz", "TRUE"),
- ("push /mnt/sdcard/baz/%s %s\r\n%s" %
- (os.path.basename(f.name), len(pushfile),
- pushfile),
- "BADHASH") ],
- 'expectException': True },
- { 'cmds': [ ("isdir /mnt/sdcard/baz", "FALSE"),
- ('info os', 'android'),
- ("isdir /mnt", "FALSE"),
- ("mkdr /mnt",
- "##AGENT-WARNING## Could not create the directory /mnt") ],
- 'expectException': True },
+ subTests = [{'cmds': [("isdir /mnt/sdcard/baz", "TRUE"),
+ ("push /mnt/sdcard/baz/%s %s\r\n%s" %
+ (os.path.basename(f.name), len(pushfile),
+ pushfile),
+ expectedFileResponse)],
+ 'expectException': False},
+ {'cmds': [("isdir /mnt/sdcard/baz", "TRUE"),
+ ("push /mnt/sdcard/baz/%s %s\r\n%s" %
+ (os.path.basename(f.name), len(pushfile),
+ pushfile),
+ "BADHASH")],
+ 'expectException': True},
+ {'cmds': [("isdir /mnt/sdcard/baz", "FALSE"),
+ ('info os', 'android'),
+ ("isdir /mnt", "FALSE"),
+ ("mkdr /mnt",
+ "##AGENT-WARNING## Could not create the directory /mnt")],
+ 'expectException': True},
- ]
+ ]
for subTest in subTests:
- a = MockAgent(self, commands = subTest['cmds'])
+ a = MockAgent(self, commands=subTest['cmds'])
exceptionThrown = False
try:
d = mozdevice.DroidSUT("127.0.0.1", port=a.port,
logLevel=logging.DEBUG)
d.pushDir(tempdir, "/mnt/sdcard")
except mozdevice.DMError:
exceptionThrown = True
--- a/testing/mozbase/mozdevice/tests/sut_remove.py
+++ b/testing/mozbase/mozdevice/tests/sut_remove.py
@@ -1,24 +1,24 @@
-#/usr/bin/env python
+#!/usr/bin/env python
import mozdevice
import logging
import unittest
from sut import MockAgent
class TestRemove(unittest.TestCase):
def test_removeDir(self):
commands = [("isdir /mnt/sdcard/test", "TRUE"),
("rmdr /mnt/sdcard/test", "Deleting file(s) from "
- "/storage/emulated/legacy/Moztest\n"
- " <empty>\n"
- "Deleting directory "
- "/storage/emulated/legacy/Moztest\n")]
+ "/storage/emulated/legacy/Moztest\n"
+ " <empty>\n"
+ "Deleting directory "
+ "/storage/emulated/legacy/Moztest\n")]
m = MockAgent(self, commands=commands)
d = mozdevice.DroidSUT("127.0.0.1", port=m.port, logLevel=logging.DEBUG)
# No error implies we're all good
self.assertEqual(None, d.removeDir("/mnt/sdcard/test"))
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozdevice/tests/sut_time.py
+++ b/testing/mozbase/mozdevice/tests/sut_time.py
@@ -1,9 +1,9 @@
-#/usr/bin/env python
+#!/usr/bin/env python
import mozdevice
import logging
import unittest
from sut import MockAgent
class TestGetCurrentTime(unittest.TestCase):
--- a/testing/mozbase/mozfile/mozfile/__init__.py
+++ b/testing/mozbase/mozfile/mozfile/__init__.py
@@ -1,7 +1,8 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
from .mozfile import *
--- a/testing/mozbase/mozfile/mozfile/mozfile.py
+++ b/testing/mozbase/mozfile/mozfile/mozfile.py
@@ -22,17 +22,18 @@ import warnings
'load',
'move',
'remove',
'rmtree',
'tree',
'NamedTemporaryFile',
'TemporaryDirectory']
-### utilities for extracting archives
+# utilities for extracting archives
+
def extract_tarball(src, dest):
"""extract a .tar file"""
import tarfile
bundle = tarfile.open(src)
namelist = bundle.getnames()
@@ -117,17 +118,17 @@ def extract(src, dest=None):
if index != -1:
root = os.path.join(dest, name[:index])
if root not in top_level_files:
top_level_files.append(root)
return top_level_files
-### utilities for removal of files and directories
+# utilities for removal of files and directories
def rmtree(dir):
"""Deprecated wrapper method to remove a directory tree.
Ensure to update your code to use mozfile.remove() directly
:param dir: directory to be removed
"""
@@ -156,17 +157,17 @@ def _call_windows_retry(func, args=(), r
raise
if retry_count == retry_max:
raise
retry_count += 1
print '%s() failed for "%s". Reason: %s (%s). Retrying...' % \
- (func.__name__, args, e.strerror, e.errno)
+ (func.__name__, args, e.strerror, e.errno)
time.sleep(retry_count * retry_delay)
else:
# If no exception has been thrown it should be done
break
def remove(path):
"""Removes the specified file, link, or directory tree.
@@ -256,27 +257,28 @@ def depth(directory):
level += 1
if not remainder:
break
return level
# ASCII delimeters
ascii_delimeters = {
- 'vertical_line' : '|',
- 'item_marker' : '+',
- 'last_child' : '\\'
- }
+ 'vertical_line': '|',
+ 'item_marker': '+',
+ 'last_child': '\\'
+}
# unicode delimiters
unicode_delimeters = {
- 'vertical_line' : '│',
- 'item_marker' : '├',
- 'last_child' : '└'
- }
+ 'vertical_line': '│',
+ 'item_marker': '├',
+ 'last_child': '└'
+}
+
def tree(directory,
item_marker=unicode_delimeters['item_marker'],
vertical_line=unicode_delimeters['vertical_line'],
last_child=unicode_delimeters['last_child'],
sort_key=lambda x: x.lower()):
"""
display tree directory structure for `directory`
@@ -314,31 +316,31 @@ def tree(directory,
indent[-1] = ' '
elif not indent:
dirpath_mark = ''
else:
dirpath_mark = item_marker
# append the directory and piece of tree structure
# if the top-level entry directory, print as passed
- retval.append('%s%s%s'% (''.join(indent[:-1]),
- dirpath_mark,
- basename if retval else directory))
+ retval.append('%s%s%s' % (''.join(indent[:-1]),
+ dirpath_mark,
+ basename if retval else directory))
# add the files
if filenames:
last_file = filenames[-1]
retval.extend([('%s%s%s' % (''.join(indent),
files_end if filename == last_file else item_marker,
filename))
- for index, filename in enumerate(filenames)])
+ for index, filename in enumerate(filenames)])
return '\n'.join(retval)
-### utilities for temporary resources
+# utilities for temporary resources
class NamedTemporaryFile(object):
"""
Like tempfile.NamedTemporaryFile except it works on Windows
in the case where you open the created file a second time.
This behaves very similarly to tempfile.NamedTemporaryFile but may
not behave exactly the same. For example, this function does not
@@ -348,16 +350,17 @@ class NamedTemporaryFile(object):
with NamedTemporaryFile() as fh:
fh.write(b'foobar')
print('Filename: %s' % fh.name)
see https://bugzilla.mozilla.org/show_bug.cgi?id=821362
"""
+
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='tmp',
dir=None, delete=True):
import tempfile
fd, path = tempfile.mkstemp(suffix, prefix, dir, 't' in mode)
os.close(fd)
self.file = open(path, mode)
@@ -405,31 +408,32 @@ def TemporaryDirectory():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
-### utilities dealing with URLs
+# utilities dealing with URLs
def is_url(thing):
"""
Return True if thing looks like a URL.
"""
import urlparse
parsed = urlparse.urlparse(thing)
if 'scheme' in parsed:
return len(parsed.scheme) >= 2
else:
return len(parsed[0]) >= 2
+
def load(resource):
"""
open a file or URL for reading. If the passed resource string is not a URL,
or begins with 'file://', return a ``file``. Otherwise, return the
result of urllib2.urlopen()
"""
import urllib2
@@ -438,9 +442,8 @@ def load(resource):
if resource.startswith('file://'):
resource = resource[len('file://'):]
if not is_url(resource):
# if no scheme is given, it is a file path
return file(resource)
return urllib2.urlopen(resource)
-
--- a/testing/mozbase/mozfile/setup.py
+++ b/testing/mozbase/mozfile/setup.py
@@ -6,17 +6,17 @@ from setuptools import setup
PACKAGE_NAME = 'mozfile'
PACKAGE_VERSION = '1.2'
setup(name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description="Library of file utilities for use in Mozilla testing",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozfile'],
include_package_data=True,
zip_safe=False,
--- a/testing/mozbase/mozfile/tests/test_extract.py
+++ b/testing/mozbase/mozfile/tests/test_extract.py
@@ -113,17 +113,17 @@ class TestExtract(unittest.TestCase):
mozfile.extract(filename, dest)
except Exception as exception:
pass
finally:
os.remove(filename)
os.rmdir(dest)
self.assertTrue(isinstance(exception, Exception))
- ### utility functions
+ # utility functions
def create_tarball(self):
"""create a stub tarball for testing"""
tempdir = stubs.create_stub()
filename = tempfile.mktemp(suffix='.tar')
archive = tarfile.TarFile(filename, mode='w')
try:
for path in stubs.files:
--- a/testing/mozbase/mozfile/tests/test_move_remove.py
+++ b/testing/mozbase/mozfile/tests/test_move_remove.py
@@ -21,16 +21,17 @@ def mark_readonly(path):
:param path: path of directory/file of which modes must be changed
"""
mode = os.stat(path)[stat.ST_MODE]
os.chmod(path, mode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH)
class FileOpenCloseThread(threading.Thread):
"""Helper thread for asynchronous file handling"""
+
def __init__(self, path, delay, delete=False):
threading.Thread.__init__(self)
self.file_opened = threading.Event()
self.delay = delay
self.path = path
self.delete = delete
def run(self):
@@ -196,16 +197,17 @@ class MozfileRemoveTestCase(unittest.Tes
mozfile.remove(not_existing_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
self.fail("removing non existing path must not raise error")
raise
class MozFileMoveTestCase(unittest.TestCase):
+
def setUp(self):
# Generate a stub
self.tempdir = stubs.create_stub()
self.addCleanup(mozfile.rmtree, self.tempdir)
def test_move_file(self):
file_path = os.path.join(self.tempdir, *stubs.files[1])
moved_path = file_path + '.moved'
--- a/testing/mozbase/mozhttpd/mozhttpd/__init__.py
+++ b/testing/mozbase/mozhttpd/mozhttpd/__init__.py
@@ -39,8 +39,10 @@ content from the current directory, defi
'function': resource_get } ])
print "Serving '%s' at %s:%s" % (httpd.docroot, httpd.host, httpd.port)
httpd.start(block=True)
"""
from mozhttpd import MozHttpd, Request, RequestHandler, main
from handlers import json_response
+
+__all__ = ['MozHttpd', 'Request', 'RequestHandler', 'main', 'json_response']
--- a/testing/mozbase/mozhttpd/mozhttpd/handlers.py
+++ b/testing/mozbase/mozhttpd/mozhttpd/handlers.py
@@ -1,15 +1,16 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
+
def json_response(func):
""" Translates results of 'func' into a JSON response. """
def wrap(*a, **kw):
(code, data) = func(*a, **kw)
json_data = json.dumps(data)
- return (code, { 'Content-type': 'application/json',
- 'Content-Length': len(json_data) }, json_data)
+ return (code, {'Content-type': 'application/json',
+ 'Content-Length': len(json_data)}, json_data)
return wrap
--- a/testing/mozbase/mozhttpd/mozhttpd/mozhttpd.py
+++ b/testing/mozbase/mozhttpd/mozhttpd/mozhttpd.py
@@ -15,16 +15,17 @@ import sys
import os
import urllib
import urlparse
import re
import moznetwork
import time
from SocketServer import ThreadingMixIn
+
class EasyServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
allow_reuse_address = True
acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
def handle_error(self, request, client_address):
error = sys.exc_value
if ((isinstance(error, socket.error) and
@@ -57,31 +58,31 @@ class Request(object):
if body_len and rfile:
self.body = rfile.read(body_len)
else:
self.body = None
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
- docroot = os.getcwd() # current working directory at time of import
+ docroot = os.getcwd() # current working directory at time of import
proxy_host_dirs = False
request_log = []
log_requests = False
request = None
def __init__(self, *args, **kwargs):
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)
self.extensions_map['.svg'] = 'image/svg+xml'
def _try_handler(self, method):
if self.log_requests:
- self.request_log.append({ 'method': method,
- 'path': self.request.path,
- 'time': time.time() })
+ self.request_log.append({'method': method,
+ 'path': self.request.path,
+ 'time': time.time()})
handlers = [handler for handler in self.urlhandlers
if handler['method'] == method]
for handler in handlers:
m = re.match(handler['path'], self.request.path)
if m:
(response_code, headerdict, data) = \
handler['function'](self.request, *m.groups())
@@ -157,21 +158,21 @@ class RequestHandler(SimpleHTTPServer.Si
# fragment and mangled the path for proxying, if required.
path = posixpath.normpath(urllib.unquote(self.path))
words = path.split('/')
words = filter(None, words)
path = self.disk_root
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
- if word in (os.curdir, os.pardir): continue
+ if word in (os.curdir, os.pardir):
+ continue
path = os.path.join(path, word)
return path
-
# I found on my local network that calls to this were timing out
# I believe all of these calls are from log_message
def address_string(self):
return "a.b.c.d"
# This produces a LOT of noise
def log_message(self, format, *args):
pass
@@ -256,27 +257,27 @@ class MozHttpd(object):
server will be started on a separate thread that can be terminated by
a call to stop().
"""
self.httpd = EasyServer((self.host, self.port), self.handler_class)
if block:
self.httpd.serve_forever()
else:
self.server = threading.Thread(target=self.httpd.serve_forever)
- self.server.setDaemon(True) # don't hang on exit
+ self.server.setDaemon(True) # don't hang on exit
self.server.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.httpd:
- ### FIXME: There is no shutdown() method in Python 2.4...
+ # FIXME: There is no shutdown() method in Python 2.4...
try:
self.httpd.shutdown()
except AttributeError:
pass
self.httpd = None
def get_url(self, path="/"):
"""
--- a/testing/mozbase/mozhttpd/setup.py
+++ b/testing/mozbase/mozhttpd/setup.py
@@ -6,25 +6,24 @@ from setuptools import setup
PACKAGE_VERSION = '0.7'
deps = ['moznetwork >= 0.24']
setup(name='mozhttpd',
version=PACKAGE_VERSION,
description="Python webserver intended for use with Mozilla testing",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozhttpd'],
include_package_data=True,
zip_safe=False,
install_requires=deps,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
mozhttpd = mozhttpd:main
""",
)
-
--- a/testing/mozbase/mozhttpd/tests/api.py
+++ b/testing/mozbase/mozhttpd/tests/api.py
@@ -9,105 +9,106 @@ import mozhttpd
import urllib2
import os
import unittest
import json
import tempfile
here = os.path.dirname(os.path.abspath(__file__))
+
class ApiTest(unittest.TestCase):
resource_get_called = 0
resource_post_called = 0
resource_del_called = 0
@mozhttpd.handlers.json_response
def resource_get(self, request, objid):
self.resource_get_called += 1
- return (200, { 'called': self.resource_get_called,
- 'id': objid,
- 'query': request.query })
+ return (200, {'called': self.resource_get_called,
+ 'id': objid,
+ 'query': request.query})
@mozhttpd.handlers.json_response
def resource_post(self, request):
self.resource_post_called += 1
- return (201, { 'called': self.resource_post_called,
- 'data': json.loads(request.body),
- 'query': request.query })
+ return (201, {'called': self.resource_post_called,
+ 'data': json.loads(request.body),
+ 'query': request.query})
@mozhttpd.handlers.json_response
def resource_del(self, request, objid):
self.resource_del_called += 1
- return (200, { 'called': self.resource_del_called,
- 'id': objid,
- 'query': request.query })
+ return (200, {'called': self.resource_del_called,
+ 'id': objid,
+ 'query': request.query})
def get_url(self, path, server_port, querystr):
url = "http://127.0.0.1:%s%s" % (server_port, path)
if querystr:
url += "?%s" % querystr
return url
def try_get(self, server_port, querystr):
self.resource_get_called = 0
f = urllib2.urlopen(self.get_url('/api/resource/1', server_port, querystr))
try:
self.assertEqual(f.getcode(), 200)
except AttributeError:
pass # python 2.4
- self.assertEqual(json.loads(f.read()), { 'called': 1, 'id': str(1), 'query': querystr })
+ self.assertEqual(json.loads(f.read()), {'called': 1, 'id': str(1), 'query': querystr})
self.assertEqual(self.resource_get_called, 1)
def try_post(self, server_port, querystr):
self.resource_post_called = 0
- postdata = { 'hamburgers': '1234' }
+ postdata = {'hamburgers': '1234'}
try:
f = urllib2.urlopen(self.get_url('/api/resource/', server_port, querystr),
data=json.dumps(postdata))
except urllib2.HTTPError as e:
# python 2.4
self.assertEqual(e.code, 201)
body = e.fp.read()
else:
self.assertEqual(f.getcode(), 201)
body = f.read()
- self.assertEqual(json.loads(body), { 'called': 1,
- 'data': postdata,
- 'query': querystr })
+ self.assertEqual(json.loads(body), {'called': 1,
+ 'data': postdata,
+ 'query': querystr})
self.assertEqual(self.resource_post_called, 1)
def try_del(self, server_port, querystr):
self.resource_del_called = 0
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(self.get_url('/api/resource/1', server_port, querystr))
request.get_method = lambda: 'DEL'
f = opener.open(request)
try:
self.assertEqual(f.getcode(), 200)
except AttributeError:
pass # python 2.4
- self.assertEqual(json.loads(f.read()), { 'called': 1, 'id': str(1), 'query': querystr })
+ self.assertEqual(json.loads(f.read()), {'called': 1, 'id': str(1), 'query': querystr})
self.assertEqual(self.resource_del_called, 1)
def test_api(self):
httpd = mozhttpd.MozHttpd(port=0,
- urlhandlers = [ { 'method': 'GET',
- 'path': '/api/resource/([^/]+)/?',
- 'function': self.resource_get },
- { 'method': 'POST',
- 'path': '/api/resource/?',
- 'function': self.resource_post },
- { 'method': 'DEL',
- 'path': '/api/resource/([^/]+)/?',
- 'function': self.resource_del }
- ])
+ urlhandlers=[{'method': 'GET',
+ 'path': '/api/resource/([^/]+)/?',
+ 'function': self.resource_get},
+ {'method': 'POST',
+ 'path': '/api/resource/?',
+ 'function': self.resource_post},
+ {'method': 'DEL',
+ 'path': '/api/resource/([^/]+)/?',
+ 'function': self.resource_del}
+ ])
httpd.start(block=False)
server_port = httpd.httpd.server_port
# GET
self.try_get(server_port, '')
self.try_get(server_port, '?foo=bar')
@@ -164,48 +165,51 @@ class ApiTest(unittest.TestCase):
opener.open(request)
except urllib2.HTTPError:
self.assertEqual(e.code, 404)
exception_thrown = True
self.assertTrue(exception_thrown)
def test_api_with_docroot(self):
httpd = mozhttpd.MozHttpd(port=0, docroot=here,
- urlhandlers = [ { 'method': 'GET',
- 'path': '/api/resource/([^/]+)/?',
- 'function': self.resource_get } ])
+ urlhandlers=[{'method': 'GET',
+ 'path': '/api/resource/([^/]+)/?',
+ 'function': self.resource_get}])
httpd.start(block=False)
server_port = httpd.httpd.server_port
# We defined a docroot, so we expect a directory listing
f = urllib2.urlopen(self.get_url('/', server_port, None))
try:
self.assertEqual(f.getcode(), 200)
except AttributeError:
pass # python 2.4
self.assertTrue('Directory listing for' in f.read())
# Make sure API methods still work
self.try_get(server_port, '')
self.try_get(server_port, '?foo=bar')
+
class ProxyTest(unittest.TestCase):
def tearDown(self):
# reset proxy opener in case it changed
urllib2.install_opener(None)
def test_proxy(self):
docroot = tempfile.mkdtemp()
self.addCleanup(mozfile.remove, docroot)
hosts = ('mozilla.com', 'mozilla.org')
unproxied_host = 'notmozilla.org'
+
def url(host): return 'http://%s/' % host
index_filename = 'index.html'
+
def index_contents(host): return '%s index' % host
index = file(os.path.join(docroot, index_filename), 'w')
index.write(index_contents('*'))
index.close()
httpd = mozhttpd.MozHttpd(port=0, docroot=docroot)
httpd.start(block=False)
--- a/testing/mozbase/mozhttpd/tests/baseurl.py
+++ b/testing/mozbase/mozhttpd/tests/baseurl.py
@@ -1,18 +1,19 @@
import mozhttpd
import unittest
+
class BaseUrlTest(unittest.TestCase):
def test_base_url(self):
httpd = mozhttpd.MozHttpd(port=0)
self.assertEqual(httpd.get_url(), None)
httpd.start(block=False)
self.assertEqual("http://127.0.0.1:%s/" % httpd.httpd.server_port,
httpd.get_url())
- self.assertEqual("http://127.0.0.1:%s/cheezburgers.html" % \
- httpd.httpd.server_port,
+ self.assertEqual("http://127.0.0.1:%s/cheezburgers.html" %
+ httpd.httpd.server_port,
httpd.get_url(path="/cheezburgers.html"))
httpd.stop()
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozhttpd/tests/filelisting.py
+++ b/testing/mozbase/mozhttpd/tests/filelisting.py
@@ -7,33 +7,35 @@
import mozhttpd
import urllib2
import os
import unittest
import re
here = os.path.dirname(os.path.abspath(__file__))
+
class FileListingTest(unittest.TestCase):
def check_filelisting(self, path=''):
filelist = os.listdir(here)
httpd = mozhttpd.MozHttpd(port=0, docroot=here)
httpd.start(block=False)
f = urllib2.urlopen("http://%s:%s/%s" % ('127.0.0.1', httpd.httpd.server_port, path))
for line in f.readlines():
- webline = re.sub('\<[a-zA-Z0-9\-\_\.\=\"\'\/\\\%\!\@\#\$\^\&\*\(\) ]*\>', '', line.strip('\n')).strip('/').strip().strip('@')
+ webline = re.sub('\<[a-zA-Z0-9\-\_\.\=\"\'\/\\\%\!\@\#\$\^\&\*\(\) ]*\>',
+ '', line.strip('\n')).strip('/').strip().strip('@')
if webline and not webline.startswith("Directory listing for"):
self.assertTrue(webline in filelist,
"File %s in dir listing corresponds to a file" % webline)
filelist.remove(webline)
- self.assertFalse(filelist, "Should have no items in filelist (%s) unaccounted for" % filelist)
-
+ self.assertFalse(
+ filelist, "Should have no items in filelist (%s) unaccounted for" % filelist)
def test_filelist(self):
self.check_filelisting()
def test_filelist_params(self):
self.check_filelisting('?foo=bar&fleem=&foo=fleem')
--- a/testing/mozbase/mozhttpd/tests/paths.py
+++ b/testing/mozbase/mozhttpd/tests/paths.py
@@ -4,17 +4,19 @@
# http://creativecommons.org/publicdomain/zero/1.0/
from mozfile import TemporaryDirectory
import mozhttpd
import os
import unittest
import urllib2
+
class PathTest(unittest.TestCase):
+
def try_get(self, url, expected_contents):
f = urllib2.urlopen(url)
self.assertEqual(f.getcode(), 200)
self.assertEqual(f.read(), expected_contents)
def try_get_expect_404(self, url):
with self.assertRaises(urllib2.HTTPError) as cm:
urllib2.urlopen(url)
@@ -37,17 +39,17 @@ class PathTest(unittest.TestCase):
def test_substring_mappings(self):
"""Test that a path mapping that's a substring of another works."""
with TemporaryDirectory() as d1, TemporaryDirectory() as d2:
open(os.path.join(d1, "test1.txt"), "w").write("test 1 contents")
open(os.path.join(d2, "test2.txt"), "w").write("test 2 contents")
httpd = mozhttpd.MozHttpd(port=0,
path_mappings={'/abcxyz': d1,
- '/abc': d2,}
+ '/abc': d2, }
)
httpd.start(block=False)
self.try_get(httpd.get_url("/abcxyz/test1.txt"), "test 1 contents")
self.try_get(httpd.get_url("/abc/test2.txt"), "test 2 contents")
httpd.stop()
def test_multipart_path_mapping(self):
"""Test that a path mapping with multiple directories works."""
--- a/testing/mozbase/mozhttpd/tests/requestlog.py
+++ b/testing/mozbase/mozhttpd/tests/requestlog.py
@@ -4,16 +4,17 @@
import mozhttpd
import urllib2
import os
import unittest
here = os.path.dirname(os.path.abspath(__file__))
+
class RequestLogTest(unittest.TestCase):
def check_logging(self, log_requests=False):
httpd = mozhttpd.MozHttpd(port=0, docroot=here, log_requests=log_requests)
httpd.start(block=False)
url = "http://%s:%s/" % ('127.0.0.1', httpd.httpd.server_port)
f = urllib2.urlopen(url)
--- a/testing/mozbase/mozinfo/mozinfo/__init__.py
+++ b/testing/mozbase/mozinfo/mozinfo/__init__.py
@@ -1,8 +1,9 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
"""
interface to transform introspected system information to a format palatable to
--- a/testing/mozbase/mozinfo/mozinfo/mozinfo.py
+++ b/testing/mozbase/mozinfo/mozinfo/mozinfo.py
@@ -15,33 +15,38 @@ import platform
import re
import sys
from .string_version import StringVersion
# keep a copy of the os module since updating globals overrides this
_os = os
+
class unknown(object):
"""marker class for unknown information"""
+
def __nonzero__(self):
return False
+
def __str__(self):
return 'UNKNOWN'
-unknown = unknown() # singleton
+unknown = unknown() # singleton
+
def get_windows_version():
import ctypes
+
class OSVERSIONINFOEXW(ctypes.Structure):
_fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
('dwMajorVersion', ctypes.c_ulong),
('dwMinorVersion', ctypes.c_ulong),
('dwBuildNumber', ctypes.c_ulong),
('dwPlatformId', ctypes.c_ulong),
- ('szCSDVersion', ctypes.c_wchar*128),
+ ('szCSDVersion', ctypes.c_wchar * 128),
('wServicePackMajor', ctypes.c_ushort),
('wServicePackMinor', ctypes.c_ushort),
('wSuiteMask', ctypes.c_ushort),
('wProductType', ctypes.c_byte),
('wReserved', ctypes.c_byte)]
os_version = OSVERSIONINFOEXW()
os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
@@ -52,17 +57,17 @@ def get_windows_version():
return os_version.dwMajorVersion, os_version.dwMinorVersion, os_version.dwBuildNumber
# get system information
info = {'os': unknown,
'processor': unknown,
'version': unknown,
'os_version': unknown,
'bits': unknown,
- 'has_sandbox': unknown }
+ 'has_sandbox': unknown}
(system, node, release, version, machine, processor) = platform.uname()
(bits, linkage) = platform.architecture()
# get os information and related data
if system in ["Microsoft", "Windows"]:
info['os'] = 'win'
# There is a Python bug on Windows to determine platform values
# http://bugs.python.org/issue7860
@@ -133,17 +138,17 @@ if processor in ["i386", "i686"]:
elif processor.upper() == "AMD64":
bits = "64bit"
processor = "x86_64"
elif processor == "Power Macintosh":
processor = "ppc"
bits = re.search('(\d+)bit', bits).group(1)
info.update({'processor': processor,
'bits': int(bits),
- })
+ })
if info['os'] == 'linux':
import ctypes
import errno
PR_SET_SECCOMP = 22
SECCOMP_MODE_FILTER = 2
ctypes.CDLL("libc.so.6", use_errno=True).prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0)
info['has_sandbox'] = ctypes.get_errno() == errno.EFAULT
@@ -156,24 +161,26 @@ choices = {'os': ['linux', 'bsd', 'win',
'processor': ['x86', 'x86_64', 'ppc']}
def sanitize(info):
"""Do some sanitization of input values, primarily
to handle universal Mac builds."""
if "processor" in info and info["processor"] == "universal-x86-x86_64":
# If we're running on OS X 10.6 or newer, assume 64-bit
- if release[:4] >= "10.6": # Note this is a string comparison
+ if release[:4] >= "10.6": # Note this is a string comparison
info["processor"] = "x86_64"
info["bits"] = 64
else:
info["processor"] = "x86"
info["bits"] = 32
# method for updating information
+
+
def update(new_info):
"""
Update the info.
:param new_info: Either a dict containing the new info or a path/url
to a json file containing the new info.
"""
@@ -188,19 +195,20 @@ def update(new_info):
info.update(new_info)
sanitize(info)
globals().update(info)
# convenience data for os access
for os_name in choices['os']:
globals()['is' + os_name.title()] = info['os'] == os_name
# unix is special
- if isLinux or isBsd:
+ if isLinux or isBsd: # noqa
globals()['isUnix'] = True
+
def find_and_update_from_json(*dirs):
"""
Find a mozinfo.json file, load it, and update the info with the
contents.
:param dirs: Directories in which to look for the file. They will be
searched after first looking in the root of the objdir
if the current script is being run from a Mozilla objdir.
@@ -224,36 +232,38 @@ def find_and_update_from_json(*dirs):
d = _os.path.abspath(d)
json_path = _os.path.join(d, "mozinfo.json")
if _os.path.isfile(json_path):
update(json_path)
return json_path
return None
+
def output_to_file(path):
import json
with open(path, 'w') as f:
- f.write(json.dumps(info));
+ f.write(json.dumps(info))
update({})
# exports
__all__ = info.keys()
__all__ += ['is' + os_name.title() for os_name in choices['os']]
__all__ += [
'info',
'unknown',
'main',
'choices',
'update',
'find_and_update_from_json',
'output_to_file',
'StringVersion',
- ]
+]
+
def main(args=None):
# parse the command line
from optparse import OptionParser
parser = OptionParser(description=__doc__)
for key in choices:
parser.add_option('--%s' % key, dest=key,
@@ -274,16 +284,17 @@ def main(args=None):
# print out choices if requested
flag = False
for key, value in options.__dict__.items():
if value is True:
print '%s choices: %s' % (key, ' '.join([str(choice)
for choice in choices[key]]))
flag = True
- if flag: return
+ if flag:
+ return
# otherwise, print out all info
for key, value in info.items():
print '%s: %s' % (key, value)
if __name__ == '__main__':
main()
--- a/testing/mozbase/mozinfo/mozinfo/string_version.py
+++ b/testing/mozbase/mozinfo/mozinfo/string_version.py
@@ -4,16 +4,17 @@
from distutils.version import LooseVersion
class StringVersion(str):
"""
A string version that can be compared with comparison operators.
"""
+
def __init__(self, vstring):
str.__init__(self, vstring)
self.version = LooseVersion(vstring)
def __repr__(self):
return "StringVersion ('%s')" % self
def __to_version(self, other):
--- a/testing/mozbase/mozinfo/setup.py
+++ b/testing/mozbase/mozinfo/setup.py
@@ -8,17 +8,17 @@ PACKAGE_VERSION = '0.9'
# dependencies
deps = ['mozfile >= 0.12']
setup(name='mozinfo',
version=PACKAGE_VERSION,
description="Library to get system information for use in Mozilla testing",
long_description="see http://mozbase.readthedocs.org",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozinfo'],
include_package_data=True,
zip_safe=False,
--- a/testing/mozbase/mozinfo/tests/test.py
+++ b/testing/mozbase/mozinfo/tests/test.py
@@ -8,17 +8,19 @@ import json
import mock
import os
import shutil
import sys
import tempfile
import unittest
import mozinfo
+
class TestMozinfo(unittest.TestCase):
+
def setUp(self):
reload(mozinfo)
self.tempdir = os.path.abspath(tempfile.mkdtemp())
# When running from an objdir mozinfo will use a build generated json file
# instead of the ones created for testing. Prevent that from happening.
# See bug 896038 for details.
sys.modules['mozbuild'] = None
@@ -43,20 +45,20 @@ class TestMozinfo(unittest.TestCase):
j = os.path.join(self.tempdir, "mozinfo.json")
with open(j, "w") as f:
f.write(json.dumps({"foo": "xyz"}))
mozinfo.update(j)
self.assertEqual(mozinfo.info["foo"], "xyz")
def test_update_file_invalid_json(self):
"""Test that mozinfo.update handles invalid JSON correctly"""
- j = os.path.join(self.tempdir,'test.json')
+ j = os.path.join(self.tempdir, 'test.json')
with open(j, 'w') as f:
f.write('invalid{"json":')
- self.assertRaises(ValueError,mozinfo.update,[j])
+ self.assertRaises(ValueError, mozinfo.update, [j])
def test_find_and_update_file(self):
"""Test that mozinfo.find_and_update_from_json can
find mozinfo.json in a directory passed to it."""
j = os.path.join(self.tempdir, "mozinfo.json")
with open(j, "w") as f:
f.write(json.dumps({"foo": "abcdefg"}))
self.assertEqual(mozinfo.find_and_update_from_json(self.tempdir), j)
@@ -65,17 +67,16 @@ class TestMozinfo(unittest.TestCase):
def test_find_and_update_file_invalid_json(self):
"""Test that mozinfo.find_and_update_from_json can
handle invalid JSON"""
j = os.path.join(self.tempdir, "mozinfo.json")
with open(j, 'w') as f:
f.write('invalid{"json":')
self.assertRaises(ValueError, mozinfo.find_and_update_from_json, self.tempdir)
-
def test_find_and_update_file_mozbuild(self):
"""Test that mozinfo.find_and_update_from_json can
find mozinfo.json using the mozbuild module."""
j = os.path.join(self.tempdir, "mozinfo.json")
with open(j, "w") as f:
f.write(json.dumps({"foo": "123456"}))
m = mock.MagicMock()
# Mock the value of MozbuildObject.from_environment().topobjdir.
@@ -87,16 +88,17 @@ class TestMozinfo(unittest.TestCase):
def test_output_to_file(self):
"""Test that mozinfo.output_to_file works."""
path = os.path.join(self.tempdir, "mozinfo.json")
mozinfo.output_to_file(path)
self.assertEqual(open(path).read(), json.dumps(mozinfo.info))
class TestStringVersion(unittest.TestCase):
+
def test_os_version_is_a_StringVersion(self):
self.assertIsInstance(mozinfo.os_version, mozinfo.StringVersion)
def test_compare_to_string(self):
version = mozinfo.StringVersion('10.10')
self.assertGreater(version, '10.2')
self.assertGreater('11', version)
--- a/testing/mozbase/mozinstall/mozinstall/__init__.py
+++ b/testing/mozbase/mozinstall/mozinstall/__init__.py
@@ -1,5 +1,6 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from mozinstall import *
--- a/testing/mozbase/mozinstall/mozinstall/mozinstall.py
+++ b/testing/mozbase/mozinstall/mozinstall/mozinstall.py
@@ -335,9 +335,8 @@ def uninstall_cli(argv=sys.argv[1:]):
parser = OptionParser(usage="usage: %prog install_path")
(options, args) = parser.parse_args(argv)
if not len(args) == 1:
parser.error('An installation path has to be specified.')
# Run it
uninstall(argv[0])
-
--- a/testing/mozbase/mozinstall/setup.py
+++ b/testing/mozbase/mozinstall/setup.py
@@ -10,41 +10,41 @@ try:
description = file(os.path.join(here, 'README.md')).read()
except IOError:
description = None
PACKAGE_VERSION = '1.12'
deps = ['mozinfo >= 0.7',
'mozfile >= 1.0',
- ]
+ ]
setup(name='mozInstall',
version=PACKAGE_VERSION,
description="package for installing and uninstalling Mozilla applications",
long_description="see http://mozbase.readthedocs.org/",
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
- ],
+ ],
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL 2.0',
packages=['mozinstall'],
include_package_data=True,
zip_safe=False,
install_requires=deps,
- tests_require=['mozprocess >= 0.15',],
+ tests_require=['mozprocess >= 0.15', ],
# we have to generate two more executables for those systems that cannot run as Administrator
# and the filename containing "install" triggers the UAC
entry_points="""
# -*- Entry points: -*-
[console_scripts]
mozinstall = mozinstall:install_cli
mozuninstall = mozinstall:uninstall_cli
moz_add_to_system = mozinstall:install_cli
--- a/testing/mozbase/mozinstall/tests/test.py
+++ b/testing/mozbase/mozinstall/tests/test.py
@@ -9,16 +9,17 @@ import mozinstall
import mozfile
import os
import tempfile
import unittest
# Store file location at load time
here = os.path.dirname(os.path.abspath(__file__))
+
class TestMozInstall(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" Setting up stub installers """
cls.dmg = os.path.join(here, 'Installer-Stubs', 'firefox.dmg')
# XXX: We have removed firefox.exe since it is not valid for mozinstall 1.12 and higher
# Bug 1157352 - We should grab a firefox.exe from the build process or download it
@@ -27,69 +28,71 @@ class TestMozInstall(unittest.TestCase):
cls.bz2 = os.path.join(here, 'Installer-Stubs', 'firefox.tar.bz2')
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
mozfile.rmtree(self.tempdir)
- @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe for mozinstall 1.12 and higher.")
+ @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe "
+ "for mozinstall 1.12 and higher.")
def test_get_binary(self):
""" Test mozinstall's get_binary method """
if mozinfo.isLinux:
installdir = mozinstall.install(self.bz2, self.tempdir)
binary = os.path.join(installdir, 'firefox')
self.assertEqual(binary, mozinstall.get_binary(installdir, 'firefox'))
elif mozinfo.isWin:
installdir_exe = mozinstall.install(self.exe,
os.path.join(self.tempdir, 'exe'))
binary_exe = os.path.join(installdir_exe, 'core', 'firefox.exe')
self.assertEqual(binary_exe, mozinstall.get_binary(installdir_exe,
- 'firefox'))
+ 'firefox'))
installdir_zip = mozinstall.install(self.zipfile,
os.path.join(self.tempdir, 'zip'))
binary_zip = os.path.join(installdir_zip, 'firefox.exe')
self.assertEqual(binary_zip, mozinstall.get_binary(installdir_zip,
- 'firefox'))
+ 'firefox'))
elif mozinfo.isMac:
installdir = mozinstall.install(self.dmg, self.tempdir)
binary = os.path.join(installdir, 'Contents', 'MacOS', 'firefox')
self.assertEqual(binary, mozinstall.get_binary(installdir, 'firefox'))
def test_get_binary_error(self):
""" Test an InvalidBinary error is raised """
tempdir_empty = tempfile.mkdtemp()
self.assertRaises(mozinstall.InvalidBinary, mozinstall.get_binary,
tempdir_empty, 'firefox')
mozfile.rmtree(tempdir_empty)
- @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe for mozinstall 1.12 and higher.")
+ @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe "
+ "for mozinstall 1.12 and higher.")
def test_is_installer(self):
""" Test we can identify a correct installer """
if mozinfo.isLinux:
self.assertTrue(mozinstall.is_installer(self.bz2))
if mozinfo.isWin:
# test zip installer
self.assertTrue(mozinstall.is_installer(self.zipfile))
# test exe installer
self.assertTrue(mozinstall.is_installer(self.exe))
try:
# test stub browser file
# without pefile on the system this test will fail
- import pefile
+ import pefile # noqa
stub_exe = os.path.join(here, 'build_stub', 'firefox.exe')
self.assertFalse(mozinstall.is_installer(stub_exe))
except ImportError:
pass
if mozinfo.isMac:
self.assertTrue(mozinstall.is_installer(self.dmg))
@@ -103,17 +106,18 @@ class TestMozInstall(unittest.TestCase):
elif mozinfo.isWin:
self.assertRaises(mozinstall.InvalidSource, mozinstall.install,
self.bz2, 'firefox')
elif mozinfo.isMac:
self.assertRaises(mozinstall.InvalidSource, mozinstall.install,
self.bz2, 'firefox')
- @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe for mozinstall 1.12 and higher.")
+ @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe "
+ "for mozinstall 1.12 and higher.")
def test_install(self):
""" Test mozinstall's install capability """
if mozinfo.isLinux:
installdir = mozinstall.install(self.bz2, self.tempdir)
self.assertEqual(os.path.join(self.tempdir, 'firefox'), installdir)
elif mozinfo.isWin:
@@ -127,17 +131,18 @@ class TestMozInstall(unittest.TestCase):
self.assertEqual(os.path.join(self.tempdir, 'zip', 'firefox'),
installdir_zip)
elif mozinfo.isMac:
installdir = mozinstall.install(self.dmg, self.tempdir)
self.assertEqual(os.path.join(os.path.realpath(self.tempdir),
'FirefoxStub.app'), installdir)
- @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe for mozinstall 1.12 and higher.")
+ @unittest.skipIf(mozinfo.isWin, "Bug 1157352 - We need a new firefox.exe "
+ "for mozinstall 1.12 and higher.")
def test_uninstall(self):
""" Test mozinstall's uninstall capabilites """
# Uninstall after installing
if mozinfo.isLinux:
installdir = mozinstall.install(self.bz2, self.tempdir)
mozinstall.uninstall(installdir)
self.assertFalse(os.path.exists(installdir))
--- a/testing/mozbase/mozleak/mozleak/__init__.py
+++ b/testing/mozbase/mozleak/mozleak/__init__.py
@@ -2,8 +2,10 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
mozleak is a library for extracting memory leaks from leak logs files.
"""
from .leaklog import process_leak_log
+
+__all__ = ['process_leak_log']
--- a/testing/mozbase/mozleak/mozleak/leaklog.py
+++ b/testing/mozbase/mozleak/mozleak/leaklog.py
@@ -1,19 +1,15 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
-import sys
-
-import mozinfo
-import mozrunner.utils
def _get_default_logger():
from mozlog import get_default_logger
log = get_default_logger(component='mozleak')
if not log:
import logging
@@ -66,18 +62,19 @@ def process_single_leak_file(leakLogFile
if numLeaked != 0 or name == "TOTAL":
log.info(line.rstrip())
# Analyse the leak log, but output later or it will interrupt the
# leak table
if name == "TOTAL":
# Multiple default processes can end up writing their bloat views into a single
# log, particularly on B2G. Eventually, these should be split into multiple
# logs (bug 1068869), but for now, we report the largest leak.
- if totalBytesLeaked != None:
- leakAnalysis.append("WARNING | leakcheck | %s multiple BloatView byte totals found"
+ if totalBytesLeaked is not None:
+ leakAnalysis.append("WARNING | leakcheck | %s "
+ "multiple BloatView byte totals found"
% processString)
else:
totalBytesLeaked = 0
if bytesLeaked > totalBytesLeaked:
totalBytesLeaked = bytesLeaked
# Throw out the information we had about the previous bloat
# view.
leakedObjectNames = []
@@ -189,17 +186,17 @@ def process_leak_log(leak_log_file, leak
# are not going to produce leak logs we will ever see.
knownProcessTypes = ["default", "plugin", "tab", "geckomediaplugin"]
for processType in knownProcessTypes:
log.info("TEST-INFO | leakcheck | %s process: leak threshold set at %d bytes"
% (processType, leakThresholds.get(processType, 0)))
for processType in leakThresholds:
- if not processType in knownProcessTypes:
+ if processType not in knownProcessTypes:
log.info("TEST-UNEXPECTED-FAIL | leakcheck | Unknown process type %s in leakThresholds"
% processType)
(leakLogFileDir, leakFileBase) = os.path.split(leakLogFile)
if leakFileBase[-4:] == ".log":
leakFileBase = leakFileBase[:-4]
fileNameRegExp = re.compile(r"_([a-z]*)_pid\d*.log$")
else:
@@ -208,15 +205,15 @@ def process_leak_log(leak_log_file, leak
for fileName in os.listdir(leakLogFileDir):
if fileName.find(leakFileBase) != -1:
thisFile = os.path.join(leakLogFileDir, fileName)
m = fileNameRegExp.search(fileName)
if m:
processType = m.group(1)
else:
processType = "default"
- if not processType in knownProcessTypes:
+ if processType not in knownProcessTypes:
log.info("TEST-UNEXPECTED-FAIL | leakcheck | Leak log with unknown process type %s"
% processType)
leakThreshold = leakThresholds.get(processType, 0)
process_single_leak_file(thisFile, processType, leakThreshold,
processType in ignoreMissingLeaks,
log=log, stackFixer=stack_fixer)
--- a/testing/mozbase/mozleak/setup.py
+++ b/testing/mozbase/mozleak/setup.py
@@ -9,17 +9,17 @@ PACKAGE_NAME = 'mozleak'
PACKAGE_VERSION = '0.1'
setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description="Library for extracting memory leaks from leak logs files",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozleak'],
zip_safe=False,
install_requires=[],
--- a/testing/mozbase/mozlog/mozlog/__init__.py
+++ b/testing/mozbase/mozlog/mozlog/__init__.py
@@ -19,8 +19,12 @@ from . import commandline
from . import structuredlog
from . import unstructured
from .structuredlog import get_default_logger, set_default_logger
from .proxy import get_proxy_logger
# Backwards compatibility shim for consumers that use mozlog.structured
structured = sys.modules[__name__]
sys.modules['{}.structured'.format(__name__)] = structured
+
+__all__ = ['commandline', 'structuredlog', 'unstructured',
+ 'get_default_logger', 'set_default_logger', 'get_proxy_logger',
+ 'structured']
--- a/testing/mozbase/mozlog/mozlog/commandline.py
+++ b/testing/mozbase/mozlog/mozlog/commandline.py
@@ -20,37 +20,43 @@ log_formatters = {
'mach': (formatters.MachFormatter, "Human-readable output"),
'tbpl': (formatters.TbplFormatter, "TBPL style log format"),
'errorsummary': (formatters.ErrorSummaryFormatter, argparse.SUPPRESS),
}
TEXT_FORMATTERS = ('raw', 'mach')
"""a subset of formatters for non test harnesses related applications"""
+
def level_filter_wrapper(formatter, level):
return handlers.LogLevelFilter(formatter, level)
+
def verbose_wrapper(formatter, verbose):
formatter.verbose = verbose
return formatter
+
def compact_wrapper(formatter, compact):
formatter.compact = compact
return formatter
+
def buffer_handler_wrapper(handler, buffer_limit):
if buffer_limit == "UNLIMITED":
buffer_limit = None
else:
buffer_limit = int(buffer_limit)
return handlers.BufferHandler(handler, buffer_limit)
+
def valgrind_handler_wrapper(handler):
return handlers.ValgrindHandler(handler)
+
def default_formatter_options(log_type, overrides):
formatter_option_defaults = {
"raw": {
"level": "debug"
}
}
rv = {"verbose": False,
"level": "info"}
@@ -66,17 +72,18 @@ fmt_options = {
# "action" is used by the commandline parser in use.
'verbose': (verbose_wrapper,
"Enables verbose mode for the given formatter.",
["mach"], "store_true"),
'compact': (compact_wrapper,
"Enables compact mode for the given formatter.",
["tbpl"], "store_true"),
'level': (level_filter_wrapper,
- "A least log level to subscribe to for the given formatter (debug, info, error, etc.)",
+ "A least log level to subscribe to for the given formatter "
+ "(debug, info, error, etc.)",
["mach", "raw", "tbpl"], "store"),
'buffer': (buffer_handler_wrapper,
"If specified, enables message buffering at the given buffer size limit.",
["mach", "tbpl"], "store"),
}
def log_file(name):
@@ -129,18 +136,18 @@ def add_logging_group(parser, include_fo
opt_log_type = log_file
group_add = group.add_argument
for name, (cls, help_str) in log_formatters.iteritems():
if name in include_formatters:
group_add("--log-" + name, action="append", type=opt_log_type,
help=help_str)
- for optname, (cls, help_str, formatters, action) in fmt_options.iteritems():
- for fmt in formatters:
+ for optname, (cls, help_str, formatters_, action) in fmt_options.iteritems():
+ for fmt in formatters_:
# make sure fmt is in log_formatters and is accepted
if fmt in log_formatters and fmt in include_formatters:
group_add("--log-%s-%s" % (fmt, optname), action=action,
help=help_str, default=None)
def setup_handlers(logger, formatters, formatter_options, allow_unused_options=False):
"""
@@ -177,17 +184,18 @@ def setup_handlers(logger, formatters, f
for value in streams:
handler = handlers.StreamHandler(stream=value, formatter=formatter)
for wrapper, wrapper_args in handler_wrappers_and_options:
handler = wrapper(handler, *wrapper_args)
logger.add_handler(handler)
-def setup_logging(logger, args, defaults=None, formatter_defaults=None, allow_unused_options=False):
+def setup_logging(logger, args, defaults=None, formatter_defaults=None,
+ allow_unused_options=False):
"""
Configure a structuredlogger based on command line arguments.
The created structuredlogger will also be set as the default logger, and
can be retrieved with :py:func:`~mozlog.get_default_logger`.
:param logger: A StructuredLogger instance or string name. If a string, a
new StructuredLogger instance will be created using
@@ -245,17 +253,17 @@ def setup_logging(logger, args, defaults
formatters[formatter].append(value)
if len(parts) == 3:
_, formatter, opt = parts
if formatter not in formatter_options:
formatter_options[formatter] = default_formatter_options(formatter,
formatter_defaults)
formatter_options[formatter][opt] = values
- #If there is no user-specified logging, go with the default options
+ # If there is no user-specified logging, go with the default options
if not found:
for name, value in defaults.iteritems():
formatters[name].append(value)
elif not found_stdout_logger and sys.stdout in defaults.values():
for name, value in defaults.iteritems():
if value == sys.stdout:
formatters[name].append(value)
--- a/testing/mozbase/mozlog/mozlog/formatters/__init__.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/__init__.py
@@ -12,8 +12,12 @@ from errorsummary import ErrorSummaryFor
try:
import ujson as json
except ImportError:
import json
def JSONFormatter():
return lambda x: json.dumps(x) + "\n"
+
+__all__ = ['UnittestFormatter', 'XUnitFormatter', 'HTMLFormatter',
+ 'MachFormatter', 'TbplFormatter', 'ErrorSummaryFormatter',
+ 'JSONFormatter']
--- a/testing/mozbase/mozlog/mozlog/formatters/base.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/base.py
@@ -1,14 +1,15 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ..reader import LogHandler
+
class BaseFormatter(LogHandler):
"""Base class for implementing non-trivial formatters.
Subclasses are expected to provide a method for each action type they
wish to handle, each taking a single argument for the test data.
For example a trivial subclass that just produces the id of each test as
it starts might be::
--- a/testing/mozbase/mozlog/mozlog/formatters/errorsummary.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/errorsummary.py
@@ -1,17 +1,19 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from base import BaseFormatter
+
class ErrorSummaryFormatter(BaseFormatter):
+
def __init__(self):
self.line_count = 0
def __call__(self, data):
rv = BaseFormatter.__call__(self, data)
self.line_count += 1
return rv
--- a/testing/mozbase/mozlog/mozlog/formatters/html/__init__.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/__init__.py
@@ -1,1 +1,3 @@
from html import HTMLFormatter
+
+__all__ = ['HTMLFormatter']
--- a/testing/mozbase/mozlog/mozlog/formatters/html/html.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/html.py
@@ -12,25 +12,27 @@ from .. import base
from collections import defaultdict
html = None
raw = None
base_path = os.path.split(__file__)[0]
+
def do_defered_imports():
global html
global raw
from .xmlgen import html, raw
class HTMLFormatter(base.BaseFormatter):
"""Formatter that produces a simple HTML-formatted report."""
+
def __init__(self):
do_defered_imports()
self.suite_name = None
self.result_rows = []
self.test_count = defaultdict(int)
self.start_times = {}
self.suite_times = {"start": None,
"end": None}
@@ -47,39 +49,44 @@ class HTMLFormatter(base.BaseFormatter):
html.style(raw(f.read())))
date_format = "%d %b %Y %H:%M:%S"
version_info = data.get("version_info")
if version_info:
self.env["Device identifier"] = version_info.get("device_id")
self.env["Device firmware (base)"] = version_info.get("device_firmware_version_base")
self.env["Device firmware (date)"] = (
- datetime.utcfromtimestamp(int(version_info.get("device_firmware_date"))).strftime(date_format) if
+ datetime.utcfromtimestamp(int(version_info.get("device_firmware_date")))
+ .strftime(date_format) if
"device_firmware_date" in version_info else None)
- self.env["Device firmware (incremental)"] = version_info.get("device_firmware_version_incremental")
- self.env["Device firmware (release)"] = version_info.get("device_firmware_version_release")
+ self.env["Device firmware (incremental)"] = version_info.get(
+ "device_firmware_version_incremental")
+ self.env["Device firmware (release)"] = version_info.get(
+ "device_firmware_version_release")
self.env["Gaia date"] = (
- datetime.utcfromtimestamp(int(version_info.get("gaia_date"))).strftime(date_format) if
+ datetime.utcfromtimestamp(int(version_info.get("gaia_date")))
+ .strftime(date_format) if
"gaia_date" in version_info else None)
self.env["Gecko version"] = version_info.get("application_version")
self.env["Gecko build"] = version_info.get("application_buildid")
if version_info.get("application_changeset"):
self.env["Gecko revision"] = version_info.get("application_changeset")
if version_info.get("application_repository"):
self.env["Gecko revision"] = html.a(
version_info.get("application_changeset"),
href="/".join([version_info.get("application_repository"),
version_info.get("application_changeset")]),
target="_blank")
if version_info.get("gaia_changeset"):
self.env["Gaia revision"] = html.a(
version_info.get("gaia_changeset")[:12],
- href="https://github.com/mozilla-b2g/gaia/commit/%s" % version_info.get("gaia_changeset"),
+ href="https://github.com/mozilla-b2g/gaia/commit/%s" % version_info.get(
+ "gaia_changeset"),
target="_blank")
device_info = data.get("device_info")
if device_info:
self.env["Device uptime"] = device_info.get("uptime")
self.env["Device memory"] = device_info.get("memtotal")
self.env["Device serial"] = device_info.get("id")
@@ -97,18 +104,18 @@ class HTMLFormatter(base.BaseFormatter):
tc_time = (data["time"] - self.start_times.pop(data["test"])) / 1000.
additional_html = []
debug = data.get("extra", {})
# Add support for log exported from wptrunner. The structure of
# reftest_screenshots is listed in wptrunner/executors/base.py.
if debug.get('reftest_screenshots'):
log_data = debug.get("reftest_screenshots", {})
debug = {
- 'image1':'data:image/png;base64,' + log_data[0].get("screenshot", {}),
- 'image2':'data:image/png;base64,' + log_data[2].get("screenshot", {}),
+ 'image1': 'data:image/png;base64,' + log_data[0].get("screenshot", {}),
+ 'image2': 'data:image/png;base64,' + log_data[2].get("screenshot", {}),
'differences': "Not Implemented",
}
links_html = []
status = status_name = data["status"]
expected = data.get("expected", status)
@@ -117,18 +124,18 @@ class HTMLFormatter(base.BaseFormatter):
elif status not in ("PASS", "SKIP"):
status_name = "EXPECTED_" + status
self.test_count[status_name] += 1
if status in ['SKIP', 'FAIL', 'ERROR']:
if debug.get('differences'):
images = [
- ('image1','Image 1 (test)'),
- ('image2','Image 2 (reference)')
+ ('image1', 'Image 1 (test)'),
+ ('image2', 'Image 2 (reference)')
]
for title, description in images:
screenshot = '%s' % debug[title]
additional_html.append(html.div(
html.a(html.img(src=screenshot), href="#"),
html.br(),
html.a(description),
class_='screenshot'))
@@ -191,35 +198,39 @@ class HTMLFormatter(base.BaseFormatter):
self.head,
html.body(
html.script(raw(main_f.read())),
html.p('Report generated on %s at %s' % (
generated.strftime('%d-%b-%Y'),
generated.strftime('%H:%M:%S'))),
html.h2('Environment'),
html.table(
- [html.tr(html.td(k), html.td(v)) for k, v in sorted(self.env.items()) if v],
+ [html.tr(html.td(k), html.td(v))
+ for k, v in sorted(self.env.items()) if v],
id='environment'),
html.h2('Summary'),
html.p('%i tests ran in %.1f seconds.' % (sum(self.test_count.itervalues()),
(self.suite_times["end"] -
self.suite_times["start"]) / 1000.),
html.br(),
html.span('%i passed' % self.test_count["PASS"], class_='pass'), ', ',
html.span('%i skipped' % self.test_count["SKIP"], class_='skip'), ', ',
- html.span('%i failed' % self.test_count["UNEXPECTED_FAIL"], class_='fail'), ', ',
- html.span('%i errors' % self.test_count["UNEXPECTED_ERROR"], class_='error'), '.',
+ html.span('%i failed' % self.test_count[
+ "UNEXPECTED_FAIL"], class_='fail'), ', ',
+ html.span('%i errors' % self.test_count[
+ "UNEXPECTED_ERROR"], class_='error'), '.',
html.br(),
html.span('%i expected failures' % self.test_count["EXPECTED_FAIL"],
class_='expected_fail'), ', ',
html.span('%i unexpected passes' % self.test_count["UNEXPECTED_PASS"],
class_='unexpected_pass'), '.'),
html.h2('Results'),
html.table([html.thead(
html.tr([
html.th('Result', class_='sortable', col='result'),
html.th('Test', class_='sortable', col='name'),
html.th('Duration', class_='sortable numeric', col='duration'),
html.th('Links')]), id='results-table-head'),
- html.tbody(self.result_rows, id='results-table-body')], id='results-table')))
+ html.tbody(self.result_rows,
+ id='results-table-body')], id='results-table')))
return u"<!DOCTYPE html>\n" + doc.unicode(indent=2)
--- a/testing/mozbase/mozlog/mozlog/formatters/html/xmlgen.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/html/xmlgen.py
@@ -16,49 +16,55 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE F
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This file is originally from: https://bitbucket.org/hpk42/py, specifically:
https://bitbucket.org/hpk42/py/src/980c8d526463958ee7cae678a7e4e9b054f36b94/py/_xmlgen.py?at=default
by holger krekel, holger at merlinux eu. 2009
"""
-import sys, re
+import sys
+import re
-if sys.version_info >= (3,0):
+if sys.version_info >= (3, 0):
def u(s):
return s
+
def unicode(x):
if hasattr(x, '__unicode__'):
return x.__unicode__()
return str(x)
else:
def u(s):
return unicode(s)
unicode = unicode
class NamespaceMetaclass(type):
+
def __getattr__(self, name):
if name[:1] == '_':
raise AttributeError(name)
if self == Namespace:
raise ValueError("Namespace class is abstract")
tagspec = self.__tagspec__
if tagspec is not None and name not in tagspec:
raise AttributeError(name)
classattr = {}
if self.__stickyname__:
classattr['xmlname'] = name
cls = type(name, (self.__tagclass__,), classattr)
setattr(self, name, cls)
return cls
+
class Tag(list):
+
class Attr(object):
+
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __init__(self, *args, **kwargs):
super(Tag, self).__init__(args)
self.attr = self.Attr(**kwargs)
def __unicode__(self):
@@ -75,58 +81,66 @@ class Tag(list):
return "<%r tag object %d>" % (name, id(self))
Namespace = NamespaceMetaclass('Namespace', (object, ), {
'__tagspec__': None,
'__tagclass__': Tag,
'__stickyname__': False,
})
+
class HtmlTag(Tag):
+
def unicode(self, indent=2):
l = []
HtmlVisitor(l.append, indent, shortempty=False).visit(self)
return u("").join(l)
# exported plain html namespace
+
+
class html(Namespace):
__tagclass__ = HtmlTag
__stickyname__ = True
- __tagspec__ = dict([(x,1) for x in (
+ __tagspec__ = dict([(x, 1) for x in (
'a,abbr,acronym,address,applet,area,b,bdo,big,blink,'
'blockquote,body,br,button,caption,center,cite,code,col,'
'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,'
'fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,'
'i,iframe,img,input,ins,kbd,label,legend,li,link,listing,'
'map,marquee,menu,meta,multicol,nobr,noembed,noframes,'
'noscript,object,ol,optgroup,option,p,pre,q,s,script,'
'select,small,span,strike,strong,style,sub,sup,table,'
'tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,'
'base,basefont,frame,hr,isindex,param,samp,var'
).split(',') if x])
class Style(object):
+
def __init__(self, **kw):
for x, y in kw.items():
x = x.replace('_', '-')
setattr(self, x, y)
class raw(object):
"""just a box that can contain a unicode string that will be
included directly in the output"""
+
def __init__(self, uniobj):
self.uniobj = uniobj
+
class SimpleUnicodeVisitor(object):
""" recursive visitor to write unicode. """
+
def __init__(self, write, indent=0, curindent=0, shortempty=True):
self.write = write
self.cache = {}
- self.visited = {} # for detection of recursion
+ self.visited = {} # for detection of recursion
self.indent = indent
self.curindent = curindent
self.parents = []
self.shortempty = shortempty # short empty tags or not
def visit(self, node):
""" dispatcher on node's class/bases name. """
cls = node.__class__
@@ -140,17 +154,17 @@ class SimpleUnicodeVisitor(object):
else:
visitmethod = self.__object
self.cache[cls] = visitmethod
visitmethod(node)
# the default fallback handler is marked private
# to avoid clashes with the tag name object
def __object(self, obj):
- #self.write(obj)
+ # self.write(obj)
self.write(escape(unicode(obj)))
def raw(self, obj):
self.write(obj.uniobj)
def list(self, obj):
assert id(obj) not in self.visited
self.visited[id(obj)] = 1
@@ -172,17 +186,17 @@ class SimpleUnicodeVisitor(object):
self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
self.parents.append(tag)
for x in tag:
self.visit(x)
self.parents.pop()
self.write(u('</%s>') % tagname)
self.curindent -= self.indent
else:
- nameattr = tagname+self.attributes(tag)
+ nameattr = tagname + self.attributes(tag)
if self._issingleton(tagname):
self.write(u('<%s/>') % (nameattr,))
else:
self.write(u('<%s></%s>') % (nameattr, tagname))
def attributes(self, tag):
# serialize attributes
attrlist = dir(tag.attr)
@@ -208,57 +222,59 @@ class SimpleUnicodeVisitor(object):
def getstyle(self, tag):
""" return attribute list suitable for styling. """
try:
styledict = tag.style.__dict__
except AttributeError:
return []
else:
- stylelist = [x+': ' + y for x,y in styledict.items()]
+ stylelist = [x + ': ' + y for x, y in styledict.items()]
return [u(' style="%s"') % u('; ').join(stylelist)]
def _issingleton(self, tagname):
"""can (and will) be overridden in subclasses"""
return self.shortempty
def _isinline(self, tagname):
"""can (and will) be overridden in subclasses"""
return False
+
class HtmlVisitor(SimpleUnicodeVisitor):
single = dict([(x, 1) for x in
- ('br,img,area,param,col,hr,meta,link,base,'
+ ('br,img,area,param,col,hr,meta,link,base,'
'input,frame').split(',')])
inline = dict([(x, 1) for x in
- ('a abbr acronym b basefont bdo big br cite code dfn em font '
- 'i img input kbd label q s samp select small span strike '
- 'strong sub sup textarea tt u var'.split(' '))])
+ ('a abbr acronym b basefont bdo big br cite code dfn em font '
+ 'i img input kbd label q s samp select small span strike '
+ 'strong sub sup textarea tt u var'.split(' '))])
def repr_attribute(self, attrs, name):
if name == 'class_':
value = getattr(attrs, name)
if value is None:
return
return super(HtmlVisitor, self).repr_attribute(attrs, name)
def _issingleton(self, tagname):
return tagname in self.single
def _isinline(self, tagname):
return tagname in self.inline
class _escape:
+
def __init__(self):
self.escape = {
-            u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
-            u('&') : u('&amp;'), u("'") : u('&#39;'),
-            }
+            u('"'): u('&quot;'), u('<'): u('&lt;'), u('>'): u('&gt;'),
+            u('&'): u('&amp;'), u("'"): u('&#39;'),
+        }
self.charef_rex = re.compile(u("|").join(self.escape.keys()))
def _replacer(self, match):
return self.escape[match.group(0)]
def __call__(self, ustring):
""" xml-escape the given unicode string. """
ustring = unicode(ustring)
--- a/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/machformatter.py
@@ -8,29 +8,34 @@ from collections import defaultdict
try:
import blessings
except ImportError:
blessings = None
import base
from .process import strstatus
+
def format_seconds(total):
"""Format number of seconds to MM:SS.DD form."""
minutes, seconds = divmod(total, 60)
return '%2d:%05.2f' % (minutes, seconds)
+
class NullTerminal(object):
+
def __getattr__(self, name):
return self._id
def _id(self, value):
return value
+
class MachFormatter(base.BaseFormatter):
+
def __init__(self, start_time=None, write_interval=False, write_times=True,
terminal=None, disable_colors=False):
if disable_colors:
terminal = None
elif terminal is None and blessings is not None:
terminal = blessings.Terminal()
@@ -128,17 +133,17 @@ class MachFormatter(base.BaseFormatter):
else:
rv.append("Ran %i tests" % self.summary_values["tests"])
rv.append("Expected results: %i" % self.summary_values["expected"])
unexpected_count = sum(self.summary_values["unexpected"].values())
if unexpected_count > 0:
unexpected_str = " (%s)" % ", ".join("%s: %i" % (key, value) for key, value in
- sorted(self.summary_values["unexpected"].items()))
+ sorted(self.summary_values["unexpected"].items()))
else:
unexpected_str = ""
rv.append("Unexpected results: %i%s" % (unexpected_count, unexpected_str))
if self.summary_values["skipped"] > 0:
rv.append("Skipped: %i" % self.summary_values["skipped"])
rv.append("")
@@ -203,17 +208,17 @@ class MachFormatter(base.BaseFormatter):
expected_str = ""
test = self._get_test_id(data)
if unexpected:
self.summary_unexpected.append((test, unexpected))
self._update_summary(data)
- #Reset the counts to 0
+ # Reset the counts to 0
self.status_buffer[test] = {"count": 0, "unexpected": [], "pass": 0}
self.has_unexpected[test] = bool(unexpected)
if subtests["count"] != 0:
rv = "Harness %s%s. Subtests passed %i/%i. Unexpected %s" % (
data["status"], expected_str, subtests["pass"], subtests["count"],
len(unexpected))
else:
@@ -353,39 +358,38 @@ class MachFormatter(base.BaseFormatter):
if "stack" in data:
rv += "\n%s" % data["stack"]
return rv
def lint(self, data):
term = self.terminal if self.terminal is not None else NullTerminal()
- fmt = "{path} {c1}{lineno}{column} {c2}{level}{normal} {message} {c1}{rule}({linter}){normal}"
+ fmt = "{path} {c1}{lineno}{column} {c2}{level}{normal} {message}" \
+ " {c1}{rule}({linter}){normal}"
message = fmt.format(
path=data["path"],
normal=term.normal,
c1=term.grey,
c2=term.red if data["level"] == 'error' else term.yellow,
lineno=str(data["lineno"]),
column=(":" + str(data["column"])) if data.get("column") else "",
level=data["level"],
message=data["message"],
rule='{} '.format(data["rule"]) if data.get("rule") else "",
linter=data["linter"].lower() if data.get("linter") else "",
)
return message
-
def _get_subtest_data(self, data):
test = self._get_test_id(data)
return self.status_buffer.get(test, {"count": 0, "unexpected": [], "pass": 0})
def _time(self, data):
entry_time = data["time"]
if self.write_interval and self.last_time is not None:
t = entry_time - self.last_time
self.last_time = entry_time
else:
t = entry_time - self.start_time
return t / 1000.
-
--- a/testing/mozbase/mozlog/mozlog/formatters/process.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/process.py
@@ -16,23 +16,23 @@ def strsig(n):
global _SIG_NAME
if _SIG_NAME is None:
# cache signal names
_SIG_NAME = {}
for k in dir(signal):
if (k.startswith("SIG")
and not k.startswith("SIG_")
- and k != "SIGCLD" and k != "SIGPOLL"):
+ and k != "SIGCLD" and k != "SIGPOLL"):
_SIG_NAME[getattr(signal, k)] = k
# Realtime signals mostly have no names
if hasattr(signal, "SIGRTMIN") and hasattr(signal, "SIGRTMAX"):
- for r in range(signal.SIGRTMIN+1, signal.SIGRTMAX+1):
+ for r in range(signal.SIGRTMIN + 1, signal.SIGRTMAX + 1):
_SIG_NAME[r] = "SIGRTMIN+" + str(r - signal.SIGRTMIN)
if n < 0 or n >= signal.NSIG:
return "out-of-range signal, number %s" % n
try:
return _SIG_NAME[n]
except KeyError:
return "unrecognized signal, number %s" % n
--- a/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/tbplformatter.py
@@ -3,16 +3,17 @@
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import functools
from collections import deque
from .base import BaseFormatter
from .process import strstatus
+
def output_subtests(func):
@functools.wraps(func)
def inner(self, data):
if self.subtests_count:
return self._format_subtests(data.get("component")) + func(self, data)
else:
return func(self, data)
return inner
@@ -186,24 +187,26 @@ class TbplFormatter(BaseFormatter):
message += "\n%s" % data["stack"]
if message and message[-1] == "\n":
message = message[:-1]
extra = data.get("extra", {})
if "reftest_screenshots" in extra:
screenshots = extra["reftest_screenshots"]
if len(screenshots) == 3:
- message += ("\nREFTEST IMAGE 1 (TEST): data:image/png;base64,%s\n"
- "REFTEST IMAGE 2 (REFERENCE): data:image/png;base64,%s") % (screenshots[0]["screenshot"],
- screenshots[2]["screenshot"])
+ message += ("\nREFTEST IMAGE 1 (TEST): data:image/png;base64,%s\n"
+ "REFTEST IMAGE 2 (REFERENCE): data:image/png;base64,%s") % (
+ screenshots[0]["screenshot"],
+ screenshots[2]["screenshot"])
elif len(screenshots) == 1:
- message += "\nREFTEST IMAGE: data:image/png;base64,%(image1)s" % screenshots[0]["screenshot"]
+ message += "\nREFTEST IMAGE: data:image/png;base64,%(image1)s" \
+ % screenshots[0]["screenshot"]
failure_line = "TEST-UNEXPECTED-%s | %s | %s\n" % (
- data["status"], test_id, message)
+ data["status"], test_id, message)
if data["expected"] not in ("PASS", "OK"):
expected_msg = "expected %s | " % data["expected"]
else:
expected_msg = ""
info_line = "TEST-INFO %s%s\n" % (expected_msg, duration_msg)
return failure_line + info_line
@@ -233,9 +236,9 @@ class TbplFormatter(BaseFormatter):
rv = rv + line + "\n"
return rv
def lint(self, data):
fmt = "TEST-UNEXPECTED-{level} | {path}:{lineno}{column} | {message} ({rule})"
data["column"] = ":%s" % data["column"] if data["column"] else ""
data['rule'] = data['rule'] or data['linter'] or ""
- message.append(fmt.format(**data))
+        return fmt.format(**data)
--- a/testing/mozbase/mozlog/mozlog/formatters/unittest.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/unittest.py
@@ -1,18 +1,20 @@
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import base
+
class UnittestFormatter(base.BaseFormatter):
"""Formatter designed to produce output in a format like that used by
the ``unittest`` module in the standard library."""
+
def __init__(self):
self.fails = []
self.errors = []
self.tests_run = 0
self.start_time = None
self.end_time = None
def suite_start(self, data):
--- a/testing/mozbase/mozlog/mozlog/formatters/xunit.py
+++ b/testing/mozbase/mozlog/mozlog/formatters/xunit.py
@@ -1,21 +1,22 @@
import types
from xml.etree import ElementTree
import base
+
def format_test_id(test_id):
"""Take a test id and return something that looks a bit like
a class path"""
if type(test_id) not in types.StringTypes:
- #Not sure how to deal with reftests yet
+ # Not sure how to deal with reftests yet
raise NotImplementedError
- #Turn a path into something like a class heirachy
+ # Turn a path into something like a class heirachy
return test_id.replace('.', '_').replace('/', ".")
class XUnitFormatter(base.BaseFormatter):
"""Formatter that produces XUnit-style XML output.
The tree is created in-memory so this formatter may be problematic
with very large log files.
--- a/testing/mozbase/mozlog/mozlog/handlers/__init__.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/__init__.py
@@ -1,8 +1,11 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import LogLevelFilter, StreamHandler, BaseHandler
from .statushandler import StatusHandler
from .bufferhandler import BufferHandler
from .valgrindhandler import ValgrindHandler
+
+__all__ = ['LogLevelFilter', 'StreamHandler', 'BaseHandler',
+ 'StatusHandler', 'BufferHandler', 'ValgrindHandler']
--- a/testing/mozbase/mozlog/mozlog/handlers/base.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/base.py
@@ -46,24 +46,25 @@ class BaseHandler(object):
class LogLevelFilter(BaseHandler):
"""Handler that filters out messages with action of log and a level
lower than some specified level.
:param inner: Handler to use for messages that pass this filter
:param level: Minimum log level to process
"""
+
def __init__(self, inner, level):
BaseHandler.__init__(self, inner)
self.inner = inner
self.level = log_levels[level.upper()]
def __call__(self, item):
if (item["action"] != "log" or
- log_levels[item["level"].upper()] <= self.level):
+ log_levels[item["level"].upper()] <= self.level):
return self.inner(item)
class StreamHandler(BaseHandler):
"""Handler for writing to a file-like object
:param stream: File-like object to write log messages to
:param formatter: formatter to convert messages to string format
--- a/testing/mozbase/mozlog/mozlog/handlers/bufferhandler.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/bufferhandler.py
@@ -1,14 +1,15 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import BaseHandler
+
class BufferHandler(BaseHandler):
"""Handler that maintains a circular buffer of messages based on the
size and actions specified by a user.
:param inner: The underlying handler used to emit messages.
:param message_limit: The maximum number of messages to retain for
context. If None, the buffer will grow without limit.
:param buffered_actions: The set of actions to include in the buffer
--- a/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/statushandler.py
@@ -2,53 +2,51 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from collections import (
defaultdict,
namedtuple,
)
-from mozlog.structuredlog import log_levels
RunSummary = namedtuple("RunSummary",
("unexpected_statuses",
"expected_statuses",
"log_level_counts",
"action_counts"))
+
class StatusHandler(object):
"""A handler used to determine an overall status for a test run according
to a sequence of log messages."""
def __init__(self):
# The count of each type of unexpected result status (includes tests and subtests)
self.unexpected_statuses = defaultdict(int)
# The count of each type of expected result status (includes tests and subtests)
self.expected_statuses = defaultdict(int)
# The count of actions logged
self.action_counts = defaultdict(int)
# The count of messages logged at each log level
self.log_level_counts = defaultdict(int)
-
def __call__(self, data):
action = data['action']
self.action_counts[action] += 1
if action == 'log':
self.log_level_counts[data['level']] += 1
if action in ('test_status', 'test_end'):
status = data['status']
if 'expected' in data:
self.unexpected_statuses[status] += 1
else:
self.expected_statuses[status] += 1
-
def summarize(self):
return RunSummary(
dict(self.unexpected_statuses),
dict(self.expected_statuses),
dict(self.log_level_counts),
dict(self.action_counts),
)
--- a/testing/mozbase/mozlog/mozlog/handlers/valgrindhandler.py
+++ b/testing/mozbase/mozlog/mozlog/handlers/valgrindhandler.py
@@ -1,39 +1,42 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import BaseHandler
import re
+
class ValgrindHandler(BaseHandler):
def __init__(self, inner):
BaseHandler.__init__(self, inner)
self.inner = inner
self.vFilter = ValgrindFilter()
def __call__(self, data):
tmp = self.vFilter(data)
if tmp is not None:
self.inner(tmp)
+
class ValgrindFilter(object):
'''
A class for handling Valgrind output.
Valgrind errors look like this:
- ==60741== 40 (24 direct, 16 indirect) bytes in 1 blocks are definitely lost in loss record 2,746 of 5,235
+ ==60741== 40 (24 direct, 16 indirect) bytes in 1 blocks are definitely lost in loss
+ record 2,746 of 5,235
==60741== at 0x4C26B43: calloc (vg_replace_malloc.c:593)
==60741== by 0x63AEF65: PR_Calloc (prmem.c:443)
==60741== by 0x69F236E: PORT_ZAlloc_Util (secport.c:117)
==60741== by 0x69F1336: SECITEM_AllocItem_Util (secitem.c:28)
- ==60741== by 0xA04280B: ffi_call_unix64 (in /builds/slave/m-in-l64-valgrind-000000000000/objdir/toolkit/library/libxul.so)
+ ==60741== by 0xA04280B: ffi_call_unix64 (in /builds/slave/m-in-l64-valgrind-000000000000/objdir/toolkit/library/libxul.so) # noqa
==60741== by 0xA042443: ffi_call (ffi64.c:485)
For each such error, this class extracts most or all of the first (error
kind) line, plus the function name in each of the first few stack entries.
With this data it constructs and prints a TEST-UNEXPECTED-FAIL message that
TBPL will highlight.
It buffers these lines from which text is extracted so that the
@@ -49,30 +52,30 @@ class ValgrindFilter(object):
appropriately.
'''
def __init__(self):
# The regexps in this list match all of Valgrind's errors. Note that
# Valgrind is English-only, so we don't have to worry about
# localization.
self.re_error = \
- re.compile( \
- r'==\d+== (' + \
- r'(Use of uninitialised value of size \d+)|' + \
- r'(Conditional jump or move depends on uninitialised value\(s\))|' + \
- r'(Syscall param .* contains uninitialised byte\(s\))|' + \
- r'(Syscall param .* points to (unaddressable|uninitialised) byte\(s\))|' + \
- r'((Unaddressable|Uninitialised) byte\(s\) found during client check request)|' + \
- r'(Invalid free\(\) / delete / delete\[\] / realloc\(\))|' + \
- r'(Mismatched free\(\) / delete / delete \[\])|' + \
- r'(Invalid (read|write) of size \d+)|' + \
- r'(Jump to the invalid address stated on the next line)|' + \
- r'(Source and destination overlap in .*)|' + \
- r'(.* bytes in .* blocks are .* lost)' + \
- r')' \
+ re.compile(
+ r'==\d+== (' +
+ r'(Use of uninitialised value of size \d+)|' +
+ r'(Conditional jump or move depends on uninitialised value\(s\))|' +
+ r'(Syscall param .* contains uninitialised byte\(s\))|' +
+ r'(Syscall param .* points to (unaddressable|uninitialised) byte\(s\))|' +
+ r'((Unaddressable|Uninitialised) byte\(s\) found during client check request)|' +
+ r'(Invalid free\(\) / delete / delete\[\] / realloc\(\))|' +
+ r'(Mismatched free\(\) / delete / delete \[\])|' +
+ r'(Invalid (read|write) of size \d+)|' +
+ r'(Jump to the invalid address stated on the next line)|' +
+ r'(Source and destination overlap in .*)|' +
+ r'(.* bytes in .* blocks are .* lost)' +
+ r')'
)
# Match identifer chars, plus ':' for namespaces, and '\?' in order to
# match "???" which Valgrind sometimes produces.
self.re_stack_entry = \
re.compile(r'^==\d+==.*0x[A-Z0-9]+: ([A-Za-z0-9_:\?]+)')
self.re_suppression = \
re.compile(r' *<insert_a_suppression_name_here>')
self.error_count = 0
@@ -114,24 +117,24 @@ class ValgrindFilter(object):
if self.number_of_stack_entries_to_get != 0:
self.curr_failure_msg += ' / '
else:
# We've finished getting the first few stack entries. Emit
# the failure action, comprising the primary message and the
# buffered lines, and then reset state. Copy the mandatory
# fields from the incoming message, since there's nowhere
# else to get them from.
- output_message = { # Mandatory fields
- u"action": "valgrind_error",
- u"time": msg["time"],
- u"thread": msg["thread"],
- u"pid": msg["pid"],
- u"source": msg["source"],
- # valgrind_error specific fields
- u"primary": self.curr_failure_msg,
- u"secondary": self.buffered_lines }
+ output_message = { # Mandatory fields
+ u"action": "valgrind_error",
+ u"time": msg["time"],
+ u"thread": msg["thread"],
+ u"pid": msg["pid"],
+ u"source": msg["source"],
+ # valgrind_error specific fields
+ u"primary": self.curr_failure_msg,
+ u"secondary": self.buffered_lines}
self.curr_failure_msg = ""
self.buffered_lines = []
if re.match(self.re_suppression, line):
self.suppression_count += 1
return output_message
--- a/testing/mozbase/mozlog/mozlog/logtypes.py
+++ b/testing/mozbase/mozlog/mozlog/logtypes.py
@@ -1,17 +1,19 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
convertor_registry = {}
missing = object()
no_default = object()
+
class log_action(object):
+
def __init__(self, *args):
self.args = {}
self.args_no_default = []
self.args_with_default = []
# These are the required fields in a log message that usually aren't
# supplied by the caller, but can be in the case of log_raw
@@ -32,17 +34,16 @@ class log_action(object):
if arg.name in self.args:
raise ValueError("Repeated argument name %s" % arg.name)
self.args[arg.name] = arg
for extra in self.default_args:
self.args[extra.name] = extra
-
def __call__(self, f):
convertor_registry[f.__name__] = self
converter = self
def inner(self, *args, **kwargs):
data = converter.convert(*args, **kwargs)
return f(self, data)
@@ -67,17 +68,16 @@ class log_action(object):
raise TypeError("Too many arguments")
for i, name in enumerate(positional_no_default):
values[name] = args[i]
positional_with_default = [self.args_with_default[i]
for i in range(len(args) - num_no_default)]
-
for i, name in enumerate(positional_with_default):
if name in values:
raise TypeError("Argument %s specified twice" % name)
values[name] = args[i + num_no_default]
# Fill in missing arguments
for name in self.args_with_default:
if name not in values:
@@ -93,17 +93,19 @@ class log_action(object):
return data
def convert_known(self, **kwargs):
known_kwargs = {name: value for name, value in kwargs.iteritems()
if name in self.args}
return self.convert(**known_kwargs)
+
class DataType(object):
+
def __init__(self, name, default=no_default, optional=False):
self.name = name
self.default = default
if default is no_default and optional is not False:
raise ValueError("optional arguments require a default value")
self.optional = optional
@@ -115,73 +117,87 @@ class DataType(object):
return self.default
try:
return self.convert(value)
except:
raise ValueError("Failed to convert value %s of type %s for field %s to type %s" %
(value, type(value).__name__, self.name, self.__class__.__name__))
+
class Unicode(DataType):
+
def convert(self, data):
if isinstance(data, unicode):
return data
if isinstance(data, str):
return data.decode("utf8", "replace")
return unicode(data)
+
class TestId(DataType):
+
def convert(self, data):
if isinstance(data, unicode):
return data
elif isinstance(data, bytes):
return data.decode("utf-8", "replace")
elif isinstance(data, (tuple, list)):
# This is really a bit of a hack; should really split out convertors from the
# fields they operate on
func = Unicode(None).convert
return tuple(func(item) for item in data)
else:
raise ValueError
+
class Status(DataType):
allowed = ["PASS", "FAIL", "OK", "ERROR", "TIMEOUT", "CRASH", "ASSERT", "SKIP"]
+
def convert(self, data):
value = data.upper()
if value not in self.allowed:
raise ValueError
return value
+
class SubStatus(Status):
allowed = ["PASS", "FAIL", "ERROR", "TIMEOUT", "ASSERT", "NOTRUN", "SKIP"]
+
class Dict(DataType):
+
def convert(self, data):
return dict(data)
class List(DataType):
+
def __init__(self, name, item_type, default=no_default, optional=False):
DataType.__init__(self, name, default, optional)
self.item_type = item_type(None)
def convert(self, data):
return [self.item_type.convert(item) for item in data]
+
class Int(DataType):
+
def convert(self, data):
return int(data)
class Any(DataType):
+
def convert(self, data):
return data
class Tuple(DataType):
+
def __init__(self, name, item_types, default=no_default, optional=False):
DataType.__init__(self, name, default, optional)
self.item_types = item_types
def convert(self, data):
if len(data) != len(self.item_types):
raise ValueError("Expected %i items got %i" % (len(self.item_types), len(data)))
return tuple(item_type.convert(value)
--- a/testing/mozbase/mozlog/mozlog/proxy.py
+++ b/testing/mozbase/mozlog/mozlog/proxy.py
@@ -10,16 +10,17 @@ class ProxyLogger(object):
A ProxyLogger behaves like a
:class:`mozlog.structuredlog.StructuredLogger`.
Each method and attribute access will be forwarded to the underlying
StructuredLogger.
RuntimeError will be raised when the default logger is not yet initialized.
"""
+
def __init__(self, component=None):
self.logger = None
self._component = component
def __getattr__(self, name):
if self.logger is None:
self.logger = get_default_logger(component=self._component)
if self.logger is None:
--- a/testing/mozbase/mozlog/mozlog/pytest_mozlog/plugin.py
+++ b/testing/mozbase/mozlog/mozlog/pytest_mozlog/plugin.py
@@ -1,16 +1,17 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mozlog
import os
import time
+
def pytest_addoption(parser):
# We can't simply use mozlog.commandline.add_logging_group(parser) here because
# Pytest's parser doesn't have the add_argument_group method Mozlog expects.
group = parser.getgroup('mozlog')
for name, (_class, _help) in mozlog.commandline.log_formatters.iteritems():
group.addoption('--log-{0}'.format(name), action='append', help=_help)
@@ -26,16 +27,17 @@ def pytest_addoption(parser):
def pytest_configure(config):
# If using pytest-xdist for parallelization, only register plugin on master process
if not hasattr(config, 'slaveinput'):
config.pluginmanager.register(MozLog())
class MozLog(object):
+
def __init__(self):
self.results = {}
self.start_time = int(time.time() * 1000) # in ms for Mozlog compatibility
def format_nodeid(self, nodeid):
'''Helper to Reformat/shorten a "::"-separated pytest test nodeid'''
testfile, testname = nodeid.split("::")
return " ".join([os.path.basename(testfile), testname])
--- a/testing/mozbase/mozlog/mozlog/reader.py
+++ b/testing/mozbase/mozlog/mozlog/reader.py
@@ -1,13 +1,14 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
+
def read(log_f, raise_on_error=False):
"""Return a generator that will return the entries in a structured log file.
Note that the caller must not close the file whilst the generator is still
in use.
:param log_f: file-like object containing the raw log entries, one per line
:param raise_on_error: boolean indicating whether ValueError should be raised
for lines that cannot be decoded."""
@@ -30,28 +31,30 @@ def imap_log(log_iter, action_map):
:param log_iter: Iterator returning structured log entries
:param action_map: Dictionary mapping action name to callback function. Log items
with actions not in this dictionary will be skipped.
"""
for item in log_iter:
if item["action"] in action_map:
yield action_map[item["action"]](item)
+
def each_log(log_iter, action_map):
"""Call a callback for each item in an iterable containing structured
log entries
:param log_iter: Iterator returning structured log entries
:param action_map: Dictionary mapping action name to callback function. Log items
with actions not in this dictionary will be skipped.
"""
for item in log_iter:
if item["action"] in action_map:
action_map[item["action"]](item)
+
class LogHandler(object):
"""Base class for objects that act as log handlers. A handler is a callable
that takes a log entry as the only argument.
Subclasses are expected to provide a method for each action type they
wish to handle, each taking a single argument for the test data.
For example a trivial subclass that just produces the id of each test as
it starts might be::
@@ -62,12 +65,13 @@ class LogHandler(object):
return data["test"]
"""
def __call__(self, data):
if hasattr(self, data["action"]):
handler = getattr(self, data["action"])
return handler(data)
+
def handle_log(log_iter, handler):
"""Call a handler for each item in a log, discarding the return value"""
for item in log_iter:
handler(item)
--- a/testing/mozbase/mozlog/mozlog/scripts/__init__.py
+++ b/testing/mozbase/mozlog/mozlog/scripts/__init__.py
@@ -1,15 +1,16 @@
#!/usr/bin/env python
import argparse
import unstable
import format as formatlog
import logmerge
+
def get_parser():
parser = argparse.ArgumentParser("structlog",
description="Tools for dealing with structured logs")
commands = {"unstable": (unstable.get_parser, unstable.main),
"format": (formatlog.get_parser, formatlog.main),
"logmerge": (logmerge.get_parser, logmerge.main)}
@@ -19,12 +20,13 @@ def get_parser():
parent = parser_func(False)
command_parser = sub_parser.add_parser(command,
description=parent.description,
parents=[parent])
command_parser.set_defaults(func=main_func)
return parser
+
def main():
parser = get_parser()
args = parser.parse_args()
args.func(**vars(args))
--- a/testing/mozbase/mozlog/mozlog/scripts/format.py
+++ b/testing/mozbase/mozlog/mozlog/scripts/format.py
@@ -1,24 +1,27 @@
import argparse
import sys
from .. import handlers, commandline, reader
+
def get_parser(add_help=True):
parser = argparse.ArgumentParser("format",
- description="Format a structured log stream", add_help=add_help)
+ description="Format a structured log stream",
+ add_help=add_help)
parser.add_argument("--input", action="store", default=None,
help="Filename to read from, defaults to stdin")
parser.add_argument("--output", action="store", default=None,
help="Filename to write to, defaults to stdout")
parser.add_argument("format", choices=commandline.log_formatters.keys(),
help="Format to use")
return parser
+
def main(**kwargs):
if kwargs["input"] is None:
input_file = sys.stdin
else:
input_file = open(kwargs["input"])
if kwargs["output"] is None:
output_file = sys.stdout
else:
--- a/testing/mozbase/mozlog/mozlog/scripts/logmerge.py
+++ b/testing/mozbase/mozlog/mozlog/scripts/logmerge.py
@@ -44,17 +44,18 @@ def validate_start_events(events):
def merge_start_events(events):
for start in events[1:]:
events[0]["tests"].extend(start["tests"])
return events[0]
def get_parser(add_help=True):
- parser = argparse.ArgumentParser("logmerge", description='Merge multiple log files.', add_help=add_help)
+ parser = argparse.ArgumentParser(
+ "logmerge", description='Merge multiple log files.', add_help=add_help)
parser.add_argument('-o', dest='output', help='output file, defaults to stdout')
parser.add_argument('files', metavar='File', type=str, nargs='+', help='file to be merged')
return parser
def main(**kwargs):
if kwargs["output"] is None:
output = sys.stdout
@@ -69,14 +70,13 @@ def main(**kwargs):
end_events = [process_until_suite_end(reader, output) for reader in readers]
dump_entry(fill_process_info(end_events[0]), output)
for reader in readers:
for entry in reader:
dump_entry(entry, output)
-
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
kwargs = vars(args)
main(**kwargs)
--- a/testing/mozbase/mozlog/mozlog/scripts/unstable.py
+++ b/testing/mozbase/mozlog/mozlog/scripts/unstable.py
@@ -1,98 +1,110 @@
import argparse
from collections import defaultdict
import json
from mozlog import reader
+
class StatusHandler(reader.LogHandler):
+
def __init__(self):
self.run_info = None
- self.statuses = defaultdict(lambda:defaultdict(lambda:defaultdict(lambda: defaultdict(int))))
+ self.statuses = defaultdict(lambda: defaultdict(
+ lambda: defaultdict(lambda: defaultdict(int))))
def test_id(self, test):
if type(test) in (str, unicode):
return test
else:
return tuple(test)
def suite_start(self, item):
self.run_info = tuple(sorted(item.get("run_info", {}).items()))
def test_status(self, item):
- self.statuses[self.run_info][self.test_id(item["test"])][item["subtest"]][item["status"]] += 1
+ self.statuses[self.run_info][self.test_id(item["test"])][item["subtest"]][
+ item["status"]] += 1
def test_end(self, item):
self.statuses[self.run_info][self.test_id(item["test"])][None][item["status"]] += 1
def suite_end(self, item):
self.run_info = None
+
def get_statuses(filenames):
handler = StatusHandler()
for filename in filenames:
with open(filename) as f:
reader.handle_log(reader.read(f), handler)
return handler.statuses
+
def _filter(results_cmp):
def inner(statuses):
- rv = defaultdict(lambda:defaultdict(dict))
+ rv = defaultdict(lambda: defaultdict(dict))
for run_info, tests in statuses.iteritems():
for test, subtests in tests.iteritems():
for name, results in subtests.iteritems():
if results_cmp(results):
rv[run_info][test][name] = results
return rv
return inner
filter_unstable = _filter(lambda x: len(x) > 1)
filter_stable = _filter(lambda x: len(x) == 1)
+
def group_results(data):
rv = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for run_info, tests in data.iteritems():
for test, subtests in tests.iteritems():
for name, results in subtests.iteritems():
for status, number in results.iteritems():
rv[test][name][status] += number
return rv
+
def print_results(data):
for run_info, tests in data.iteritems():
- run_str = " ".join("%s:%s" % (k,v) for k,v in run_info) if run_info else "No Run Info"
+ run_str = " ".join("%s:%s" % (k, v) for k, v in run_info) if run_info else "No Run Info"
print run_str
print "=" * len(run_str)
print_run(tests)
+
def print_run(tests):
for test, subtests in sorted(tests.items()):
print "\n" + str(test)
print "-" * len(test)
for name, results in subtests.iteritems():
print "[%s]: %s" % (name if name is not None else "",
- " ".join("%s (%i)" % (k,v) for k,v in results.iteritems()))
+ " ".join("%s (%i)" % (k, v) for k, v in results.iteritems()))
+
def get_parser(add_help=True):
parser = argparse.ArgumentParser("unstable",
- description="List tests that don't give consistent results from one or more runs.", add_help=add_help)
+ description="List tests that don't give consistent "
+ "results from one or more runs.", add_help=add_help)
parser.add_argument("--json", action="store_true", default=False,
help="Output in JSON format")
parser.add_argument("--group", action="store_true", default=False,
help="Group results from different run types")
parser.add_argument("log_file", nargs="+",
help="Log files to read")
return parser
+
def main(**kwargs):
unstable = filter_unstable(get_statuses(kwargs["log_file"]))
if kwargs["group"]:
unstable = group_results(unstable)
if kwargs["json"]:
print json.dumps(unstable)
else:
--- a/testing/mozbase/mozlog/mozlog/stdadapter.py
+++ b/testing/mozbase/mozlog/mozlog/stdadapter.py
@@ -1,40 +1,45 @@
import logging
from structuredlog import StructuredLogger, log_levels
+
class UnstructuredHandler(logging.Handler):
+
def __init__(self, name=None, level=logging.NOTSET):
self.structured = StructuredLogger(name)
logging.Handler.__init__(self, level=level)
def emit(self, record):
if record.levelname in log_levels:
log_func = getattr(self.structured, record.levelname.lower())
else:
log_func = self.logger.debug
log_func(record.msg)
def handle(self, record):
self.emit(record)
+
class LoggingWrapper(object):
+
def __init__(self, wrapped):
self.wrapped = wrapped
self.wrapped.addHandler(UnstructuredHandler(self.wrapped.name,
logging.getLevelName(self.wrapped.level)))
def add_handler(self, handler):
self.addHandler(handler)
def remove_handler(self, handler):
self.removeHandler(handler)
def __getattr__(self, name):
return getattr(self.wrapped, name)
+
def std_logging_adapter(logger):
"""Adapter for stdlib logging so that it produces structured
messages rather than standard logging messages
:param logger: logging.Logger to wrap"""
return LoggingWrapper(logger)
--- a/testing/mozbase/mozlog/mozlog/structuredlog.py
+++ b/testing/mozbase/mozlog/mozlog/structuredlog.py
@@ -57,29 +57,31 @@ Subfields for all messages:
thread - name for the thread emitting the message
pid - id of the python process in which the logger is running
source - name for the source emitting the message
component - name of the subcomponent emitting the message
"""
_default_logger_name = None
+
def get_default_logger(component=None):
"""Gets the default logger if available, optionally tagged with component
name. Will return None if not yet set
:param component: The component name to tag log messages with
"""
global _default_logger_name
if not _default_logger_name:
return None
return StructuredLogger(_default_logger_name, component=component)
+
def set_default_logger(default_logger):
"""Sets the default logger to logger.
It can then be retrieved with :py:func:`get_default_logger`
Note that :py:func:`~mozlog.commandline.setup_logging` will
set a default logger for you, so there should be no need to call this
function if you're using setting up logging that way (recommended).
@@ -90,31 +92,37 @@ def set_default_logger(default_logger):
_default_logger_name = default_logger.name
log_levels = dict((k.upper(), v) for v, k in
enumerate(["critical", "error", "warning", "info", "debug"]))
lint_levels = ["ERROR", "WARNING"]
+
def log_actions():
"""Returns the set of actions implemented by mozlog."""
return set(convertor_registry.keys())
+
class LoggerState(object):
+
def __init__(self):
self.handlers = []
self.running_tests = set()
self.suite_started = False
self.component_states = {}
+
class ComponentState(object):
+
def __init__(self):
self.filter_ = None
+
class StructuredLogger(object):
_lock = Lock()
_logger_states = {}
"""Create a structured logger with the given name
:param name: The name of the logger.
:param component: A subcomponent that the logger belongs to (typically a library name)
"""
@@ -181,17 +189,17 @@ class StructuredLogger(object):
if k not in converted_data:
converted_data[k] = v
data = self._make_log_data(action, converted_data)
if action in ("test_status", "test_end"):
if (data["expected"] == data["status"] or
data["status"] == "SKIP" or
- "expected" not in raw_data):
+ "expected" not in raw_data):
del data["expected"]
if not self._ensure_suite_state(action, data):
return
self._handle_log(data)
def _log_data(self, action, data=None):
@@ -248,17 +256,18 @@ class StructuredLogger(object):
Dict("version_info", default=None, optional=True),
Dict("device_info", default=None, optional=True),
Dict("extra", default=None, optional=True))
def suite_start(self, data):
"""Log a suite_start message
:param list tests: Test identifiers that will be run in the suite.
:param dict run_info: Optional information typically provided by mozinfo.
- :param dict version_info: Optional target application version information provided by mozversion.
+ :param dict version_info: Optional target application version information provided
+ by mozversion.
:param dict device_info: Optional target device information provided by mozdevice.
"""
if not self._ensure_suite_state('suite_start', data):
return
self._log_data("suite_start", data)
@log_action(Dict("extra", default=None, optional=True))
@@ -306,17 +315,17 @@ class StructuredLogger(object):
:param status: Status string indicating the subtest result
:param expected: Status string indicating the expected subtest result.
:param message: String containing a message associated with the result.
:param stack: a stack trace encountered during test execution.
:param extra: suite-specific data associated with the test result.
"""
if (data["expected"] == data["status"] or
- data["status"] == "SKIP"):
+ data["status"] == "SKIP"):
del data["expected"]
if data["test"] not in self._state.running_tests:
self.error("test_status for %s logged while not in progress. "
"Logged with data: %s" % (data["test"], json.dumps(data)))
return
self._log_data("test_status", data)
@@ -338,17 +347,17 @@ class StructuredLogger(object):
:param status: Status string indicating the test result
:param expected: Status string indicating the expected test result.
:param message: String containing a message associated with the result.
:param stack: a stack trace encountered during test execution.
:param extra: suite-specific data associated with the test result.
"""
if (data["expected"] == data["status"] or
- data["status"] == "SKIP"):
+ data["status"] == "SKIP"):
del data["expected"]
if data["test"] not in self._state.running_tests:
self.error("test_end for %s logged while not in progress. "
"Logged with data: %s" % (data["test"], json.dumps(data)))
else:
self._state.running_tests.remove(data["test"])
self._log_data("test_end", data)
@@ -435,16 +444,17 @@ def _log_func(level_name):
:param exc_info: Either a boolean indicating whether to include a traceback
derived from sys.exc_info() or a three-item tuple in the
same format as sys.exc_info() containing exception information
to log.
""" % level_name
log.__name__ = str(level_name).lower()
return log
+
def _lint_func(level_name):
@log_action(Unicode("path"),
Unicode("message", default=""),
Int("lineno", default=0),
Int("column", default=None, optional=True),
Unicode("hint", default=None, optional=True),
Unicode("source", default=None, optional=True),
Unicode("rule", default=None, optional=True),
@@ -474,37 +484,38 @@ def _lint_func(level_name):
for level_name in log_levels:
setattr(StructuredLogger, level_name.lower(), _log_func(level_name))
for level_name in lint_levels:
level_name = level_name.lower()
name = "lint_%s" % level_name
setattr(StructuredLogger, name, _lint_func(level_name))
+
class StructuredLogFileLike(object):
"""Wrapper for file-like objects to redirect writes to logger
instead. Each call to `write` becomes a single log entry of type `log`.
When using this it is important that the callees i.e. the logging
handlers do not themselves try to write to the wrapped file as this
will cause infinite recursion.
:param logger: `StructuredLogger` to which to redirect the file write operations.
:param level: log level to use for each write.
:param prefix: String prefix to prepend to each log entry.
"""
+
def __init__(self, logger, level="info", prefix=None):
self.logger = logger
self.log_func = getattr(self.logger, level)
self.prefix = prefix
def write(self, data):
if data.endswith("\n"):
data = data[:-1]
if data.endswith("\r"):
data = data[:-1]
if self.prefix is not None:
data = "%s: %s" % (self.prefix, data)
self.log_func(data)
def flush(self):
pass
-
--- a/testing/mozbase/mozlog/mozlog/unstructured/__init__.py
+++ b/testing/mozbase/mozlog/mozlog/unstructured/__init__.py
@@ -1,7 +1,8 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from .logger import *
from .loglistener import LogMessageServer
from .loggingmixin import LoggingMixin
--- a/testing/mozbase/mozlog/mozlog/unstructured/logger.py
+++ b/testing/mozbase/mozlog/mozlog/unstructured/logger.py
@@ -1,44 +1,47 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from logging import getLogger as getSysLogger
from logging import *
# Some of the build slave environments don't see the following when doing
# 'from logging import *'
# see https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c35
from logging import getLoggerClass, addLevelName, setLoggerClass, shutdown, debug, info, basicConfig
import json
_default_level = INFO
_LoggerClass = getLoggerClass()
# Define mozlog specific log levels
-START = _default_level + 1
-END = _default_level + 2
-PASS = _default_level + 3
+START = _default_level + 1
+END = _default_level + 2
+PASS = _default_level + 3
KNOWN_FAIL = _default_level + 4
-FAIL = _default_level + 5
-CRASH = _default_level + 6
+FAIL = _default_level + 5
+CRASH = _default_level + 6
# Define associated text of log levels
addLevelName(START, 'TEST-START')
addLevelName(END, 'TEST-END')
addLevelName(PASS, 'TEST-PASS')
addLevelName(KNOWN_FAIL, 'TEST-KNOWN-FAIL')
addLevelName(FAIL, 'TEST-UNEXPECTED-FAIL')
addLevelName(CRASH, 'PROCESS-CRASH')
+
class MozLogger(_LoggerClass):
"""
MozLogger class which adds some convenience log levels
related to automated testing in Mozilla and ability to
output structured log messages.
"""
+
def testStart(self, message, *args, **kwargs):
"""Logs a test start message"""
self.log(START, message, *args, **kwargs)
def testEnd(self, message, *args, **kwargs):
"""Logs a test end message"""
self.log(END, message, *args, **kwargs)
@@ -80,16 +83,17 @@ class MozLogger(_LoggerClass):
params['action'] = action
# The can message be None. This is expected, and shouldn't cause
# unstructured formatters to fail.
message = params.get('_message')
self.log(level, message, extra={'params': params})
+
class JSONFormatter(Formatter):
"""Log formatter for emitting structured JSON entries."""
def format(self, record):
# Default values determined by logger metadata
output = {
'_time': int(round(record.created * 1000, 0)),
'_namespace': record.name,
@@ -103,16 +107,17 @@ class JSONFormatter(Formatter):
if record.msg and output.get('_message') is None:
# For compatibility with callers using the printf like
# API exposed by python logging, call the default formatter.
output['_message'] = Formatter.format(self, record)
return json.dumps(output, indent=output.get('indent'))
+
class MozFormatter(Formatter):
"""
MozFormatter class used to standardize formatting
If a different format is desired, this can be explicitly
overriden with the log handler's setFormatter() method
"""
level_length = 0
max_level_length = len('TEST-START')
@@ -140,16 +145,17 @@ class MozFormatter(Formatter):
fmt = '%(name)s %(levelname)s ' + sep + ' %(message)s'
if self.include_timestamp:
fmt = '%(asctime)s ' + fmt
# this protected member is used to define the format
# used by the base Formatter's method
self._fmt = fmt
return Formatter.format(self, record)
+
def getLogger(name, handler=None):
"""
Returns the logger with the specified name.
If the logger doesn't exist, it is created.
If handler is specified, adds it to the logger. Otherwise a default handler
that logs to standard output will be used.
:param name: The name of the logger to retrieve
@@ -157,24 +163,23 @@ def getLogger(name, handler=None):
and a handler is specified, an exception will be raised. To
add a handler to an existing logger, call that logger's
addHandler method.
"""
setLoggerClass(MozLogger)
if name in Logger.manager.loggerDict:
if handler:
- raise ValueError('The handler parameter requires ' + \
- 'that a logger by this name does ' + \
+ raise ValueError('The handler parameter requires ' +
+ 'that a logger by this name does ' +
'not already exist')
return Logger.manager.loggerDict[name]
logger = getSysLogger(name)
logger.setLevel(_default_level)
if handler is None:
handler = StreamHandler()
handler.setFormatter(MozFormatter())
logger.addHandler(handler)
logger.propagate = False
return logger
-
--- a/testing/mozbase/mozlog/mozlog/unstructured/loglistener.py
+++ b/testing/mozbase/mozlog/mozlog/unstructured/loglistener.py
@@ -1,23 +1,26 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import SocketServer
import socket
import json
+
class LogMessageServer(SocketServer.TCPServer):
+
def __init__(self, server_address, logger, message_callback=None, timeout=3):
SocketServer.TCPServer.__init__(self, server_address, LogMessageHandler)
self._logger = logger
self._message_callback = message_callback
self.timeout = timeout
+
class LogMessageHandler(SocketServer.BaseRequestHandler):
"""Processes output from a connected log source, logging to an
existing logger upon receipt of a well-formed log messsage."""
def handle(self):
"""Continually listens for log messages."""
self._partial_message = ''
self.request.settimeout(self.server.timeout)
--- a/testing/mozbase/mozlog/setup.py
+++ b/testing/mozbase/mozlog/setup.py
@@ -14,26 +14,26 @@ setup(name=PACKAGE_NAME,
author='Mozilla Automation and Testing Team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL 1.1/GPL 2.0/LGPL 2.1',
packages=find_packages(),
zip_safe=False,
install_requires=["blessings>=1.3"],
tests_require=['mozfile'],
- platforms =['Any'],
+ platforms=['Any'],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
- ],
+ ],
package_data={"mozlog": ["formatters/html/main.js",
"formatters/html/style.css"]},
entry_points={
"console_scripts": [
"structlog = mozlog.scripts:main"
],
'pytest11': [
- 'mozlog = mozlog.pytest_mozlog.plugin',
+ 'mozlog = mozlog.pytest_mozlog.plugin',
]}
- )
+ )
--- a/testing/mozbase/mozlog/tests/test_logger.py
+++ b/testing/mozbase/mozlog/tests/test_logger.py
@@ -8,26 +8,28 @@ import socket
import threading
import time
import unittest
import mozfile
import mozlog.unstructured as mozlog
+
class ListHandler(mozlog.Handler):
"""Mock handler appends messages to a list for later inspection."""
def __init__(self):
mozlog.Handler.__init__(self)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
+
class TestLogging(unittest.TestCase):
"""Tests behavior of basic mozlog api."""
def test_logger_defaults(self):
"""Tests the default logging format and behavior."""
default_logger = mozlog.getLogger('default.logger')
self.assertEqual(default_logger.name, 'default.logger')
@@ -55,16 +57,17 @@ class TestLogging(unittest.TestCase):
log.info('no timestamp')
self.assertTrue(handler.messages[-1].startswith('%s ' % log_name))
handler.setFormatter(mozlog.MozFormatter(include_timestamp=True))
log.info('timestamp')
# Just verify that this raises no exceptions.
datetime.datetime.strptime(handler.messages[-1][:23],
'%Y-%m-%d %H:%M:%S,%f')
+
class TestStructuredLogging(unittest.TestCase):
"""Tests structured output in mozlog."""
def setUp(self):
self.handler = ListHandler()
self.handler.setFormatter(mozlog.JSONFormatter())
self.logger = mozlog.MozLogger('test.Logger')
self.logger.addHandler(self.handler)
@@ -185,18 +188,18 @@ class TestStructuredLogging(unittest.Tes
'action': 'test_message',
'_level': 'DEBUG'})
message_string_three = json.dumps({'_message': 'socket message three',
'action': 'test_message',
'_level': 'DEBUG'})
message_string = message_string_one + '\n' + \
- message_string_two + '\n' + \
- message_string_three + '\n'
+ message_string_two + '\n' + \
+ message_string_three + '\n'
server_thread = threading.Thread(target=self.log_server.handle_request)
server_thread.start()
host, port = self.log_server.server_address
sock = socket.socket()
sock.connect((host, port))
@@ -210,20 +213,22 @@ class TestStructuredLogging(unittest.Tes
sock.sendall(message_string[32:64])
time.sleep(.01)
sock.sendall(message_string[64:128])
time.sleep(.01)
sock.sendall(message_string[128:])
server_thread.join()
+
class Loggable(mozlog.LoggingMixin):
"""Trivial class inheriting from LoggingMixin"""
pass
+
class TestLoggingMixin(unittest.TestCase):
"""Tests basic use of LoggingMixin"""
def test_mixin(self):
loggable = Loggable()
self.assertTrue(not hasattr(loggable, "_logger"))
loggable.log(mozlog.INFO, "This will instantiate the logger")
self.assertTrue(hasattr(loggable, "_logger"))
@@ -239,17 +244,17 @@ class TestLoggingMixin(unittest.TestCase
self.assertTrue(isinstance(loggable._logger.handlers[0],
ListHandler))
self.assertEqual(loggable._logger.name, "test.mixin")
loggable.log(mozlog.WARN, 'message for "log" method')
loggable.info('message for "info" method')
loggable.error('message for "error" method')
loggable.log_structured('test_message',
- params={'_message': 'message for ' + \
+ params={'_message': 'message for ' +
'"log_structured" method'})
expected_messages = ['message for "log" method',
'message for "info" method',
'message for "error" method',
'message for "log_structured" method']
actual_messages = loggable._logger.handlers[0].messages
--- a/testing/mozbase/mozlog/tests/test_structured.py
+++ b/testing/mozbase/mozlog/tests/test_structured.py
@@ -17,32 +17,34 @@ from mozlog import (
structuredlog,
stdadapter,
handlers,
formatters,
)
class TestHandler(object):
+
def __init__(self):
self.items = []
def __call__(self, data):
self.items.append(data)
@property
def last_item(self):
return self.items[-1]
@property
def empty(self):
return not self.items
class BaseStructuredTest(unittest.TestCase):
+
def setUp(self):
self.logger = structuredlog.StructuredLogger("test")
self.handler = TestHandler()
self.logger.add_handler(self.handler)
def pop_last_item(self):
return self.handler.items.pop()
@@ -54,20 +56,22 @@ class BaseStructuredTest(unittest.TestCa
"thread": "MainThread",
"source": "test"}
specials = set(["time"])
all_expected.update(expected)
for key, value in all_expected.iteritems():
self.assertEqual(actual[key], value)
- self.assertEquals(set(all_expected.keys()) | specials, set(actual.keys()))
+ self.assertEquals(set(all_expected.keys()) |
+ specials, set(actual.keys()))
class TestStatusHandler(BaseStructuredTest):
+
def setUp(self):
super(TestStatusHandler, self).setUp()
self.handler = handlers.StatusHandler()
self.logger.add_handler(self.handler)
def test_failure_run(self):
self.logger.suite_start([])
self.logger.test_start("test1")
@@ -96,141 +100,147 @@ class TestStatusHandler(BaseStructuredTe
summary = self.handler.summarize()
self.assertIn('ERROR', summary.log_level_counts)
self.assertEqual(1, summary.log_level_counts['ERROR'])
self.assertIn('OK', summary.expected_statuses)
self.assertEqual(2, summary.expected_statuses['OK'])
class TestStructuredLog(BaseStructuredTest):
+
def test_suite_start(self):
self.logger.suite_start(["test"])
self.assert_log_equals({"action": "suite_start",
- "tests":["test"]})
+ "tests": ["test"]})
self.logger.suite_end()
def test_suite_end(self):
self.logger.suite_start([])
self.logger.suite_end()
self.assert_log_equals({"action": "suite_end"})
def test_start(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.assert_log_equals({"action": "test_start",
- "test":"test1"})
+ "test": "test1"})
- self.logger.test_start(("test1", "==", "test1-ref"), path="path/to/test")
+ self.logger.test_start(
+ ("test1", "==", "test1-ref"), path="path/to/test")
self.assert_log_equals({"action": "test_start",
- "test":("test1", "==", "test1-ref"),
+ "test": ("test1", "==", "test1-ref"),
"path": "path/to/test"})
self.logger.suite_end()
def test_start_inprogress(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_start("test1")
self.assert_log_equals({"action": "log",
"message": "test_start for test1 logged while in progress.",
"level": "ERROR"})
self.logger.suite_end()
def test_status(self):
self.logger.suite_start([])
self.logger.test_start("test1")
- self.logger.test_status("test1", "subtest name", "fail", expected="FAIL", message="Test message")
+ self.logger.test_status("test1", "subtest name", "fail", expected="FAIL",
+ message="Test message")
self.assert_log_equals({"action": "test_status",
"subtest": "subtest name",
"status": "FAIL",
"message": "Test message",
- "test":"test1"})
+ "test": "test1"})
self.logger.test_end("test1", "OK")
self.logger.suite_end()
def test_status_1(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_status("test1", "subtest name", "fail")
self.assert_log_equals({"action": "test_status",
"subtest": "subtest name",
"status": "FAIL",
"expected": "PASS",
- "test":"test1"})
+ "test": "test1"})
self.logger.test_end("test1", "OK")
self.logger.suite_end()
def test_status_2(self):
- self.assertRaises(ValueError, self.logger.test_status, "test1", "subtest name", "XXXUNKNOWNXXX")
+ self.assertRaises(ValueError, self.logger.test_status, "test1", "subtest name",
+ "XXXUNKNOWNXXX")
def test_status_extra(self):
self.logger.suite_start([])
self.logger.test_start("test1")
- self.logger.test_status("test1", "subtest name", "FAIL", expected="PASS", extra={"data": 42})
+ self.logger.test_status("test1", "subtest name", "FAIL", expected="PASS",
+ extra={"data": 42})
self.assert_log_equals({"action": "test_status",
"subtest": "subtest name",
"status": "FAIL",
"expected": "PASS",
"test": "test1",
- "extra": {"data":42}
- })
+ "extra": {"data": 42}})
self.logger.test_end("test1", "OK")
self.logger.suite_end()
def test_status_stack(self):
self.logger.suite_start([])
self.logger.test_start("test1")
- self.logger.test_status("test1", "subtest name", "FAIL", expected="PASS", stack="many\nlines\nof\nstack")
+ self.logger.test_status("test1", "subtest name", "FAIL", expected="PASS",
+ stack="many\nlines\nof\nstack")
self.assert_log_equals({"action": "test_status",
"subtest": "subtest name",
"status": "FAIL",
"expected": "PASS",
"test": "test1",
- "stack": "many\nlines\nof\nstack"
- })
+ "stack": "many\nlines\nof\nstack"})
self.logger.test_end("test1", "OK")
self.logger.suite_end()
def test_status_not_started(self):
self.logger.test_status("test_UNKNOWN", "subtest", "PASS")
self.assertTrue(self.pop_last_item()["message"].startswith(
"test_status for test_UNKNOWN logged while not in progress. Logged with data: {"))
def test_end(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_end("test1", "fail", message="Test message")
self.assert_log_equals({"action": "test_end",
"status": "FAIL",
"expected": "OK",
"message": "Test message",
- "test":"test1"})
+ "test": "test1"})
self.logger.suite_end()
def test_end_1(self):
self.logger.suite_start([])
self.logger.test_start("test1")
- self.logger.test_end("test1", "PASS", expected="PASS", extra={"data":123})
+ self.logger.test_end(
+ "test1", "PASS", expected="PASS", extra={"data": 123})
self.assert_log_equals({"action": "test_end",
"status": "PASS",
"extra": {"data": 123},
- "test":"test1"})
+ "test": "test1"})
self.logger.suite_end()
def test_end_2(self):
- self.assertRaises(ValueError, self.logger.test_end, "test1", "XXXUNKNOWNXXX")
+ self.assertRaises(ValueError, self.logger.test_end,
+ "test1", "XXXUNKNOWNXXX")
def test_end_stack(self):
self.logger.suite_start([])
self.logger.test_start("test1")
- self.logger.test_end("test1", "PASS", expected="PASS", stack="many\nlines\nof\nstack")
+ self.logger.test_end("test1", "PASS", expected="PASS",
+ stack="many\nlines\nof\nstack")
self.assert_log_equals({"action": "test_end",
"status": "PASS",
"test": "test1",
- "stack": "many\nlines\nof\nstack"
- })
+ "stack": "many\nlines\nof\nstack"})
self.logger.suite_end()
def test_end_no_start(self):
self.logger.test_end("test1", "PASS", expected="PASS")
self.assertTrue(self.pop_last_item()["message"].startswith(
"test_end for test1 logged while not in progress. Logged with data: {"))
self.logger.suite_end()
@@ -379,28 +389,33 @@ class TestStructuredLog(BaseStructuredTe
file_like.write("line 4\r\n")
self.assert_log_equals({"action": "log",
"level": "INFO",
"message": "line 4"})
class TestTypeConversions(BaseStructuredTest):
+
def test_raw(self):
- self.logger.log_raw({"action":"suite_start", "tests":[1], "time": "1234"})
+ self.logger.log_raw({"action": "suite_start",
+ "tests": [1],
+ "time": "1234"})
self.assert_log_equals({"action": "suite_start",
- "tests":["1"],
+ "tests": ["1"],
"time": 1234})
self.logger.suite_end()
def test_tuple(self):
self.logger.suite_start([])
- self.logger.test_start(("\xf0\x90\x8d\x84\xf0\x90\x8c\xb4\xf0\x90\x8d\x83\xf0\x90\x8d\x84", 42, u"\u16a4"))
+ self.logger.test_start(("\xf0\x90\x8d\x84\xf0\x90\x8c\xb4\xf0\x90\x8d\x83\xf0\x90\x8d\x84",
+ 42, u"\u16a4"))
self.assert_log_equals({"action": "test_start",
- "test": (u'\U00010344\U00010334\U00010343\U00010344', u"42", u"\u16a4")})
+ "test": (u'\U00010344\U00010334\U00010343\U00010344',
+ u"42", u"\u16a4")})
self.logger.suite_end()
def test_non_string_messages(self):
self.logger.suite_start([])
self.logger.info(1)
self.assert_log_equals({"action": "log",
"message": "1",
"level": "INFO"})
@@ -428,40 +443,44 @@ class TestTypeConversions(BaseStructured
"message": "test",
"level": "INFO"})
self.logger.suite_start([], {})
self.assert_log_equals({"action": "suite_start",
"tests": [],
"run_info": {}})
self.logger.test_start(test="test1")
- self.logger.test_status("subtest1", "FAIL", test="test1", status="PASS")
+ self.logger.test_status(
+ "subtest1", "FAIL", test="test1", status="PASS")
self.assert_log_equals({"action": "test_status",
"test": "test1",
"subtest": "subtest1",
"status": "PASS",
"expected": "FAIL"})
self.logger.process_output(123, "data", "test")
self.assert_log_equals({"action": "process_output",
"process": "123",
"command": "test",
"data": "data"})
self.assertRaises(TypeError, self.logger.test_status, subtest="subtest2",
status="FAIL", expected="PASS")
self.assertRaises(TypeError, self.logger.test_status, "test1", "subtest1",
"PASS", "FAIL", "message", "stack", {}, "unexpected")
- self.assertRaises(TypeError, self.logger.test_status, "test1", test="test2")
+ self.assertRaises(TypeError, self.logger.test_status,
+ "test1", test="test2")
self.logger.suite_end()
class TestComponentFilter(BaseStructuredTest):
+
def test_filter_component(self):
component_logger = structuredlog.StructuredLogger(self.logger.name,
"test_component")
- component_logger.component_filter = handlers.LogLevelFilter(lambda x:x, "info")
+ component_logger.component_filter = handlers.LogLevelFilter(
+ lambda x: x, "info")
self.logger.debug("Test")
self.assertFalse(self.handler.empty)
self.assert_log_equals({"action": "log",
"level": "DEBUG",
"message": "Test"})
self.assertTrue(self.handler.empty)
@@ -489,17 +508,18 @@ class TestComponentFilter(BaseStructured
"test_component")
self.logger.debug("Test")
self.assertFalse(self.handler.empty)
self.assert_log_equals({"action": "log",
"level": "DEBUG",
"message": "Test"})
- self.logger.component_filter = handlers.LogLevelFilter(lambda x:x, "info")
+ self.logger.component_filter = handlers.LogLevelFilter(
+ lambda x: x, "info")
self.logger.debug("Test 1")
self.assertTrue(self.handler.empty)
component_logger.debug("Test 2")
self.assertFalse(self.handler.empty)
self.assert_log_equals({"action": "log",
"level": "DEBUG",
@@ -527,29 +547,31 @@ class TestComponentFilter(BaseStructured
"message": "FILTERED! Test"})
self.logger.component_filter = None
class FormatterTest(unittest.TestCase):
def setUp(self):
self.position = 0
- self.logger = structuredlog.StructuredLogger("test_%s" % type(self).__name__)
+ self.logger = structuredlog.StructuredLogger(
+ "test_%s" % type(self).__name__)
self.output_file = StringIO.StringIO()
self.handler = handlers.StreamHandler(
self.output_file, self.get_formatter())
self.logger.add_handler(self.handler)
def set_position(self, pos=None):
if pos is None:
pos = self.output_file.tell()
self.position = pos
def get_formatter(self):
- raise NotImplementedError("FormatterTest subclasses must implement get_formatter")
+ raise NotImplementedError(
+ "FormatterTest subclasses must implement get_formatter")
@property
def loglines(self):
self.output_file.seek(self.position)
return [line.rstrip() for line in self.output_file.readlines()]
class TestHTMLFormatter(FormatterTest):
@@ -639,75 +661,80 @@ class TestTBPLFormatter(FormatterTest):
self.logger.process_exit(1234, 0)
self.assertIn('TEST-INFO | 1234: exit 0', self.loglines)
@unittest.skipUnless(os.name == 'posix', 'posix only')
def test_process_exit_with_sig(self):
# subprocess return code is negative when process
# has been killed by signal on posix.
self.logger.process_exit(1234, -signal.SIGTERM)
- self.assertIn,('TEST-INFO | 1234: killed by SIGTERM', self.loglines)
+ self.assertIn('TEST-INFO | 1234: killed by SIGTERM', self.loglines)
+
class TestMachFormatter(FormatterTest):
def get_formatter(self):
return formatters.MachFormatter(disable_colors=True)
def test_summary(self):
self.logger.suite_start([])
- #Some tests that pass
+ # Some tests that pass
self.logger.test_start("test1")
self.logger.test_end("test1", status="PASS", expected="PASS")
self.logger.test_start("test2")
self.logger.test_end("test2", status="PASS", expected="TIMEOUT")
self.logger.test_start("test3")
self.logger.test_end("test3", status="FAIL", expected="PASS")
self.set_position()
self.logger.suite_end()
self.assertIn("Ran 3 tests", self.loglines)
self.assertIn("Expected results: 1", self.loglines)
- self.assertIn("Unexpected results: 2 (FAIL: 1, PASS: 1)", self.loglines)
+ self.assertIn(
+ "Unexpected results: 2 (FAIL: 1, PASS: 1)", self.loglines)
self.assertNotIn("test1", self.loglines)
self.assertIn("PASS expected TIMEOUT test2", self.loglines)
self.assertIn("FAIL test3", self.loglines)
def test_summary_subtests(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_status("test1", "subtest1", status="PASS")
self.logger.test_status("test1", "subtest2", status="FAIL")
self.logger.test_end("test1", status="OK", expected="OK")
self.logger.test_start("test2")
- self.logger.test_status("test2", "subtest1", status="TIMEOUT", expected="PASS")
+ self.logger.test_status("test2", "subtest1",
+ status="TIMEOUT", expected="PASS")
self.logger.test_end("test2", status="TIMEOUT", expected="OK")
self.set_position()
self.logger.suite_end()
self.assertIn("Ran 5 tests (2 parents, 3 subtests)", self.loglines)
self.assertIn("Expected results: 2", self.loglines)
- self.assertIn("Unexpected results: 3 (FAIL: 1, TIMEOUT: 2)", self.loglines)
+ self.assertIn(
+ "Unexpected results: 3 (FAIL: 1, TIMEOUT: 2)", self.loglines)
def test_summary_ok(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_status("test1", "subtest1", status="PASS")
self.logger.test_status("test1", "subtest2", status="PASS")
self.logger.test_end("test1", status="OK", expected="OK")
self.logger.test_start("test2")
- self.logger.test_status("test2", "subtest1", status="PASS", expected="PASS")
+ self.logger.test_status("test2", "subtest1",
+ status="PASS", expected="PASS")
self.logger.test_end("test2", status="OK", expected="OK")
self.set_position()
self.logger.suite_end()
self.assertIn("OK", self.loglines)
self.assertIn("Expected results: 5", self.loglines)
self.assertIn("Unexpected results: 0", self.loglines)
@@ -738,30 +765,32 @@ class TestXUnitFormatter(FormatterTest):
return formatters.XUnitFormatter()
def log_as_xml(self):
return ET.fromstring('\n'.join(self.loglines))
def test_stacktrace_is_present(self):
self.logger.suite_start([])
self.logger.test_start("test1")
- self.logger.test_end("test1", "fail", message="Test message", stack='this\nis\na\nstack')
+ self.logger.test_end(
+ "test1", "fail", message="Test message", stack='this\nis\na\nstack')
self.logger.suite_end()
root = self.log_as_xml()
self.assertIn('this\nis\na\nstack', root.find('testcase/failure').text)
def test_failure_message(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_end("test1", "fail", message="Test message")
self.logger.suite_end()
root = self.log_as_xml()
- self.assertEquals('Expected OK, got FAIL', root.find('testcase/failure').get('message'))
+ self.assertEquals('Expected OK, got FAIL', root.find(
+ 'testcase/failure').get('message'))
def test_suite_attrs(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_end("test1", "ok", message="Test message")
self.logger.suite_end()
root = self.log_as_xml()
@@ -771,17 +800,18 @@ class TestXUnitFormatter(FormatterTest):
self.assertEqual(root.get('tests'), '1')
self.assertEqual(root.get('time'), '0.00')
def test_time_is_not_rounded(self):
# call formatter directly, it is easier here
formatter = self.get_formatter()
formatter.suite_start(dict(time=55000))
formatter.test_start(dict(time=55100))
- formatter.test_end(dict(time=55558, test='id', message='message', status='PASS'))
+ formatter.test_end(
+ dict(time=55558, test='id', message='message', status='PASS'))
xml_string = formatter.suite_end(dict(time=55559))
root = ET.fromstring(xml_string)
self.assertEqual(root.get('time'), '0.56')
self.assertEqual(root.find('testcase').get('time'), '0.46')
class TestCommandline(unittest.TestCase):
@@ -846,62 +876,67 @@ class TestCommandline(unittest.TestCase)
# The debug level is not logged by default.
self.assertEqual(["INFO message",
"ERROR message"],
self.loglines)
def test_logging_errorlevel(self):
parser = argparse.ArgumentParser()
commandline.add_logging_group(parser)
- args = parser.parse_args(["--log-tbpl=%s" % self.logfile.name, "--log-tbpl-level=error"])
+ args = parser.parse_args(
+ ["--log-tbpl=%s" % self.logfile.name, "--log-tbpl-level=error"])
logger = commandline.setup_logging("test_fmtopts", args, {})
logger.info("INFO message")
logger.debug("DEBUG message")
logger.error("ERROR message")
# Only the error level and above were requested.
self.assertEqual(["ERROR message"],
self.loglines)
def test_logging_debuglevel(self):
parser = argparse.ArgumentParser()
commandline.add_logging_group(parser)
- args = parser.parse_args(["--log-tbpl=%s" % self.logfile.name, "--log-tbpl-level=debug"])
+ args = parser.parse_args(
+ ["--log-tbpl=%s" % self.logfile.name, "--log-tbpl-level=debug"])
logger = commandline.setup_logging("test_fmtopts", args, {})
logger.info("INFO message")
logger.debug("DEBUG message")
logger.error("ERROR message")
# Requesting a lower log level than default works as expected.
self.assertEqual(["INFO message",
"DEBUG message",
"ERROR message"],
self.loglines)
def test_unused_options(self):
parser = argparse.ArgumentParser()
commandline.add_logging_group(parser)
args = parser.parse_args(["--log-tbpl-level=error"])
- self.assertRaises(ValueError, commandline.setup_logging, "test_fmtopts", args, {})
+ self.assertRaises(ValueError, commandline.setup_logging,
+ "test_fmtopts", args, {})
+
class TestBuffer(BaseStructuredTest):
def assert_log_equals(self, expected, actual=None):
if actual is None:
actual = self.pop_last_item()
all_expected = {"pid": os.getpid(),
"thread": "MainThread",
"source": "testBuffer"}
specials = set(["time"])
all_expected.update(expected)
for key, value in all_expected.iteritems():
self.assertEqual(actual[key], value)
- self.assertEquals(set(all_expected.keys()) | specials, set(actual.keys()))
+ self.assertEquals(set(all_expected.keys()) |
+ specials, set(actual.keys()))
def setUp(self):
self.logger = structuredlog.StructuredLogger("testBuffer")
self.handler = handlers.BufferHandler(TestHandler(), message_limit=4)
self.logger.add_handler(self.handler)
def tearDown(self):
self.logger.remove_handler(self.handler)
@@ -929,17 +964,16 @@ class TestBuffer(BaseStructuredTest):
self.logger.test_status("test1", "sub7", status="PASS")
self.logger.test_end("test1", status="OK")
self.logger.send_message("buffer", "clear")
self.assert_log_equals({"action": "test_end",
"test": "test1",
"status": "OK"})
self.logger.suite_end()
-
def test_buffer_size(self):
self.logger.suite_start([])
self.logger.test_start("test1")
self.logger.test_status("test1", "sub1", status="PASS")
self.logger.test_status("test1", "sub2", status="PASS")
self.logger.test_status("test1", "sub3", status="PASS")
self.logger.test_status("test1", "sub4", status="PASS")
self.logger.test_status("test1", "sub5", status="PASS")
@@ -976,16 +1010,17 @@ class TestBuffer(BaseStructuredTest):
"test": "test1",
"status": "PASS",
"subtest": "sub5"})
self.assert_log_equals({"action": "suite_start",
"tests": []})
class TestReader(unittest.TestCase):
+
def to_file_like(self, obj):
data_str = "\n".join(json.dumps(item) for item in obj)
return StringIO.StringIO(data_str)
def test_read(self):
data = [{"action": "action_0", "data": "data_0"},
{"action": "action_1", "data": "data_1"}]
@@ -1011,39 +1046,41 @@ class TestReader(unittest.TestCase):
list(res_iter))
def test_each_log(self):
data = [{"action": "action_0", "data": "data_0"},
{"action": "action_1", "data": "data_1"}]
f = self.to_file_like(data)
- count = {"action_0":0,
- "action_1":0}
+ count = {"action_0": 0,
+ "action_1": 0}
def f_action_0(item):
count[item["action"]] += 1
def f_action_1(item):
count[item["action"]] += 2
reader.each_log(reader.read(f),
{"action_0": f_action_0,
"action_1": f_action_1})
- self.assertEquals({"action_0":1, "action_1":2}, count)
+ self.assertEquals({"action_0": 1, "action_1": 2}, count)
def test_handler(self):
data = [{"action": "action_0", "data": "data_0"},
{"action": "action_1", "data": "data_1"}]
f = self.to_file_like(data)
test = self
+
class ReaderTestHandler(reader.LogHandler):
+
def __init__(self):
self.action_0_count = 0
self.action_1_count = 0
def action_0(self, item):
test.assertEquals(item["action"], "action_0")
self.action_0_count += 1
--- a/testing/mozbase/moznetwork/moznetwork/__init__.py
+++ b/testing/mozbase/moznetwork/moznetwork/__init__.py
@@ -16,9 +16,11 @@ Example usage:
ip = moznetwork.get_ip()
print "The external IP of your machine is '%s'" % ip
except moznetwork.NetworkError:
print "Unable to determine IP address of machine"
raise
"""
-from moznetwork import *
+from moznetwork import get_ip
+
+__all__ = ['get_ip']
--- a/testing/mozbase/moznetwork/moznetwork/moznetwork.py
+++ b/testing/mozbase/moznetwork/moznetwork/moznetwork.py
@@ -43,17 +43,17 @@ def _get_interface_list():
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
namestr = names.tostring()
return [(namestr[i:i + 32].split('\0', 1)[0],
- socket.inet_ntoa(namestr[i + 20:i + 24]))
+ socket.inet_ntoa(namestr[i + 20:i + 24]))
for i in range(0, outbytes, struct_size)]
except IOError:
raise NetworkError('Unable to call ioctl with SIOCGIFCONF')
def _proc_matches(args, regex):
"""Helper returns the matches of regex in the output of a process created with
@@ -151,22 +151,22 @@ def get_ip():
def get_lan_ip():
"""Deprecated. Please use get_ip() instead."""
return get_ip()
def cli(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Retrieve IP address')
- structured.commandline.add_logging_group(
+ mozlog.commandline.add_logging_group(
parser,
- include_formatters=structured.commandline.TEXT_FORMATTERS
+ include_formatters=mozlog.commandline.TEXT_FORMATTERS
)
args = parser.parse_args()
- structured.commandline.setup_logging(
+ mozlog.commandline.setup_logging(
'mozversion', args, {'mach': sys.stdout})
_get_logger().info('IP address: %s' % get_ip())
if __name__ == '__main__':
cli()
--- a/testing/mozbase/moznetwork/setup.py
+++ b/testing/mozbase/moznetwork/setup.py
@@ -9,17 +9,17 @@ PACKAGE_VERSION = '0.27'
deps = ['mozinfo',
'mozlog >= 3.0',
]
setup(name='moznetwork',
version=PACKAGE_VERSION,
description="Library of network utilities for use in Mozilla testing",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['moznetwork'],
include_package_data=True,
zip_safe=False,
--- a/testing/mozbase/moznetwork/tests/test.py
+++ b/testing/mozbase/moznetwork/tests/test.py
@@ -22,23 +22,23 @@ def verify_ip_in_list(ip):
verify_ip_in_list('192.168.0.1')
returns True if the `ip` is in the list of IPs in ipconfig/ifconfig
"""
# Regex to match IPv4 addresses.
# 0-255.0-255.0-255.0-255, note order is important here.
regexip = re.compile("((25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)\.){3}"
- "(25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)")
+ "(25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)")
if mozinfo.isLinux or mozinfo.isMac or mozinfo.isBsd:
# if "/sbin/ifconfig" exist, use it because it may not be in the
# PATH (at least on some linux platforms)
if os.path.isfile('/sbin/ifconfig') and os.access('/sbin/ifconfig',
- os.X_OK):
+ os.X_OK):
args = ['/sbin/ifconfig']
else:
args = ["ifconfig"]
if mozinfo.isWin:
args = ["ipconfig"]
ps = subprocess.Popen(args, stdout=subprocess.PIPE)
--- a/testing/mozbase/mozprocess/mozprocess/__init__.py
+++ b/testing/mozbase/mozprocess/mozprocess/__init__.py
@@ -1,7 +1,8 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
from .processhandler import *
--- a/testing/mozbase/mozprocess/mozprocess/processhandler.py
+++ b/testing/mozbase/mozprocess/mozprocess/processhandler.py
@@ -17,46 +17,52 @@ from datetime import datetime
__all__ = ['ProcessHandlerMixin', 'ProcessHandler', 'LogOutput',
'StoreOutput', 'StreamOutput']
# Set the MOZPROCESS_DEBUG environment variable to 1 to see some debugging output
MOZPROCESS_DEBUG = os.getenv("MOZPROCESS_DEBUG")
# We dont use mozinfo because it is expensive to import, see bug 933558.
isWin = os.name == "nt"
-isPosix = os.name == "posix" # includes MacOS X
+isPosix = os.name == "posix" # includes MacOS X
if isWin:
- import ctypes, ctypes.wintypes, msvcrt
from ctypes import sizeof, addressof, c_ulong, byref, WinError, c_longlong
from . import winprocess
from .qijo import JobObjectAssociateCompletionPortInformation,\
- JOBOBJECT_ASSOCIATE_COMPLETION_PORT, JobObjectExtendedLimitInformation,\
- JOBOBJECT_BASIC_LIMIT_INFORMATION, JOBOBJECT_EXTENDED_LIMIT_INFORMATION, IO_COUNTERS
+ JOBOBJECT_ASSOCIATE_COMPLETION_PORT, JobObjectExtendedLimitInformation,\
+ JOBOBJECT_BASIC_LIMIT_INFORMATION, JOBOBJECT_EXTENDED_LIMIT_INFORMATION, IO_COUNTERS
class ProcessHandlerMixin(object):
"""
A class for launching and manipulating local processes.
- :param cmd: command to run. May be a string or a list. If specified as a list, the first element will be interpreted as the command, and all additional elements will be interpreted as arguments to that command.
- :param args: list of arguments to pass to the command (defaults to None). Must not be set when `cmd` is specified as a list.
+ :param cmd: command to run. May be a string or a list. If specified as a list, the first
+ element will be interpreted as the command, and all additional elements will be interpreted
+ as arguments to that command.
+ :param args: list of arguments to pass to the command (defaults to None). Must not be set when
+ `cmd` is specified as a list.
:param cwd: working directory for command (defaults to None).
:param env: is the environment to use for the process (defaults to os.environ).
- :param ignore_children: causes system to ignore child processes when True, defaults to False (which tracks child processes).
- :param kill_on_timeout: when True, the process will be killed when a timeout is reached. When False, the caller is responsible for killing the process. Failure to do so could cause a call to wait() to hang indefinitely. (Defaults to True.)
+ :param ignore_children: causes system to ignore child processes when True,
+ defaults to False (which tracks child processes).
+ :param kill_on_timeout: when True, the process will be killed when a timeout is reached.
+ When False, the caller is responsible for killing the process.
+ Failure to do so could cause a call to wait() to hang indefinitely. (Defaults to True.)
:param processOutputLine: function or list of functions to be called for
each line of output produced by the process (defaults to an empty
list).
:param processStderrLine: function or list of functions to be called
for each line of error output - stderr - produced by the process
(defaults to an empty list). If this is not specified, stderr lines
will be sent to the *processOutputLine* callbacks.
:param onTimeout: function or list of functions to be called when the process times out.
- :param onFinish: function or list of functions to be called when the process terminates normally without timing out.
+ :param onFinish: function or list of functions to be called when the process terminates
+ normally without timing out.
:param kwargs: additional keyword args to pass directly into Popen.
NOTE: Child processes will be tracked by default. If for any reason
we are unable to track child processes and ignore_children is set to False,
then we will fall back to only tracking the root process. The fallback
will be logged.
"""
@@ -202,25 +208,25 @@ class ProcessHandlerMixin(object):
return self.returncode
""" Private Members of Process class """
if isWin:
# Redefine the execute child so that we can track process groups
def _execute_child(self, *args_tuple):
# workaround for bug 950894
- if sys.hexversion < 0x02070600: # prior to 2.7.6
+ if sys.hexversion < 0x02070600: # prior to 2.7.6
(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines, startupinfo,
creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = args_tuple
to_close = set()
- else: # 2.7.6 and later
+ else: # 2.7.6 and later
(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines, startupinfo,
creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = args_tuple
if not isinstance(args, basestring):
args = subprocess.list2cmdline(args)
@@ -245,33 +251,34 @@ class ProcessHandlerMixin(object):
# Determine if we can create a job or create nested jobs.
can_create_job = winprocess.CanCreateJobObject()
can_nest_jobs = self._can_nest_jobs()
# Ensure we write a warning message if we are falling back
if not (can_create_job or can_nest_jobs) and not self._ignore_children:
# We can't create job objects AND the user wanted us to
# Warn the user about this.
- print >> sys.stderr, "ProcessManager UNABLE to use job objects to manage child processes"
+ print >> sys.stderr, \
+ "ProcessManager UNABLE to use job objects to manage child processes"
# set process creation flags
creationflags |= winprocess.CREATE_SUSPENDED
creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
if can_create_job:
creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB
if not (can_create_job or can_nest_jobs):
# Since we've warned, we just log info here to inform you
# of the consequence of setting ignore_children = True
print "ProcessManager NOT managing child processes"
# create the process
hp, ht, pid, tid = winprocess.CreateProcess(
executable, args,
- None, None, # No special security
- 1, # Must inherit handles!
+ None, None, # No special security
+ 1, # Must inherit handles!
creationflags,
winprocess.EnvironmentBlock(env),
cwd, startupinfo)
self._child_created = True
self._handle = hp
self._thread = ht
self.pid = pid
self.tid = tid
@@ -282,60 +289,61 @@ class ProcessHandlerMixin(object):
# the process and any sub-processes
# Create the IO Completion Port
self._io_port = winprocess.CreateIoCompletionPort()
self._job = winprocess.CreateJobObject()
# Now associate the io comp port and the job object
joacp = JOBOBJECT_ASSOCIATE_COMPLETION_PORT(winprocess.COMPKEY_JOBOBJECT,
self._io_port)
- winprocess.SetInformationJobObject(self._job,
- JobObjectAssociateCompletionPortInformation,
- addressof(joacp),
- sizeof(joacp)
- )
+ winprocess.SetInformationJobObject(
+ self._job,
+ JobObjectAssociateCompletionPortInformation,
+ addressof(joacp),
+ sizeof(joacp)
+ )
# Allow subprocesses to break away from us - necessary for
# flash with protected mode
jbli = JOBOBJECT_BASIC_LIMIT_INFORMATION(
- c_longlong(0), # per process time limit (ignored)
- c_longlong(0), # per job user time limit (ignored)
- winprocess.JOB_OBJECT_LIMIT_BREAKAWAY_OK,
- 0, # min working set (ignored)
- 0, # max working set (ignored)
- 0, # active process limit (ignored)
- None, # affinity (ignored)
- 0, # Priority class (ignored)
- 0, # Scheduling class (ignored)
- )
+ c_longlong(0), # per process time limit (ignored)
+ c_longlong(0), # per job user time limit (ignored)
+ winprocess.JOB_OBJECT_LIMIT_BREAKAWAY_OK,
+ 0, # min working set (ignored)
+ 0, # max working set (ignored)
+ 0, # active process limit (ignored)
+ None, # affinity (ignored)
+ 0, # Priority class (ignored)
+ 0, # Scheduling class (ignored)
+ )
iocntr = IO_COUNTERS()
jeli = JOBOBJECT_EXTENDED_LIMIT_INFORMATION(
- jbli, # basic limit info struct
- iocntr, # io_counters (ignored)
- 0, # process mem limit (ignored)
- 0, # job mem limit (ignored)
- 0, # peak process limit (ignored)
- 0) # peak job limit (ignored)
+ jbli, # basic limit info struct
+ iocntr, # io_counters (ignored)
+ 0, # process mem limit (ignored)
+ 0, # job mem limit (ignored)
+ 0, # peak process limit (ignored)
+ 0) # peak job limit (ignored)
winprocess.SetInformationJobObject(self._job,
JobObjectExtendedLimitInformation,
addressof(jeli),
sizeof(jeli)
)
# Assign the job object to the process
winprocess.AssignProcessToJobObject(self._job, int(hp))
# It's overkill, but we use Queue to signal between threads
# because it handles errors more gracefully than event or condition.
self._process_events = Queue()
# Spin up our thread for managing the IO Completion Port
- self._procmgrthread = threading.Thread(target = self._procmgr)
+ self._procmgrthread = threading.Thread(target=self._procmgr)
except:
print >> sys.stderr, """Exception trying to use job objects;
falling back to not using job objects for managing child processes"""
tb = traceback.format_exc()
print >> sys.stderr, tb
# Ensure no dangling handles left behind
self._cleanup_job_io_port()
else:
@@ -345,27 +353,26 @@ falling back to not using job objects fo
if getattr(self, '_procmgrthread', None):
self._procmgrthread.start()
ht.Close()
for i in (p2cread, c2pwrite, errwrite):
if i is not None:
i.Close()
+ # Per:
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/hh448388%28v=vs.85%29.aspx
+ # Nesting jobs came in with windows versions starting with 6.2 according to the table
+ # on this page:
+ # https://msdn.microsoft.com/en-us/library/ms724834%28v=vs.85%29.aspx
def _can_nest_jobs(self):
- # Per:
- # https://msdn.microsoft.com/en-us/library/windows/desktop/hh448388%28v=vs.85%29.aspx
- # Nesting jobs came in with windows versions starting with 6.2 according to the table
- # on this page:
- # https://msdn.microsoft.com/en-us/library/ms724834%28v=vs.85%29.aspx
winver = sys.getwindowsversion()
return (winver.major > 6 or
winver.major == 6 and winver.minor >= 2)
-
# Windows Process Manager - watches the IO Completion Port and
# keeps track of child processes
def _procmgr(self):
if not (self._io_port) or not (self._job):
return
try:
self._poll_iocompletion_port()
@@ -394,20 +401,24 @@ falling back to not using job objects fo
if countdowntokill != 0:
diff = datetime.now() - countdowntokill
# Arbitrarily wait 3 minutes for windows to get its act together
# Windows sometimes takes a small nap between notifying the
# IO Completion port and actually killing the children, and we
# don't want to mistake that situation for the situation of an unexpected
# parent abort (which is what we're looking for here).
if diff.seconds > self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY:
- print >> sys.stderr, "WARNING | IO Completion Port failed to signal process shutdown"
- print >> sys.stderr, "Parent process %s exited with children alive:" % self.pid
- print >> sys.stderr, "PIDS: %s" % ', '.join([str(i) for i in self._spawned_procs])
- print >> sys.stderr, "Attempting to kill them, but no guarantee of success"
+ print >> sys.stderr, \
+ "WARNING | IO Completion Port failed to signal process shutdown"
+ print >> sys.stderr, \
+ "Parent process %s exited with children alive:" % self.pid
+ print >> sys.stderr, \
+ "PIDS: %s" % ', '.join([str(i) for i in self._spawned_procs])
+ print >> sys.stderr, \
+ "Attempting to kill them, but no guarantee of success"
self.kill()
self._process_events.put({self.pid: 'FINISHED'})
break
if not portstatus:
# Check to see what happened
errcode = winprocess.GetLastError()
@@ -415,17 +426,19 @@ falling back to not using job objects fo
# Then something has killed the port, break the loop
print >> sys.stderr, "IO Completion Port unexpectedly closed"
self._process_events.put({self.pid: 'FINISHED'})
break
elif errcode == winprocess.WAIT_TIMEOUT:
# Timeouts are expected, just keep on polling
continue
else:
- print >> sys.stderr, "Error Code %s trying to query IO Completion Port, exiting" % errcode
+ print >> sys.stderr, \
+ "Error Code %s trying to query IO Completion Port, " \
+ "exiting" % errcode
raise WinError(errcode)
break
if compkey.value == winprocess.COMPKEY_TERMINATE.value:
self.debug("compkeyterminate detected")
# Then we're done
break
@@ -481,22 +494,22 @@ falling back to not using job objects fo
if self._job and threadalive and threading.current_thread() != self._procmgrthread:
self.debug("waiting with IO completion port")
# Then we are managing with IO Completion Ports
# wait on a signal so we know when we have seen the last
# process come through.
# We use queues to synchronize between the thread and this
# function because events just didn't have robust enough error
# handling on pre-2.7 versions
- err = None
try:
# timeout is the max amount of time the procmgr thread will wait for
# child processes to shutdown before killing them with extreme prejudice.
- item = self._process_events.get(timeout=self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY +
- self.MAX_PROCESS_KILL_DELAY)
+ item = self._process_events.get(
+ timeout=self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY +
+ self.MAX_PROCESS_KILL_DELAY)
if item[self.pid] == 'FINISHED':
self.debug("received 'FINISHED' from _procmgrthread")
self._process_events.task_done()
except:
traceback.print_exc()
raise OSError("IO Completion Port failed to signal process shutdown")
finally:
if self._handle:
@@ -545,17 +558,18 @@ falling back to not using job objects fo
if getattr(self, '_job') and self._job != winprocess.INVALID_HANDLE_VALUE:
self._job.Close()
self._job = None
else:
# If windows already freed our handle just set it to none
# (saw this intermittently while testing)
self._job = None
- if getattr(self, '_io_port', None) and self._io_port != winprocess.INVALID_HANDLE_VALUE:
+ if getattr(self, '_io_port', None) and \
+ self._io_port != winprocess.INVALID_HANDLE_VALUE:
self._io_port.Close()
self._io_port = None
else:
self._io_port = None
if getattr(self, '_procmgrthread', None):
self._procmgrthread = None
@@ -597,47 +611,49 @@ falling back to not using job objects fo
# returncode attribute
if status > 255:
return status >> 8
return -status
except OSError as e:
if getattr(e, "errno", None) != 10:
# Error 10 is "no child process", which could indicate normal
# close
- print >> sys.stderr, "Encountered error waiting for pid to close: %s" % e
+ print >> sys.stderr, \
+ "Encountered error waiting for pid to close: %s" % e
raise
return self.returncode
else:
# For non-group wait, call base class
subprocess.Popen.wait(self)
return self.returncode
def _cleanup(self):
pass
else:
# An unrecognized platform, we will call the base class for everything
- print >> sys.stderr, "Unrecognized platform, process groups may not be managed properly"
+ print >> sys.stderr, \
+ "Unrecognized platform, process groups may not be managed properly"
def _wait(self):
self.returncode = subprocess.Popen.wait(self)
return self.returncode
def _cleanup(self):
pass
def __init__(self,
cmd,
args=None,
cwd=None,
env=None,
- ignore_children = False,
- kill_on_timeout = True,
+ ignore_children=False,
+ kill_on_timeout=True,
processOutputLine=(),
processStderrLine=(),
onTimeout=(),
onFinish=(),
**kwargs):
self.cmd = cmd
self.args = args
self.cwd = cwd
@@ -673,17 +689,17 @@ falling back to not using job objects fo
self.reader = ProcessReader(stdout_callback=processOutputLine,
stderr_callback=processStderrLine,
finished_callback=onFinish,
timeout_callback=onTimeout)
# It is common for people to pass in the entire array with the cmd and
# the args together since this is how Popen uses it. Allow for that.
if isinstance(self.cmd, list):
- if self.args != None:
+ if self.args is not None:
raise TypeError("cmd and args must not both be lists")
(self.cmd, self.args) = (self.cmd[0], self.cmd[1:])
elif self.args is None:
self.args = []
@property
def timedOut(self):
"""True if the process has timed out."""
@@ -860,24 +876,27 @@ falling back to not using job objects fo
self.proc.detached_pid = new_pid
print >> sys.stdout, \
'Child process with id "%s" has been marked as detached because it is no ' \
'longer in the managed process group. Keeping reference to the process id ' \
'"%s" which is the new child process.' % (self.pid, new_pid)
class CallableList(list):
+
def __call__(self, *args, **kwargs):
for e in self:
e(*args, **kwargs)
def __add__(self, lst):
return CallableList(list.__add__(self, lst))
+
class ProcessReader(object):
+
def __init__(self, stdout_callback=None, stderr_callback=None,
finished_callback=None, timeout_callback=None,
timeout=None, output_timeout=None):
self.stdout_callback = stdout_callback or (lambda line: True)
self.stderr_callback = stderr_callback or (lambda line: True)
self.finished_callback = finished_callback or (lambda: True)
self.timeout_callback = timeout_callback or (lambda: True)
self.timeout = timeout
@@ -928,17 +947,17 @@ class ProcessReader(object):
timeout = self.timeout
if timeout is not None:
timeout += start_time
output_timeout = self.output_timeout
if output_timeout is not None:
output_timeout += start_time
while (stdout_reader and stdout_reader.is_alive()) \
- or (stderr_reader and stderr_reader.is_alive()):
+ or (stderr_reader and stderr_reader.is_alive()):
has_line = True
try:
line, callback = queue.get(True, 0.02)
except Empty:
has_line = False
now = time.time()
if not has_line:
if output_timeout is not None and now > output_timeout:
@@ -964,56 +983,60 @@ class ProcessReader(object):
if not timed_out:
self.finished_callback()
def is_alive(self):
if self.thread:
return self.thread.is_alive()
return False
-### default output handlers
-### these should be callables that take the output line
+# default output handlers
+# these should be callables that take the output line
+
class StoreOutput(object):
"""accumulate stdout"""
def __init__(self):
self.output = []
def __call__(self, line):
self.output.append(line)
+
class StreamOutput(object):
"""pass output to a stream and flush"""
def __init__(self, stream):
self.stream = stream
def __call__(self, line):
try:
self.stream.write(line + '\n')
except UnicodeDecodeError:
# TODO: Workaround for bug #991866 to make sure we can display when
# when normal UTF-8 display is failing
self.stream.write(line.decode('iso8859-1') + '\n')
self.stream.flush()
+
class LogOutput(StreamOutput):
"""pass output to a file"""
def __init__(self, filename):
self.file_obj = open(filename, 'a')
StreamOutput.__init__(self, self.file_obj)
def __del__(self):
if self.file_obj is not None:
self.file_obj.close()
-### front end class with the default handlers
+# front end class with the default handlers
+
class ProcessHandler(ProcessHandlerMixin):
"""
Convenience class for handling processes with default output handlers.
By default, all output is sent to stdout. This can be disabled by setting
the *stream* argument to None.
--- a/testing/mozbase/mozprocess/mozprocess/qijo.py
+++ b/testing/mozbase/mozprocess/mozprocess/qijo.py
@@ -1,120 +1,143 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
-from ctypes import c_void_p, POINTER, sizeof, Structure, windll, WinError, WINFUNCTYPE, addressof, c_size_t, c_ulong
+from ctypes import (
+ c_void_p,
+ POINTER,
+ sizeof,
+ Structure,
+ windll,
+ WinError,
+ WINFUNCTYPE,
+ addressof,
+ c_size_t,
+ c_ulong
+)
+
from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LARGE_INTEGER
LPVOID = c_void_p
LPDWORD = POINTER(DWORD)
SIZE_T = c_size_t
ULONG_PTR = POINTER(c_ulong)
# A ULONGLONG is a 64-bit unsigned integer.
# Thus there are 8 bytes in a ULONGLONG.
# XXX why not import c_ulonglong ?
ULONGLONG = BYTE * 8
+
class IO_COUNTERS(Structure):
# The IO_COUNTERS struct is 6 ULONGLONGs.
# TODO: Replace with non-dummy fields.
_fields_ = [('dummy', ULONGLONG * 6)]
+
class JOBOBJECT_BASIC_ACCOUNTING_INFORMATION(Structure):
_fields_ = [('TotalUserTime', LARGE_INTEGER),
('TotalKernelTime', LARGE_INTEGER),
('ThisPeriodTotalUserTime', LARGE_INTEGER),
('ThisPeriodTotalKernelTime', LARGE_INTEGER),
('TotalPageFaultCount', DWORD),
('TotalProcesses', DWORD),
('ActiveProcesses', DWORD),
('TotalTerminatedProcesses', DWORD)]
+
class JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION(Structure):
_fields_ = [('BasicInfo', JOBOBJECT_BASIC_ACCOUNTING_INFORMATION),
('IoInfo', IO_COUNTERS)]
+
# see http://msdn.microsoft.com/en-us/library/ms684147%28VS.85%29.aspx
class JOBOBJECT_BASIC_LIMIT_INFORMATION(Structure):
_fields_ = [('PerProcessUserTimeLimit', LARGE_INTEGER),
('PerJobUserTimeLimit', LARGE_INTEGER),
('LimitFlags', DWORD),
('MinimumWorkingSetSize', SIZE_T),
('MaximumWorkingSetSize', SIZE_T),
('ActiveProcessLimit', DWORD),
('Affinity', ULONG_PTR),
('PriorityClass', DWORD),
('SchedulingClass', DWORD)
]
+
class JOBOBJECT_ASSOCIATE_COMPLETION_PORT(Structure):
_fields_ = [('CompletionKey', c_ulong),
('CompletionPort', HANDLE)]
+
# see http://msdn.microsoft.com/en-us/library/ms684156%28VS.85%29.aspx
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(Structure):
_fields_ = [('BasicLimitInformation', JOBOBJECT_BASIC_LIMIT_INFORMATION),
('IoInfo', IO_COUNTERS),
('ProcessMemoryLimit', SIZE_T),
('JobMemoryLimit', SIZE_T),
('PeakProcessMemoryUsed', SIZE_T),
('PeakJobMemoryUsed', SIZE_T)]
# These numbers below come from:
# http://msdn.microsoft.com/en-us/library/ms686216%28v=vs.85%29.aspx
JobObjectAssociateCompletionPortInformation = 7
JobObjectBasicAndIoAccountingInformation = 8
JobObjectExtendedLimitInformation = 9
+
class JobObjectInfo(object):
- mapping = { 'JobObjectBasicAndIoAccountingInformation': 8,
- 'JobObjectExtendedLimitInformation': 9,
- 'JobObjectAssociateCompletionPortInformation': 7
- }
+ mapping = {'JobObjectBasicAndIoAccountingInformation': 8,
+ 'JobObjectExtendedLimitInformation': 9,
+ 'JobObjectAssociateCompletionPortInformation': 7}
structures = {
- 7: JOBOBJECT_ASSOCIATE_COMPLETION_PORT,
- 8: JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION,
- 9: JOBOBJECT_EXTENDED_LIMIT_INFORMATION
- }
+ 7: JOBOBJECT_ASSOCIATE_COMPLETION_PORT,
+ 8: JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION,
+ 9: JOBOBJECT_EXTENDED_LIMIT_INFORMATION
+ }
+
def __init__(self, _class):
if isinstance(_class, basestring):
- assert _class in self.mapping, 'Class should be one of %s; you gave %s' % (self.mapping, _class)
+ assert _class in self.mapping, \
+ 'Class should be one of %s; you gave %s' % (self.mapping, _class)
_class = self.mapping[_class]
- assert _class in self.structures, 'Class should be one of %s; you gave %s' % (self.structures, _class)
+ assert _class in self.structures, \
+ 'Class should be one of %s; you gave %s' % (self.structures, _class)
self.code = _class
self.info = self.structures[_class]()
QueryInformationJobObjectProto = WINFUNCTYPE(
BOOL, # Return type
HANDLE, # hJob
DWORD, # JobObjectInfoClass
LPVOID, # lpJobObjectInfo
DWORD, # cbJobObjectInfoLength
LPDWORD # lpReturnLength
- )
+)
QueryInformationJobObjectFlags = (
(1, 'hJob'),
(1, 'JobObjectInfoClass'),
(1, 'lpJobObjectInfo'),
(1, 'cbJobObjectInfoLength'),
(1, 'lpReturnLength', None)
- )
+)
_QueryInformationJobObject = QueryInformationJobObjectProto(
('QueryInformationJobObject', windll.kernel32),
QueryInformationJobObjectFlags
- )
+)
+
class SubscriptableReadOnlyStruct(object):
+
def __init__(self, struct):
self._struct = struct
def _delegate(self, name):
result = getattr(self._struct, name)
if isinstance(result, Structure):
return SubscriptableReadOnlyStruct(result)
return result
@@ -124,19 +147,20 @@ class SubscriptableReadOnlyStruct(object
if fname == name]
if match:
return self._delegate(name)
raise KeyError(name)
def __getattr__(self, name):
return self._delegate(name)
+
def QueryInformationJobObject(hJob, JobObjectInfoClass):
jobinfo = JobObjectInfo(JobObjectInfoClass)
result = _QueryInformationJobObject(
hJob=hJob,
JobObjectInfoClass=jobinfo.code,
lpJobObjectInfo=addressof(jobinfo.info),
cbJobObjectInfoLength=sizeof(jobinfo.info)
- )
+ )
if not result:
raise WinError()
return SubscriptableReadOnlyStruct(jobinfo.info)
--- a/testing/mozbase/mozprocess/mozprocess/winprocess.py
+++ b/testing/mozbase/mozprocess/mozprocess/winprocess.py
@@ -32,78 +32,83 @@
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import, unicode_literals
import sys
+import subprocess
from ctypes import c_void_p, POINTER, sizeof, Structure, windll, WinError, WINFUNCTYPE, c_ulong
from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LPCWSTR, LPWSTR, UINT, WORD
from .qijo import QueryInformationJobObject
LPVOID = c_void_p
LPBYTE = POINTER(BYTE)
LPDWORD = POINTER(DWORD)
LPBOOL = POINTER(BOOL)
LPULONG = POINTER(c_ulong)
+
def ErrCheckBool(result, func, args):
"""errcheck function for Windows functions that return a BOOL True
on success"""
if not result:
raise WinError()
return args
# AutoHANDLE
class AutoHANDLE(HANDLE):
"""Subclass of HANDLE which will call CloseHandle() on deletion."""
-
+
CloseHandleProto = WINFUNCTYPE(BOOL, HANDLE)
CloseHandle = CloseHandleProto(("CloseHandle", windll.kernel32))
CloseHandle.errcheck = ErrCheckBool
-
+
def Close(self):
if self.value and self.value != HANDLE(-1).value:
self.CloseHandle(self)
self.value = 0
-
+
def __del__(self):
self.Close()
def __int__(self):
return self.value
+
def ErrCheckHandle(result, func, args):
"""errcheck function for Windows functions that return a HANDLE."""
if not result:
raise WinError()
return AutoHANDLE(result)
# PROCESS_INFORMATION structure
+
class PROCESS_INFORMATION(Structure):
_fields_ = [("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessID", DWORD),
("dwThreadID", DWORD)]
def __init__(self):
Structure.__init__(self)
-
+
self.cb = sizeof(self)
LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
# STARTUPINFO structure
+
class STARTUPINFO(Structure):
_fields_ = [("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
@@ -116,30 +121,31 @@ class STARTUPINFO(Structure):
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE)
]
LPSTARTUPINFO = POINTER(STARTUPINFO)
-SW_HIDE = 0
+SW_HIDE = 0
-STARTF_USESHOWWINDOW = 0x01
-STARTF_USESIZE = 0x02
-STARTF_USEPOSITION = 0x04
-STARTF_USECOUNTCHARS = 0x08
+STARTF_USESHOWWINDOW = 0x01
+STARTF_USESIZE = 0x02
+STARTF_USEPOSITION = 0x04
+STARTF_USECOUNTCHARS = 0x08
STARTF_USEFILLATTRIBUTE = 0x10
-STARTF_RUNFULLSCREEN = 0x20
-STARTF_FORCEONFEEDBACK = 0x40
+STARTF_RUNFULLSCREEN = 0x20
+STARTF_FORCEONFEEDBACK = 0x40
STARTF_FORCEOFFFEEDBACK = 0x80
-STARTF_USESTDHANDLES = 0x100
+STARTF_USESTDHANDLES = 0x100
# EnvironmentBlock
+
class EnvironmentBlock:
"""An object which can be passed as the lpEnv parameter of CreateProcess.
It is initialized with a dictionary."""
def __init__(self, env):
if not env:
self._as_parameter_ = None
else:
@@ -152,20 +158,19 @@ class EnvironmentBlock:
v = v.decode(fs_encoding, 'replace')
values.append("{}={}".format(k, v))
values.append("")
self._as_parameter_ = LPCWSTR("\0".join(values))
# Error Messages we need to watch for go here
# See: http://msdn.microsoft.com/en-us/library/ms681388%28v=vs.85%29.aspx
ERROR_ABANDONED_WAIT_0 = 735
-
+
# GetLastError()
-GetLastErrorProto = WINFUNCTYPE(DWORD # Return Type
- )
+GetLastErrorProto = WINFUNCTYPE(DWORD) # Return Type
GetLastErrorFlags = ()
GetLastError = GetLastErrorProto(("GetLastError", windll.kernel32), GetLastErrorFlags)
# CreateProcess()
CreateProcessProto = WINFUNCTYPE(BOOL, # Return type
LPCWSTR, # lpApplicationName
LPWSTR, # lpCommandLine
@@ -185,16 +190,17 @@ CreateProcessFlags = ((1, "lpApplication
(1, "lpThreadAttributes", None),
(1, "bInheritHandles", True),
(1, "dwCreationFlags", 0),
(1, "lpEnvironment", None),
(1, "lpCurrentDirectory", None),
(1, "lpStartupInfo"),
(2, "lpProcessInformation"))
+
def ErrCheckCreateProcess(result, func, args):
ErrCheckBool(result, func, args)
# return a tuple (hProcess, hThread, dwProcessID, dwThreadID)
pi = args[9]
return AutoHANDLE(pi.hProcess), AutoHANDLE(pi.hThread), pi.dwProcessID, pi.dwThreadID
CreateProcess = CreateProcessProto(("CreateProcessW", windll.kernel32),
CreateProcessFlags)
@@ -204,94 +210,96 @@ CreateProcess.errcheck = ErrCheckCreateP
CREATE_BREAKAWAY_FROM_JOB = 0x01000000
CREATE_DEFAULT_ERROR_MODE = 0x04000000
CREATE_NEW_CONSOLE = 0x00000010
CREATE_NEW_PROCESS_GROUP = 0x00000200
CREATE_NO_WINDOW = 0x08000000
CREATE_SUSPENDED = 0x00000004
CREATE_UNICODE_ENVIRONMENT = 0x00000400
-# Flags for IOCompletion ports (some of these would probably be defined if
-# we used the win32 extensions for python, but we don't want to do that if we
+# Flags for IOCompletion ports (some of these would probably be defined if
+# we used the win32 extensions for python, but we don't want to do that if we
# can help it.
-INVALID_HANDLE_VALUE = HANDLE(-1) # From winbase.h
+INVALID_HANDLE_VALUE = HANDLE(-1) # From winbase.h
# Self Defined Constants for IOPort <--> Job Object communication
COMPKEY_TERMINATE = c_ulong(0)
COMPKEY_JOBOBJECT = c_ulong(1)
# flags for job limit information
# see http://msdn.microsoft.com/en-us/library/ms684147%28VS.85%29.aspx
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000
# Flags for Job Object Completion Port Message IDs from winnt.h
# See also: http://msdn.microsoft.com/en-us/library/ms684141%28v=vs.85%29.aspx
-JOB_OBJECT_MSG_END_OF_JOB_TIME = 1
-JOB_OBJECT_MSG_END_OF_PROCESS_TIME = 2
-JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT = 3
-JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO = 4
-JOB_OBJECT_MSG_NEW_PROCESS = 6
-JOB_OBJECT_MSG_EXIT_PROCESS = 7
-JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS = 8
-JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT = 9
-JOB_OBJECT_MSG_JOB_MEMORY_LIMIT = 10
+JOB_OBJECT_MSG_END_OF_JOB_TIME = 1
+JOB_OBJECT_MSG_END_OF_PROCESS_TIME = 2
+JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT = 3
+JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO = 4
+JOB_OBJECT_MSG_NEW_PROCESS = 6
+JOB_OBJECT_MSG_EXIT_PROCESS = 7
+JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS = 8
+JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT = 9
+JOB_OBJECT_MSG_JOB_MEMORY_LIMIT = 10
# See winbase.h
DEBUG_ONLY_THIS_PROCESS = 0x00000002
DEBUG_PROCESS = 0x00000001
DETACHED_PROCESS = 0x00000008
-
-# GetQueuedCompletionPortStatus - http://msdn.microsoft.com/en-us/library/aa364986%28v=vs.85%29.aspx
+
+# GetQueuedCompletionPortStatus -
+# http://msdn.microsoft.com/en-us/library/aa364986%28v=vs.85%29.aspx
GetQueuedCompletionStatusProto = WINFUNCTYPE(BOOL, # Return Type
HANDLE, # Completion Port
LPDWORD, # Msg ID
LPULONG, # Completion Key
- LPULONG, # PID Returned from the call (may be null)
+ # PID Returned from the call (may be null)
+ LPULONG,
DWORD) # milliseconds to wait
GetQueuedCompletionStatusFlags = ((1, "CompletionPort", INVALID_HANDLE_VALUE),
(1, "lpNumberOfBytes", None),
(1, "lpCompletionKey", None),
(1, "lpPID", None),
(1, "dwMilliseconds", 0))
GetQueuedCompletionStatus = GetQueuedCompletionStatusProto(("GetQueuedCompletionStatus",
windll.kernel32),
GetQueuedCompletionStatusFlags)
# CreateIOCompletionPort
# Note that the completion key is just a number, not a pointer.
CreateIoCompletionPortProto = WINFUNCTYPE(HANDLE, # Return Type
HANDLE, # File Handle
HANDLE, # Existing Completion Port
c_ulong, # Completion Key
- DWORD # Number of Threads
- )
+ DWORD) # Number of Threads
+
CreateIoCompletionPortFlags = ((1, "FileHandle", INVALID_HANDLE_VALUE),
(1, "ExistingCompletionPort", 0),
(1, "CompletionKey", c_ulong(0)),
(1, "NumberOfConcurrentThreads", 0))
CreateIoCompletionPort = CreateIoCompletionPortProto(("CreateIoCompletionPort",
windll.kernel32),
- CreateIoCompletionPortFlags)
+ CreateIoCompletionPortFlags)
CreateIoCompletionPort.errcheck = ErrCheckHandle
# SetInformationJobObject
SetInformationJobObjectProto = WINFUNCTYPE(BOOL, # Return Type
HANDLE, # Job Handle
DWORD, # Type of Class next param is
LPVOID, # Job Object Class
- DWORD # Job Object Class Length
- )
+ DWORD) # Job Object Class Length
+
SetInformationJobObjectProtoFlags = ((1, "hJob", None),
(1, "JobObjectInfoClass", None),
(1, "lpJobObjectInfo", None),
(1, "cbJobObjectInfoLength", 0))
SetInformationJobObject = SetInformationJobObjectProto(("SetInformationJobObject",
windll.kernel32),
- SetInformationJobObjectProtoFlags)
+ SetInformationJobObjectProtoFlags)
SetInformationJobObject.errcheck = ErrCheckBool
# CreateJobObject()
CreateJobObjectProto = WINFUNCTYPE(HANDLE, # Return type
LPVOID, # lpJobAttributes
LPCWSTR # lpName
)
@@ -333,17 +341,17 @@ try:
LPBOOL # Result
)
IsProcessInJobFlags = ((1, "ProcessHandle"),
(1, "JobHandle", HANDLE(0)),
(2, "Result"))
IsProcessInJob = IsProcessInJobProto(
("IsProcessInJob", windll.kernel32),
IsProcessInJobFlags)
- IsProcessInJob.errcheck = ErrCheckBool
+ IsProcessInJob.errcheck = ErrCheckBool
except AttributeError:
# windows 2k doesn't have this API
def IsProcessInJob(process):
return False
# ResumeThread()
@@ -358,45 +366,45 @@ ResumeThreadProto = WINFUNCTYPE(DWORD,
)
ResumeThreadFlags = ((1, "hThread"),)
ResumeThread = ResumeThreadProto(("ResumeThread", windll.kernel32),
ResumeThreadFlags)
ResumeThread.errcheck = ErrCheckResumeThread
# TerminateProcess()
-TerminateProcessProto = WINFUNCTYPE(BOOL, # Return type
- HANDLE, # hProcess
- UINT # uExitCode
+TerminateProcessProto = WINFUNCTYPE(BOOL, # Return type
+ HANDLE, # hProcess
+ UINT # uExitCode
)
TerminateProcessFlags = ((1, "hProcess"),
(1, "uExitCode", 127))
TerminateProcess = TerminateProcessProto(
("TerminateProcess", windll.kernel32),
TerminateProcessFlags)
TerminateProcess.errcheck = ErrCheckBool
# TerminateJobObject()
-TerminateJobObjectProto = WINFUNCTYPE(BOOL, # Return type
- HANDLE, # hJob
- UINT # uExitCode
+TerminateJobObjectProto = WINFUNCTYPE(BOOL, # Return type
+ HANDLE, # hJob
+ UINT # uExitCode
)
TerminateJobObjectFlags = ((1, "hJob"),
(1, "uExitCode", 127))
TerminateJobObject = TerminateJobObjectProto(
("TerminateJobObject", windll.kernel32),
TerminateJobObjectFlags)
TerminateJobObject.errcheck = ErrCheckBool
# WaitForSingleObject()
-WaitForSingleObjectProto = WINFUNCTYPE(DWORD, # Return type
- HANDLE, # hHandle
- DWORD, # dwMilliseconds
+WaitForSingleObjectProto = WINFUNCTYPE(DWORD, # Return type
+ HANDLE, # hHandle
+ DWORD, # dwMilliseconds
)
WaitForSingleObjectFlags = ((1, "hHandle"),
(1, "dwMilliseconds", -1))
WaitForSingleObject = WaitForSingleObjectProto(
("WaitForSingleObject", windll.kernel32),
WaitForSingleObjectFlags)
# http://msdn.microsoft.com/en-us/library/ms681381%28v=vs.85%29.aspx
@@ -408,60 +416,64 @@ WAIT_ABANDONED = 0x0080
# http://msdn.microsoft.com/en-us/library/ms683189%28VS.85%29.aspx
STILL_ACTIVE = 259
# Used when we terminate a process.
ERROR_CONTROL_C_EXIT = 0x23c
# GetExitCodeProcess()
-GetExitCodeProcessProto = WINFUNCTYPE(BOOL, # Return type
- HANDLE, # hProcess
- LPDWORD, # lpExitCode
+GetExitCodeProcessProto = WINFUNCTYPE(BOOL, # Return type
+ HANDLE, # hProcess
+ LPDWORD, # lpExitCode
)
GetExitCodeProcessFlags = ((1, "hProcess"),
(2, "lpExitCode"))
GetExitCodeProcess = GetExitCodeProcessProto(
("GetExitCodeProcess", windll.kernel32),
GetExitCodeProcessFlags)
GetExitCodeProcess.errcheck = ErrCheckBool
+
def CanCreateJobObject():
currentProc = GetCurrentProcess()
if IsProcessInJob(currentProc):
jobinfo = QueryInformationJobObject(HANDLE(0), 'JobObjectExtendedLimitInformation')
limitflags = jobinfo['BasicLimitInformation']['LimitFlags']
- return bool(limitflags & JOB_OBJECT_LIMIT_BREAKAWAY_OK) or bool(limitflags & JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK)
+ return bool(limitflags & JOB_OBJECT_LIMIT_BREAKAWAY_OK) or \
+ bool(limitflags & JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK)
else:
return True
-### testing functions
+# testing functions
+
def parent():
print 'Starting parent'
currentProc = GetCurrentProcess()
if IsProcessInJob(currentProc):
print >> sys.stderr, "You should not be in a job object to test"
sys.exit(1)
assert CanCreateJobObject()
print 'File: %s' % __file__
command = [sys.executable, __file__, '-child']
print 'Running command: %s' % command
- process = Popen(command)
+ process = subprocess.Popen(command)
process.kill()
code = process.returncode
print 'Child code: %s' % code
assert code == 127
-
+
+
def child():
print 'Starting child'
currentProc = GetCurrentProcess()
injob = IsProcessInJob(currentProc)
print "Is in a job?: %s" % injob
can_create = CanCreateJobObject()
print 'Can create job?: %s' % can_create
- process = Popen('c:\\windows\\notepad.exe')
+ process = subprocess.Popen('c:\\windows\\notepad.exe')
assert process._job
jobinfo = QueryInformationJobObject(process._job, 'JobObjectExtendedLimitInformation')
print 'Job info: %s' % jobinfo
limitflags = jobinfo['BasicLimitInformation']['LimitFlags']
print 'LimitFlags: %s' % limitflags
process.kill()
--- a/testing/mozbase/mozprocess/tests/proclaunch.py
+++ b/testing/mozbase/mozprocess/tests/proclaunch.py
@@ -3,16 +3,17 @@
import argparse
import collections
import ConfigParser
import multiprocessing
import time
ProcessNode = collections.namedtuple('ProcessNode', ['maxtime', 'children'])
+
class ProcessLauncher(object):
""" Create and Launch process trees specified by a '.ini' file
Typical .ini file accepted by this class :
[main]
children=c1, 1*c2, 4*c3
@@ -73,37 +74,38 @@ class ProcessLauncher(object):
- File cannot be parsed because of incorrect specification
:param manifest: Path to the manifest file that contains the
configuration for the process tree to be launched
:verbose: Print the process start and end information.
Genrates a lot of output. Disabled by default.
"""
- self.verbose=verbose
+ self.verbose = verbose
# Children is a dictionary used to store information from the,
# Configuration file in a more usable format.
# Key : string contain the name of child process
# Value : A Named tuple of the form (max_time, (list of child processes of Key))
# Where each child process is a list of type: [count to run, name of child]
self.children = {}
-
cfgparser = ConfigParser.ConfigParser()
if not cfgparser.read(manifest):
raise IOError('The manifest %s could not be found/opened', manifest)
sections = cfgparser.sections()
for section in sections:
# Maxtime is a mandatory option
# ConfigParser.NoOptionError is raised if maxtime does not exist
if '*' in section or ',' in section:
- raise ConfigParser.ParsingError('%s is not a valid section name. Section names cannot contain a \'*\' or \',\'.' % section)
+ raise ConfigParser.ParsingError(
+ "%s is not a valid section name. "
+ "Section names cannot contain a '*' or ','." % section)
m_time = cfgparser.get(section, 'maxtime')
try:
m_time = int(m_time)
except ValueError:
raise ValueError('Expected maxtime to be an integer, specified %s' % m_time)
# No children option implies there are no further children
# Leaving the children option blank is an error.
@@ -121,19 +123,21 @@ class ProcessLauncher(object):
for i, child in enumerate(children):
# No multiplicate factor infront of a process implies 1
if len(child) == 1:
children[i] = [1, child[0]]
else:
children[i][0] = int(child[0])
if children[i][1] not in sections:
- raise ConfigParser.ParsingError('No section corresponding to child %s' % child[1])
+ raise ConfigParser.ParsingError(
+ 'No section corresponding to child %s' % child[1])
except ValueError:
- raise ValueError('Expected process count to be an integer, specified %s' % child[0])
+ raise ValueError(
+ 'Expected process count to be an integer, specified %s' % child[0])
except ConfigParser.NoOptionError:
children = None
pn = ProcessNode(maxtime=m_time,
children=children)
self.children[section] = pn
def run(self):
@@ -150,29 +154,32 @@ class ProcessLauncher(object):
:param proc_name: File name of the manifest as a string.
:param level: Depth of the current process in the tree.
"""
if proc_name not in self.children.keys():
raise IOError("%s is not a valid process" % proc_name)
maxtime = self.children[proc_name].maxtime
if self.verbose:
- print "%sLaunching %s for %d*%d seconds" % (" "*level, proc_name, maxtime, self.UNIT_TIME)
+ print "%sLaunching %s for %d*%d seconds" % (" " * level,
+ proc_name,
+ maxtime,
+ self.UNIT_TIME)
while self.children[proc_name].children:
child = self.children[proc_name].children.pop()
count, child_proc = child
for i in range(count):
- p = multiprocessing.Process(target=self._run, args=(child[1], level+1))
+ p = multiprocessing.Process(target=self._run, args=(child[1], level + 1))
p.start()
self._launch(maxtime)
if self.verbose:
- print "%sFinished %s" % (" "*level, proc_name)
+ print "%sFinished %s" % (" " * level, proc_name)
def _launch(self, running_time):
"""
Create and launch a process and idles for the time specified by
`running_time`
:param running_time: Running time of the process in seconds.
"""
--- a/testing/mozbase/mozprocess/tests/proctest.py
+++ b/testing/mozbase/mozprocess/tests/proctest.py
@@ -15,17 +15,18 @@ class ProcTest(unittest.TestCase):
def determine_status(self, proc, isalive=False, expectedfail=()):
"""
Use to determine if the situation has failed.
Parameters:
proc -- the processhandler instance
isalive -- Use True to indicate we pass if the process exists; however, by default
the test will pass if the process does not exist (isalive == False)
- expectedfail -- Defaults to [], used to indicate a list of fields that are expected to fail
+ expectedfail -- Defaults to [], used to indicate a list of fields
+ that are expected to fail
"""
returncode = proc.proc.returncode
didtimeout = proc.didTimeout
detected = psutil.pid_exists(proc.pid)
output = ''
# ProcessHandler has output when store_output is set to True in the constructor
# (this is the default)
if getattr(proc, 'output'):
@@ -39,11 +40,13 @@ class ProcTest(unittest.TestCase):
self.assertNotEqual(returncode, None, "Detected unexpected None return code of")
if 'didtimeout' in expectedfail:
self.assertTrue(didtimeout, "Detected that process didn't time out")
else:
self.assertTrue(not didtimeout, "Detected that process timed out")
if isalive:
- self.assertTrue(detected, "Detected process is not running, process output: %s" % output)
+ self.assertTrue(detected, "Detected process is not running, "
+ "process output: %s" % output)
else:
- self.assertTrue(not detected, "Detected process is still running, process output: %s" % output)
+ self.assertTrue(not detected, "Detected process is still running, "
+ "process output: %s" % output)
--- a/testing/mozbase/mozprocess/tests/test_mozprocess.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess.py
@@ -8,16 +8,17 @@ import os
import subprocess
import sys
import unittest
import proctest
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
+
def make_proclaunch(aDir):
"""
Makes the proclaunch executable.
Params:
aDir - the directory in which to issue the make commands
Returns:
the path to the proclaunch executable that is generated
"""
@@ -31,28 +32,30 @@ def make_proclaunch(aDir):
# otherwise, if the make fails you may not notice
if os.path.exists(exepath):
os.remove(exepath)
# Ideally make should take care of both calls through recursion, but since it doesn't,
# on windows anyway (to file?), let's just call out both targets explicitly.
for command in [["make", "-C", "iniparser"],
["make"]]:
- process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=aDir)
+ process = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, cwd=aDir)
stdout, stderr = process.communicate()
if process.returncode:
# SomethingBadHappen; print all the things
print "%s: exit %d" % (command, process.returncode)
print "stdout:\n%s" % stdout
print "stderr:\n%s" % stderr
raise subprocess.CalledProcessError(process.returncode, command, stdout)
# ensure the launcher now exists
if not os.path.exists(exepath):
- raise AssertionError("proclaunch executable '%s' does not exist (sys.platform=%s)" % (exepath, sys.platform))
+ raise AssertionError("proclaunch executable '%s' "
+ "does not exist (sys.platform=%s)" % (exepath, sys.platform))
return exepath
class ProcTest(proctest.ProcTest):
# whether to remove created files on exit
cleanup = os.environ.get('CLEANUP', 'true').lower() in ('1', 'true')
@@ -77,17 +80,18 @@ class ProcTest(proctest.ProcTest):
errors = []
for path in files:
if os.path.exists(path):
try:
os.remove(path)
except OSError as e:
errors.append(str(e))
if errors:
- raise OSError("Error(s) encountered tearing down %s.%s:\n%s" % (cls.__module__, cls.__name__, '\n'.join(errors)))
+ raise OSError("Error(s) encountered tearing down "
+ "%s.%s:\n%s" % (cls.__module__, cls.__name__, '\n'.join(errors)))
def test_process_normal_finish(self):
"""Process is started, runs to completion while we wait for it"""
p = processhandler.ProcessHandler([self.proclaunch, "process_normal_finish.ini"],
cwd=here)
p.run()
p.wait()
@@ -135,17 +139,17 @@ class ProcTest(proctest.ProcTest):
args=["1", "2", "3"],
cwd=here)
self.assertEqual(p.commandline, self.proclaunch + ' 1 2 3')
def test_process_wait(self):
"""Process is started runs to completion while we wait indefinitely"""
p = processhandler.ProcessHandler([self.proclaunch,
- "process_waittimeout_10s.ini"],
+ "process_waittimeout_10s.ini"],
cwd=here)
p.run()
p.wait()
self.determine_status(p)
def test_process_timeout(self):
""" Process is started, runs but we time out waiting on it
@@ -158,16 +162,17 @@ class ProcTest(proctest.ProcTest):
self.determine_status(p, False, ['returncode', 'didtimeout'])
def test_process_timeout_no_kill(self):
""" Process is started, runs but we time out waiting on it
to complete. Process should not be killed.
"""
p = None
+
def timeout_handler():
self.assertEqual(p.proc.poll(), None)
p.kill()
p = processhandler.ProcessHandler([self.proclaunch, "process_waittimeout.ini"],
cwd=here,
onTimeout=(timeout_handler,),
kill_on_timeout=False)
p.run(timeout=1)
@@ -177,29 +182,29 @@ class ProcTest(proctest.ProcTest):
self.determine_status(p, False, ['returncode', 'didtimeout'])
def test_process_waittimeout(self):
"""
Process is started, then wait is called and times out.
Process is still running and didn't timeout
"""
p = processhandler.ProcessHandler([self.proclaunch,
- "process_waittimeout_10s.ini"],
+ "process_waittimeout_10s.ini"],
cwd=here)
p.run()
p.wait(timeout=5)
self.determine_status(p, True, ())
def test_process_waitnotimeout(self):
""" Process is started, runs to completion before our wait times out
"""
p = processhandler.ProcessHandler([self.proclaunch,
- "process_waittimeout_10s.ini"],
+ "process_waittimeout_10s.ini"],
cwd=here)
p.run(timeout=30)
p.wait()
self.determine_status(p)
def test_process_kill(self):
"""Process is started, we kill it"""
@@ -211,17 +216,17 @@ class ProcTest(proctest.ProcTest):
self.determine_status(p)
def test_process_output_twice(self):
"""
Process is started, then processOutput is called a second time explicitly
"""
p = processhandler.ProcessHandler([self.proclaunch,
- "process_waittimeout_10s.ini"],
+ "process_waittimeout_10s.ini"],
cwd=here)
p.run()
p.processOutput(timeout=5)
p.wait()
self.determine_status(p, False, ())
--- a/testing/mozbase/mozprocess/tests/test_mozprocess_kill.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess_kill.py
@@ -4,62 +4,67 @@ import os
import time
import unittest
import proctest
import signal
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
+
class ProcTestKill(proctest.ProcTest):
""" Class to test various process tree killing scenatios """
def test_kill_before_run(self):
"""Process is not started, and kill() is called"""
p = processhandler.ProcessHandler([self.python, '-V'])
self.assertRaises(RuntimeError, p.kill)
def test_process_kill(self):
"""Process is started, we kill it"""
- p = processhandler.ProcessHandler([self.python, self.proclaunch, "process_normal_finish_python.ini"],
+ p = processhandler.ProcessHandler([self.python, self.proclaunch,
+ "process_normal_finish_python.ini"],
cwd=here)
p.run()
p.kill()
self.determine_status(p, expectedfail=('returncode',))
def test_process_kill_deep(self):
"""Process is started, we kill it, we use a deep process tree"""
- p = processhandler.ProcessHandler([self.python, self.proclaunch, "process_normal_deep_python.ini"],
+ p = processhandler.ProcessHandler([self.python, self.proclaunch,
+ "process_normal_deep_python.ini"],
cwd=here)
p.run()
p.kill()
self.determine_status(p, expectedfail=('returncode',))
def test_process_kill_deep_wait(self):
"""Process is started, we use a deep process tree, we let it spawn
for a bit, we kill it"""
- p = processhandler.ProcessHandler([self.python, self.proclaunch, "process_normal_deep_python.ini"],
+ p = processhandler.ProcessHandler([self.python, self.proclaunch,
+ "process_normal_deep_python.ini"],
cwd=here)
p.run()
# Let the tree spawn a bit, before attempting to kill
time.sleep(3)
p.kill()
self.determine_status(p, expectedfail=('returncode',))
def test_process_kill_broad(self):
"""Process is started, we kill it, we use a broad process tree"""
- p = processhandler.ProcessHandler([self.python, self.proclaunch, "process_normal_broad_python.ini"],
+ p = processhandler.ProcessHandler([self.python, self.proclaunch,
+ "process_normal_broad_python.ini"],
cwd=here)
p.run()
p.kill()
self.determine_status(p, expectedfail=('returncode',))
@unittest.skipUnless(processhandler.isPosix, "posix only")
def test_process_kill_with_sigterm(self):
--- a/testing/mozbase/mozprocess/tests/test_mozprocess_kill_broad_wait.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess_kill_broad_wait.py
@@ -3,27 +3,29 @@
import os
import time
import unittest
import proctest
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
+
class ProcTestKill(proctest.ProcTest):
""" Class to test various process tree killing scenatios """
# This test should ideally be a part of test_mozprocess_kill.py
# It has been separated for the purpose of tempporarily disabling it.
# See https://bugzilla.mozilla.org/show_bug.cgi?id=921632
def test_process_kill_broad_wait(self):
"""Process is started, we use a broad process tree, we let it spawn
for a bit, we kill it"""
- p = processhandler.ProcessHandler([self.python, self.proclaunch, "process_normal_broad_python.ini"],
+ p = processhandler.ProcessHandler([self.python, self.proclaunch,
+ "process_normal_broad_python.ini"],
cwd=here)
p.run()
# Let the tree spawn a bit, before attempting to kill
time.sleep(3)
p.kill()
self.determine_status(p, expectedfail=('returncode',))
--- a/testing/mozbase/mozprocess/tests/test_mozprocess_misc.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess_misc.py
@@ -3,38 +3,39 @@
import os
import unittest
import proctest
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
+
class ProcTestMisc(proctest.ProcTest):
""" Class to test misc operations """
def test_process_output_twice(self):
"""
Process is started, then processOutput is called a second time explicitly
"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_waittimeout_10s_python.ini"],
+ "process_waittimeout_10s_python.ini"],
cwd=here)
p.run()
p.processOutput(timeout=5)
p.wait()
self.determine_status(p, False, ())
def test_unicode_in_environment(self):
env = {
'FOOBAR': 'ʘ',
}
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_normal_finish_python.ini"],
+ "process_normal_finish_python.ini"],
cwd=here, env=env)
# passes if no exceptions are raised
p.run()
p.wait()
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozprocess/tests/test_mozprocess_output.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess_output.py
@@ -3,16 +3,17 @@
import io
import os
import unittest
import proctest
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
+
class ProcTestOutput(proctest.ProcTest):
""" Class to test operations related to output handling """
def test_process_output_nonewline(self):
"""
Process is started, outputs data with no newline
"""
p = processhandler.ProcessHandler([self.python, "procnonewline.py"],
@@ -23,29 +24,29 @@ class ProcTestOutput(proctest.ProcTest):
p.wait()
self.determine_status(p, False, ())
def test_stream_process_output(self):
"""
Process output stream does not buffer
"""
- expected = '\n'.join([str(n) for n in range(0,10)])
+ expected = '\n'.join([str(n) for n in range(0, 10)])
stream = io.BytesIO()
buf = io.BufferedRandom(stream)
p = processhandler.ProcessHandler([self.python, "proccountfive.py"],
cwd=here,
stream=buf)
p.run()
p.wait()
for i in range(5, 10):
- stream.write(str(i)+'\n')
+ stream.write(str(i) + '\n')
buf.flush()
self.assertEquals(stream.getvalue().strip(), expected)
# make sure mozprocess doesn't close the stream
# since mozprocess didn't create it
self.assertFalse(buf.closed)
buf.close()
--- a/testing/mozbase/mozprocess/tests/test_mozprocess_params.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess_params.py
@@ -2,42 +2,41 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from mozprocess import processhandler
+
class ParamTests(unittest.TestCase):
-
def test_process_outputline_handler(self):
"""Parameter processOutputLine is accepted with a single function"""
def output(line):
print("output " + str(line))
err = None
try:
- processhandler.ProcessHandler(['ls','-l'], processOutputLine=output)
+ processhandler.ProcessHandler(['ls', '-l'], processOutputLine=output)
except (TypeError, AttributeError) as e:
err = e
self.assertFalse(err)
def test_process_outputline_handler_list(self):
"""Parameter processOutputLine is accepted with a list of functions"""
def output(line):
print("output " + str(line))
err = None
try:
- processhandler.ProcessHandler(['ls','-l'], processOutputLine=[output])
+ processhandler.ProcessHandler(['ls', '-l'], processOutputLine=[output])
except (TypeError, AttributeError) as e:
err = e
self.assertFalse(err)
-
def test_process_ontimeout_handler(self):
"""Parameter onTimeout is accepted with a single function"""
def timeout():
print("timeout!")
err = None
try:
processhandler.ProcessHandler(['sleep', '2'], onTimeout=timeout)
except (TypeError, AttributeError) as e:
@@ -78,9 +77,8 @@ class ParamTests(unittest.TestCase):
self.assertFalse(err)
def main():
unittest.main()
if __name__ == '__main__':
main()
-
--- a/testing/mozbase/mozprocess/tests/test_mozprocess_poll.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess_poll.py
@@ -14,87 +14,87 @@ here = os.path.dirname(os.path.abspath(_
class ProcTestPoll(proctest.ProcTest):
""" Class to test process poll """
def test_poll_before_run(self):
"""Process is not started, and poll() is called"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_normal_finish_python.ini"],
+ "process_normal_finish_python.ini"],
cwd=here)
self.assertRaises(RuntimeError, p.poll)
def test_poll_while_running(self):
"""Process is started, and poll() is called"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_normal_finish_python.ini"],
+ "process_normal_finish_python.ini"],
cwd=here)
p.run()
returncode = p.poll()
self.assertEqual(returncode, None)
self.determine_status(p, True)
p.kill()
def test_poll_after_kill(self):
"""Process is killed, and poll() is called"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_normal_finish_python.ini"],
+ "process_normal_finish_python.ini"],
cwd=here)
p.run()
returncode = p.kill()
# We killed the process, so the returncode should be < 0
self.assertLess(returncode, 0)
self.assertEqual(returncode, p.poll())
self.determine_status(p)
def test_poll_after_kill_no_process_group(self):
"""Process (no group) is killed, and poll() is called"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_normal_finish_no_process_group.ini"],
+ "process_normal_finish_no_process_group.ini"],
cwd=here,
ignore_children=True
)
p.run()
returncode = p.kill()
# We killed the process, so the returncode should be < 0
self.assertLess(returncode, 0)
self.assertEqual(returncode, p.poll())
self.determine_status(p)
def test_poll_after_double_kill(self):
"""Process is killed twice, and poll() is called"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_normal_finish_python.ini"],
+ "process_normal_finish_python.ini"],
cwd=here)
p.run()
p.kill()
returncode = p.kill()
# We killed the process, so the returncode should be < 0
self.assertLess(returncode, 0)
self.assertEqual(returncode, p.poll())
self.determine_status(p)
def test_poll_after_external_kill(self):
"""Process is killed externally, and poll() is called"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_normal_finish_python.ini"],
+ "process_normal_finish_python.ini"],
cwd=here)
p.run()
os.kill(p.pid, signal.SIGTERM)
returncode = p.wait()
# We killed the process, so the returncode should be < 0
self.assertEqual(returncode, -signal.SIGTERM)
self.assertEqual(returncode, p.poll())
--- a/testing/mozbase/mozprocess/tests/test_mozprocess_wait.py
+++ b/testing/mozbase/mozprocess/tests/test_mozprocess_wait.py
@@ -3,85 +3,87 @@
import os
import unittest
import proctest
import mozinfo
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
+
class ProcTestWait(proctest.ProcTest):
""" Class to test process waits and timeouts """
def test_normal_finish(self):
"""Process is started, runs to completion while we wait for it"""
- p = processhandler.ProcessHandler([self.python, self.proclaunch, "process_normal_finish_python.ini"],
+ p = processhandler.ProcessHandler([self.python, self.proclaunch,
+ "process_normal_finish_python.ini"],
cwd=here)
p.run()
p.wait()
self.determine_status(p)
def test_wait(self):
"""Process is started runs to completion while we wait indefinitely"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_waittimeout_10s_python.ini"],
+ "process_waittimeout_10s_python.ini"],
cwd=here)
p.run()
p.wait()
self.determine_status(p)
-
def test_timeout(self):
""" Process is started, runs but we time out waiting on it
to complete
"""
- p = processhandler.ProcessHandler([self.python, self.proclaunch, "process_waittimeout_python.ini"],
+ p = processhandler.ProcessHandler([self.python, self.proclaunch,
+ "process_waittimeout_python.ini"],
cwd=here)
p.run(timeout=10)
p.wait()
if mozinfo.isUnix:
# process was killed, so returncode should be negative
self.assertLess(p.proc.returncode, 0)
self.determine_status(p, False, ['returncode', 'didtimeout'])
def test_waittimeout(self):
"""
Process is started, then wait is called and times out.
Process is still running and didn't timeout
"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_waittimeout_10s_python.ini"],
+ "process_waittimeout_10s_python.ini"],
cwd=here)
p.run()
p.wait(timeout=5)
self.determine_status(p, True, ())
def test_waitnotimeout(self):
""" Process is started, runs to completion before our wait times out
"""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_waittimeout_10s_python.ini"],
+ "process_waittimeout_10s_python.ini"],
cwd=here)
p.run(timeout=30)
p.wait()
self.determine_status(p)
def test_wait_twice_after_kill(self):
"""Bug 968718: Process is started and stopped. wait() twice afterward."""
p = processhandler.ProcessHandler([self.python, self.proclaunch,
- "process_waittimeout_python.ini"],
+ "process_waittimeout_python.ini"],
cwd=here)
p.run()
p.kill()
returncode1 = p.wait()
returncode2 = p.wait()
self.determine_status(p)
--- a/testing/mozbase/mozprocess/tests/test_process_reader.py
+++ b/testing/mozbase/mozprocess/tests/test_process_reader.py
@@ -1,25 +1,30 @@
import unittest
import subprocess
import sys
from mozprocess.processhandler import ProcessReader, StoreOutput
+
def run_python(str_code, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
cmd = [sys.executable, '-c', str_code]
return subprocess.Popen(cmd, stdout=stdout, stderr=stderr)
+
class TestProcessReader(unittest.TestCase):
+
def setUp(self):
self.out = StoreOutput()
self.err = StoreOutput()
self.finished = False
+
def on_finished():
self.finished = True
self.timeout = False
+
def on_timeout():
self.timeout = True
self.reader = ProcessReader(stdout_callback=self.out,
stderr_callback=self.err,
finished_callback=on_finished,
timeout_callback=on_timeout)
def test_stdout_callback(self):
@@ -79,17 +84,18 @@ class TestProcessReader(unittest.TestCas
def test_read_with_strange_eol(self):
proc = run_python('import sys; sys.stdout.write("1\\r\\r\\r\\n")')
self.reader.start(proc)
self.reader.thread.join()
self.assertEqual(self.out.output, ['1'])
def test_mixed_stdout_stderr(self):
- proc = run_python('import sys; sys.stderr.write("hello world\\n"); print 1; print 2', stderr=subprocess.STDOUT)
+ proc = run_python('import sys; sys.stderr.write("hello world\\n"); print 1; print 2',
+ stderr=subprocess.STDOUT)
self.reader.start(proc)
self.reader.thread.join()
self.assertEqual(sorted(self.out.output), sorted(['1', '2', 'hello world']))
self.assertEqual(self.err.output, [])
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozprofile/mozprofile/__init__.py
+++ b/testing/mozbase/mozprofile/mozprofile/__init__.py
@@ -1,8 +1,9 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
To use mozprofile as an API you can import mozprofile.profile_ and/or the AddonManager_.
``mozprofile.profile`` features a generic ``Profile`` class. In addition,
--- a/testing/mozbase/mozprofile/mozprofile/addons.py
+++ b/testing/mozbase/mozprofile/mozprofile/addons.py
@@ -8,17 +8,18 @@ import sys
import tempfile
import urllib2
import zipfile
from xml.dom import minidom
import mozfile
from mozlog.unstructured import getLogger
-# Needed for the AMO's rest API - https://developer.mozilla.org/en/addons.mozilla.org_%28AMO%29_API_Developers%27_Guide/The_generic_AMO_API
+# Needed for the AMO's REST API -
+# https://developer.mozilla.org/en/addons.mozilla.org_%28AMO%29_API_Developers%27_Guide/The_generic_AMO_API
AMO_API_VERSION = "1.5"
# Logger for 'mozprofile.addons' module
module_logger = getLogger(__name__)
class AddonFormatError(Exception):
"""Exception for not well-formed add-on manifest files"""
@@ -198,34 +199,37 @@ class AddonManager(object):
for addon in addons:
if '://' in addon['path'] or os.path.exists(addon['path']):
self.install_from_path(addon['path'])
continue
# No path specified, try to grab it off AMO
locale = addon.get('amo_locale', 'en_US')
- query = 'https://services.addons.mozilla.org/' + locale + '/firefox/api/' + AMO_API_VERSION + '/'
+ query = 'https://services.addons.mozilla.org/' + locale + '/firefox/api/' \
+ + AMO_API_VERSION + '/'
if 'amo_id' in addon:
- query += 'addon/' + addon['amo_id'] # this query grabs information on the addon base on its id
+                # this query grabs information on the addon based on its id
+ query += 'addon/' + addon['amo_id']
else:
- query += 'search/' + addon['name'] + '/default/1' # this query grabs information on the first addon returned from a search
+ # this query grabs information on the first addon returned from a search
+ query += 'search/' + addon['name'] + '/default/1'
install_path = AddonManager.get_amo_install_path(query)
self.install_from_path(install_path)
self.installed_manifests.append(filepath)
@classmethod
def get_amo_install_path(self, query):
"""
Get the addon xpi install path for the specified AMO query.
:param query: query-documentation_
- .. _query-documentation: https://developer.mozilla.org/en/addons.mozilla.org_%28AMO%29_API_Developers%27_Guide/The_generic_AMO_API
+ .. _query-documentation: https://developer.mozilla.org/en/addons.mozilla.org_%28AMO%29_API_Developers%27_Guide/The_generic_AMO_API # noqa
"""
response = urllib2.urlopen(query)
dom = minidom.parseString(response.read())
for node in dom.getElementsByTagName('install')[0].childNodes:
if node.nodeType == node.TEXT_NODE:
return node.data
@classmethod
@@ -282,17 +286,17 @@ class AddonManager(object):
finally:
compressed_file.close()
elif os.path.isdir(addon_path):
with open(os.path.join(addon_path, 'install.rdf'), 'r') as f:
manifest = f.read()
else:
raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path)
except (IOError, KeyError) as e:
- raise AddonFormatError, str(e), sys.exc_info()[2]
+ raise AddonFormatError(str(e)), None, sys.exc_info()[2]
try:
doc = minidom.parseString(manifest)
# Get the namespaces abbreviations
em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#')
rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
@@ -303,17 +307,17 @@ class AddonManager(object):
if entry in details.keys():
details.update({entry: value})
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({entry: get_text(node)})
except Exception as e:
- raise AddonFormatError, str(e), sys.exc_info()[2]
+ raise AddonFormatError(str(e)), None, sys.exc_info()[2]
# turn unpack into a true/false value
if isinstance(details['unpack'], basestring):
details['unpack'] = details['unpack'].lower() == 'true'
# If no ID is set, the add-on is invalid
if details.get('id') is None:
raise AddonFormatError('Add-on id could not be found.')
--- a/testing/mozbase/mozprofile/mozprofile/cli.py
+++ b/testing/mozbase/mozprofile/mozprofile/cli.py
@@ -2,57 +2,63 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Creates and/or modifies a Firefox profile.
The profile can be modified by passing in addons to install or preferences to set.
-If no profile is specified, a new profile is created and the path of the resulting profile is printed.
+If no profile is specified, a new profile is created and the path of the
+resulting profile is printed.
"""
import sys
from optparse import OptionParser
from prefs import Preferences
from profile import FirefoxProfile
from profile import Profile
__all__ = ['MozProfileCLI', 'cli']
+
class MozProfileCLI(object):
"""The Command Line Interface for ``mozprofile``."""
module = 'mozprofile'
profile_class = Profile
def __init__(self, args=sys.argv[1:], add_options=None):
self.parser = OptionParser(description=__doc__)
self.add_options(self.parser)
if add_options:
add_options(self.parser)
(self.options, self.args) = self.parser.parse_args(args)
def add_options(self, parser):
parser.add_option("-p", "--profile", dest="profile",
- help="The path to the profile to operate on. If none, creates a new profile in temp directory")
+ help="The path to the profile to operate on. "
+ "If none, creates a new profile in temp directory")
parser.add_option("-a", "--addon", dest="addons",
action="append", default=[],
- help="Addon paths to install. Can be a filepath, a directory containing addons, or a url")
+ help="Addon paths to install. Can be a filepath, "
+ "a directory containing addons, or a url")
parser.add_option("--addon-manifests", dest="addon_manifests",
action="append",
help="An addon manifest to install")
parser.add_option("--pref", dest="prefs",
action='append', default=[],
- help="A preference to set. Must be a key-value pair separated by a ':'")
+ help="A preference to set. "
+ "Must be a key-value pair separated by a ':'")
parser.add_option("--preferences", dest="prefs_files",
action='append', default=[],
metavar="FILE",
- help="read preferences from a JSON or INI file. For INI, use 'file.ini:section' to specify a particular section.")
+ help="read preferences from a JSON or INI file. "
+ "For INI, use 'file.ini:section' to specify a particular section.")
def profile_args(self):
"""arguments to instantiate the profile class"""
return dict(profile=self.options.profile,
addons=self.options.addons,
addon_manifests=self.options.addon_manifests,
preferences=self.preferences())
@@ -66,17 +72,18 @@ class MozProfileCLI(object):
for prefs_file in self.options.prefs_files:
prefs.add_file(prefs_file)
# change CLI preferences into 2-tuples
separator = ':'
cli_prefs = []
for pref in self.options.prefs:
if separator not in pref:
- self.parser.error("Preference must be a key-value pair separated by a ':' (You gave: %s)" % pref)
+ self.parser.error("Preference must be a key-value pair separated by "
+ "a ':' (You gave: %s)" % pref)
cli_prefs.append(pref.split(separator, 1))
# string preferences
prefs.add(cli_prefs, cast=True)
return prefs()
def profile(self, restore=False):
--- a/testing/mozbase/mozprofile/mozprofile/diff.py
+++ b/testing/mozbase/mozprofile/mozprofile/diff.py
@@ -7,16 +7,17 @@ diff two profile summaries
import difflib
import profile
import optparse
import os
import sys
__all__ = ['diff', 'diff_profiles']
+
def diff(profile1, profile2, diff_function=difflib.unified_diff):
profiles = (profile1, profile2)
parts = {}
parts_dict = {}
for index in (0, 1):
prof = profiles[index]
@@ -29,32 +30,34 @@ def diff(profile1, profile2, diff_functi
first_missing = [i for i in parts_dict[1]
if i not in parts_dict[0]]
parts[0].extend([(i, '') for i in first_missing])
# diffs
retval = []
for key, value in parts[0]:
other = parts_dict[1].get(key, '')
- value = value.strip(); other = other.strip()
+ value = value.strip()
+ other = other.strip()
if key == 'Files':
# first line of files is the path; we don't care to diff that
value = '\n'.join(value.splitlines()[1:])
if other:
other = '\n'.join(other.splitlines()[1:])
value = value.splitlines()
other = other.splitlines()
section_diff = list(diff_function(value, other, profile1.profile, profile2.profile))
if section_diff:
retval.append((key, '\n'.join(section_diff)))
return retval
+
def diff_profiles(args=sys.argv[1:]):
# parse command line
usage = '%prog [options] profile1 profile2'
parser = optparse.OptionParser(usage=usage, description=__doc__)
options, args = parser.parse_args(args)
if len(args) != 2:
parser.error("Must give two profile paths")
--- a/testing/mozbase/mozprofile/mozprofile/permissions.py
+++ b/testing/mozbase/mozprofile/mozprofile/permissions.py
@@ -2,31 +2,32 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
add permissions to the profile
"""
+import codecs
+import os
+import sqlite3
+import urlparse
+
__all__ = ['MissingPrimaryLocationError', 'MultiplePrimaryLocationsError',
'DEFAULT_PORTS', 'DuplicateLocationError', 'BadPortLocationError',
'LocationsSyntaxError', 'Location', 'ServerLocations',
'Permissions']
-import codecs
-import os
-import sqlite3
-import urlparse
+# http://hg.mozilla.org/mozilla-central/file/b871dfb2186f/build/automation.py.in#l28
+DEFAULT_PORTS = {'http': '8888',
+ 'https': '4443',
+ 'ws': '4443',
+ 'wss': '4443'}
-# http://hg.mozilla.org/mozilla-central/file/b871dfb2186f/build/automation.py.in#l28
-DEFAULT_PORTS = { 'http': '8888',
- 'https': '4443',
- 'ws': '4443',
- 'wss': '4443' }
class LocationError(Exception):
"""Signifies an improperly formed location."""
def __str__(self):
s = "Bad location"
if self.message:
s += ": %s" % self.message
@@ -88,25 +89,26 @@ class Location(object):
self.options = options
try:
int(self.port)
except ValueError:
raise BadPortLocationError(self.port)
def isEqual(self, location):
"""compare scheme://host:port, but ignore options"""
- return len([i for i in self.attrs if getattr(self, i) == getattr(location, i)]) == len(self.attrs)
+ return len([i for i in self.attrs
+ if getattr(self, i) == getattr(location, i)]) == len(self.attrs)
__eq__ = isEqual
def url(self):
return '%s://%s:%s' % (self.scheme, self.host, self.port)
def __str__(self):
- return '%s %s' % (self.url(), ','.join(self.options))
+ return '%s %s' % (self.url(), ','.join(self.options))
class ServerLocations(object):
"""Iterable collection of locations.
Use provided functions to add new locations, rather that manipulating
_locations directly, in order to check for errors and to ensure the
callback is called, if given.
"""
@@ -139,19 +141,20 @@ class ServerLocations(object):
options = options.split(',')
self.add(Location(scheme, host, port, options))
def read(self, filename, check_for_primary=True):
"""
Reads the file and adds all valid locations to the ``self._locations`` array.
:param filename: in the format of server-locations.txt_
- :param check_for_primary: if True, a ``MissingPrimaryLocationError`` exception is raised if no primary is found
+ :param check_for_primary: if True, a ``MissingPrimaryLocationError`` exception is raised
+ if no primary is found
- .. _server-locations.txt: http://dxr.mozilla.org/mozilla-central/source/build/pgo/server-locations.txt
+ .. _server-locations.txt: http://dxr.mozilla.org/mozilla-central/source/build/pgo/server-locations.txt # noqa
The only exception is that the port, if not defined, defaults to 80 or 443.
FIXME: Shouldn't this default to the protocol-appropriate port? Is
there any reason to have defaults at all?
"""
locationFile = codecs.open(filename, "r", "UTF-8")
@@ -221,17 +224,17 @@ class Permissions(object):
elif os.path.exists(locations):
self._locations.read(locations)
def write_db(self, locations):
"""write permissions to the sqlite database"""
# Open database and create table
permDB = sqlite3.connect(os.path.join(self._profileDir, "permissions.sqlite"))
- cursor = permDB.cursor();
+ cursor = permDB.cursor()
# SQL copied from
# http://dxr.mozilla.org/mozilla-central/source/extensions/cookie/nsPermissionManager.cpp
cursor.execute("""CREATE TABLE IF NOT EXISTS moz_hosts (
id INTEGER PRIMARY KEY
,origin TEXT
,type TEXT
,permission INTEGER
@@ -258,17 +261,17 @@ class Permissions(object):
statement = "INSERT INTO moz_hosts values(NULL, ?, ?, ?, 0, 0, 0, 0)"
cursor.execute("PRAGMA user_version=3;")
else:
statement = "INSERT INTO moz_hosts values(NULL, ?, ?, ?, 0, 0)"
cursor.execute("PRAGMA user_version=2;")
for location in locations:
# set the permissions
- permissions = { 'allowXULXBL': 'noxul' not in location.options }
+ permissions = {'allowXULXBL': 'noxul' not in location.options}
for perm, allow in permissions.iteritems():
if allow:
permission_type = 1
else:
permission_type = 2
if using_origin:
# This is a crude approximation of the origin generation logic from
@@ -396,16 +399,17 @@ function FindProxyForURL(url, host)
"""Removed permissions added by mozprofile."""
sqlite_file = os.path.join(self._profileDir, "permissions.sqlite")
if not os.path.exists(sqlite_file):
return
# Open database and create table
permDB = sqlite3.connect(sqlite_file)
- cursor = permDB.cursor();
+ cursor = permDB.cursor()
- # TODO: only delete values that we add, this would require sending in the full permissions object
- cursor.execute("DROP TABLE IF EXISTS moz_hosts");
+ # TODO: only delete values that we add, this would require sending
+ # in the full permissions object
+ cursor.execute("DROP TABLE IF EXISTS moz_hosts")
# Commit and close
permDB.commit()
cursor.close()
--- a/testing/mozbase/mozprofile/mozprofile/prefs.py
+++ b/testing/mozbase/mozprofile/mozprofile/prefs.py
@@ -1,26 +1,26 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
user preferences
"""
-__all__ = ('PreferencesReadError', 'Preferences')
-
import json
import mozfile
import os
-import re
import tokenize
from ConfigParser import SafeConfigParser as ConfigParser
from StringIO import StringIO
+__all__ = ('PreferencesReadError', 'Preferences')
+
+
class PreferencesReadError(Exception):
"""read error for prefrences files"""
class Preferences(object):
"""assembly of preferences from various sources"""
def __init__(self, prefs=None):
@@ -54,40 +54,40 @@ class Preferences(object):
def cast(cls, value):
"""
interpolate a preference from a string
from the command line or from e.g. an .ini file, there is no good way to denote
what type the preference value is, as natively it is a string
- integers will get cast to integers
- true/false will get cast to True/False
- - anything enclosed in single quotes will be treated as a string with the ''s removed from both sides
+ - anything enclosed in single quotes will be treated as a string
+ with the ''s removed from both sides
"""
if not isinstance(value, basestring):
- return value # no op
+ return value # no op
quote = "'"
if value == 'true':
- return True
+ return True
if value == 'false':
return False
try:
return int(value)
except ValueError:
pass
if value.startswith(quote) and value.endswith(quote):
value = value[1:-1]
return value
-
@classmethod
def read(cls, path):
"""read preferences from a file"""
- section = None # for .ini files
+ section = None # for .ini files
basename = os.path.basename(path)
if ':' in basename:
# section of INI file
path, section = path.rsplit(':', 1)
if not os.path.exists(path) and not mozfile.is_url(path):
raise PreferencesReadError("'%s' does not exist" % path)
@@ -106,17 +106,16 @@ class Preferences(object):
try:
return cls.read_ini(path)
except Exception as f:
for exception in e, f:
if isinstance(exception, PreferencesReadError):
raise exception
raise PreferencesReadError("Could not recognize format of %s" % path)
-
@classmethod
def read_ini(cls, path, section=None):
"""read preferences from an .ini file"""
parser = ConfigParser()
parser.optionxform = str
parser.readfp(mozfile.load(path))
@@ -142,54 +141,54 @@ class Preferences(object):
if [i for i in prefs if type(i) != list or len(i) != 2]:
raise PreferencesReadError("Malformed preferences: %s" % path)
values = [i[1] for i in prefs]
elif isinstance(prefs, dict):
values = prefs.values()
else:
raise PreferencesReadError("Malformed preferences: %s" % path)
types = (bool, basestring, int)
- if [i for i in values
- if not [isinstance(i, j) for j in types]]:
+ if [i for i in values if not [isinstance(i, j) for j in types]]:
raise PreferencesReadError("Only bool, string, and int values allowed")
return prefs
@classmethod
def read_prefs(cls, path, pref_setter='user_pref', interpolation=None):
"""
Read preferences from (e.g.) prefs.js
:param path: The path to the preference file to read.
:param pref_setter: The name of the function used to set preferences
in the preference file.
:param interpolation: If provided, a dict that will be passed
to str.format to interpolate preference values.
"""
- marker = '##//' # magical marker
+ marker = '##//' # magical marker
lines = [i.strip() for i in mozfile.load(path).readlines()]
_lines = []
for line in lines:
if not line.startswith(pref_setter):
continue
if '//' in line:
line = line.replace('//', marker)
_lines.append(line)
string = '\n'.join(_lines)
# skip trailing comments
processed_tokens = []
f_obj = StringIO(string)
for token in tokenize.generate_tokens(f_obj.readline):
if token[0] == tokenize.COMMENT:
continue
- processed_tokens.append(token[:2]) # [:2] gets around http://bugs.python.org/issue9974
+ processed_tokens.append(token[:2]) # [:2] gets around http://bugs.python.org/issue9974
string = tokenize.untokenize(processed_tokens)
retval = []
+
def pref(a, b):
if interpolation and isinstance(b, basestring):
b = b.format(**interpolation)
retval.append((a, b))
lines = [i.strip().rstrip(';') for i in string.split('\n') if i.strip()]
_globals = {'retval': retval, 'true': True, 'false': False}
_globals[pref_setter] = pref
@@ -216,17 +215,17 @@ class Preferences(object):
else:
f = _file
if isinstance(prefs, dict):
# order doesn't matter
prefs = prefs.items()
# serialize -> JSON
- _prefs = [(json.dumps(k), json.dumps(v) )
+ _prefs = [(json.dumps(k), json.dumps(v))
for k, v in prefs]
# write the preferences
for _pref in _prefs:
print >> f, pref_string % _pref
# close the file if opened internally
if isinstance(_file, basestring):
--- a/testing/mozbase/mozprofile/mozprofile/profile.py
+++ b/testing/mozbase/mozprofile/mozprofile/profile.py
@@ -1,30 +1,29 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
-__all__ = ['Profile',
- 'FirefoxProfile',
- 'MetroFirefoxProfile',
- 'ThunderbirdProfile']
-
import os
import time
import tempfile
-import types
import uuid
from addons import AddonManager
import mozfile
from permissions import Permissions
from prefs import Preferences
from shutil import copytree
from webapps import WebappCollection
+__all__ = ['Profile',
+ 'FirefoxProfile',
+ 'MetroFirefoxProfile',
+ 'ThunderbirdProfile']
+
class Profile(object):
"""Handles all operations regarding profile.
Creating new profiles, installing add-ons, setting preferences and
handling cleanup.
The files associated with the profile will be removed automatically after
@@ -125,17 +124,17 @@ class Profile(object):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
def __del__(self):
self.cleanup()
- ### cleanup
+ # cleanup
def cleanup(self):
"""Cleanup operations for the profile."""
if self.restore:
# If copies of those class instances exist ensure we correctly
# reset them all (see bug 934484)
self.clean_preferences()
@@ -170,30 +169,30 @@ class Profile(object):
@classmethod
def clone(cls, path_from, path_to=None, **kwargs):
"""Instantiate a temporary profile via cloning
- path: path of the basis to clone
- kwargs: arguments to the profile constructor
"""
if not path_to:
- tempdir = tempfile.mkdtemp() # need an unused temp dir name
- mozfile.remove(tempdir) # copytree requires that dest does not exist
+ tempdir = tempfile.mkdtemp() # need an unused temp dir name
+ mozfile.remove(tempdir) # copytree requires that dest does not exist
path_to = tempdir
copytree(path_from, path_to)
c = cls(path_to, **kwargs)
c.create_new = True # deletes a cloned profile when restore is True
return c
def exists(self):
"""returns whether the profile exists or not"""
return os.path.exists(self.profile)
- ### methods for preferences
+ # methods for preferences
def set_preferences(self, preferences, filename='user.js'):
"""Adds preferences dict to profile preferences"""
# append to the file
prefs_file = os.path.join(self.profile, filename)
f = open(prefs_file, 'a')
@@ -236,102 +235,108 @@ class Profile(object):
"""
pop the last set of preferences added
returns True if popped
"""
path = os.path.join(self.profile, filename)
with file(path) as f:
lines = f.read().splitlines()
+
def last_index(_list, value):
"""
returns the last index of an item;
this should actually be part of python code but it isn't
"""
for index in reversed(range(len(_list))):
if _list[index] == value:
return index
s = last_index(lines, self.delimeters[0])
e = last_index(lines, self.delimeters[1])
# ensure both markers are found
if s is None:
assert e is None, '%s found without %s' % (self.delimeters[1], self.delimeters[0])
- return False # no preferences found
+ return False # no preferences found
elif e is None:
assert s is None, '%s found without %s' % (self.delimeters[0], self.delimeters[1])
# ensure the markers are in the proper order
- assert e > s, '%s found at %s, while %s found at %s' % (self.delimeters[1], e, self.delimeters[0], s)
+ assert e > s, '%s found at %s, while %s found at %s' % (self.delimeters[1], e,
+ self.delimeters[0], s)
# write the prefs
- cleaned_prefs = '\n'.join(lines[:s] + lines[e+1:])
+ cleaned_prefs = '\n'.join(lines[:s] + lines[e + 1:])
with file(path, 'w') as f:
f.write(cleaned_prefs)
return True
- ### methods for introspection
+ # methods for introspection
def summary(self, return_parts=False):
"""
returns string summarizing profile information.
if return_parts is true, return the (Part_name, value) list
of tuples instead of the assembled string
"""
- parts = [('Path', self.profile)] # profile path
+ parts = [('Path', self.profile)] # profile path
# directory tree
parts.append(('Files', '\n%s' % mozfile.tree(self.profile)))
# preferences
for prefs_file in ('user.js', 'prefs.js'):
path = os.path.join(self.profile, prefs_file)
if os.path.exists(path):
# prefs that get their own section
# This is currently only 'network.proxy.autoconfig_url'
# but could be expanded to include others
section_prefs = ['network.proxy.autoconfig_url']
line_length = 80
- line_length_buffer = 10 # buffer for 80 character display: length = 80 - len(key) - len(': ') - line_length_buffer
+ # buffer for 80 character display:
+ # length = 80 - len(key) - len(': ') - line_length_buffer
+ line_length_buffer = 10
line_length_buffer += len(': ')
+
def format_value(key, value):
if key not in section_prefs:
return value
max_length = line_length - len(key) - line_length_buffer
if len(value) > max_length:
value = '%s...' % value[:max_length]
return value
prefs = Preferences.read_prefs(path)
if prefs:
prefs = dict(prefs)
parts.append((prefs_file,
- '\n%s' %('\n'.join(['%s: %s' % (key, format_value(key, prefs[key]))
- for key in sorted(prefs.keys())
- ]))))
+ '\n%s' % ('\n'.join(
+ ['%s: %s' % (key, format_value(key, prefs[key]))
+ for key in sorted(prefs.keys())]))))
# Currently hardcorded to 'network.proxy.autoconfig_url'
# but could be generalized, possibly with a generalized (simple)
# JS-parser
network_proxy_autoconfig = prefs.get('network.proxy.autoconfig_url')
if network_proxy_autoconfig and network_proxy_autoconfig.strip():
network_proxy_autoconfig = network_proxy_autoconfig.strip()
lines = network_proxy_autoconfig.replace(';', ';\n').splitlines()
lines = [line.strip() for line in lines]
origins_string = 'var origins = ['
origins_end = '];'
if origins_string in lines[0]:
start = lines[0].find(origins_string)
- end = lines[0].find(origins_end, start);
+ end = lines[0].find(origins_end, start)
splitline = [lines[0][:start],
- lines[0][start:start+len(origins_string)-1],
+ lines[0][start:start + len(origins_string) - 1],
]
- splitline.extend(lines[0][start+len(origins_string):end].replace(',', ',\n').splitlines())
+ splitline.extend(lines[0][start + len(origins_string):end].replace(
+ ',', ',\n').splitlines())
splitline.append(lines[0][end:])
lines[0:1] = [i.strip() for i in splitline]
parts.append(('Network Proxy Autoconfig, %s' % (prefs_file),
'\n%s' % '\n'.join(lines)))
if return_parts:
return parts
@@ -340,108 +345,110 @@ class Profile(object):
return retval
__str__ = summary
class FirefoxProfile(Profile):
"""Specialized Profile subclass for Firefox"""
- preferences = {# Don't automatically update the application
- 'app.update.enabled' : False,
- # Don't restore the last open set of tabs if the browser has crashed
- 'browser.sessionstore.resume_from_crash': False,
- # Don't check for the default web browser during startup
- 'browser.shell.checkDefaultBrowser' : False,
- # Don't warn on exit when multiple tabs are open
- 'browser.tabs.warnOnClose' : False,
- # Don't warn when exiting the browser
- 'browser.warnOnQuit': False,
- # Don't send Firefox health reports to the production server
- 'datareporting.healthreport.documentServerURI' : 'http://%(server)s/healthreport/',
- # Only install add-ons from the profile and the application scope
- # Also ensure that those are not getting disabled.
- # see: https://developer.mozilla.org/en/Installing_extensions
- 'extensions.enabledScopes' : 5,
- 'extensions.autoDisableScopes' : 10,
- # Don't send the list of installed addons to AMO
- 'extensions.getAddons.cache.enabled' : False,
- # Don't install distribution add-ons from the app folder
- 'extensions.installDistroAddons' : False,
- # Dont' run the add-on compatibility check during start-up
- 'extensions.showMismatchUI' : False,
- # Don't automatically update add-ons
- 'extensions.update.enabled' : False,
- # Don't open a dialog to show available add-on updates
- 'extensions.update.notifyUser' : False,
- # Enable test mode to run multiple tests in parallel
- 'focusmanager.testmode' : True,
- # Enable test mode to not raise an OS level dialog for location sharing
- 'geo.provider.testing' : True,
- # Suppress delay for main action in popup notifications
- 'security.notification_enable_delay' : 0,
- # Suppress automatic safe mode after crashes
- 'toolkit.startup.max_resumed_crashes' : -1,
- # Don't report telemetry information
- 'toolkit.telemetry.enabled' : False,
- # Don't send Telemetry reports to the production server. This is
- # needed as Telemetry sends pings also if FHR upload is enabled.
- 'toolkit.telemetry.server' : 'http://%(server)s/telemetry-dummy/',
- }
+ preferences = { # Don't automatically update the application
+ 'app.update.enabled': False,
+ # Don't restore the last open set of tabs if the browser has crashed
+ 'browser.sessionstore.resume_from_crash': False,
+ # Don't check for the default web browser during startup
+ 'browser.shell.checkDefaultBrowser': False,
+ # Don't warn on exit when multiple tabs are open
+ 'browser.tabs.warnOnClose': False,
+ # Don't warn when exiting the browser
+ 'browser.warnOnQuit': False,
+ # Don't send Firefox health reports to the production server
+ 'datareporting.healthreport.documentServerURI': 'http://%(server)s/healthreport/',
+ # Only install add-ons from the profile and the application scope
+ # Also ensure that those are not getting disabled.
+ # see: https://developer.mozilla.org/en/Installing_extensions
+ 'extensions.enabledScopes': 5,
+ 'extensions.autoDisableScopes': 10,
+ # Don't send the list of installed addons to AMO
+ 'extensions.getAddons.cache.enabled': False,
+ # Don't install distribution add-ons from the app folder
+ 'extensions.installDistroAddons': False,
+ # Don't run the add-on compatibility check during start-up
+ 'extensions.showMismatchUI': False,
+ # Don't automatically update add-ons
+ 'extensions.update.enabled': False,
+ # Don't open a dialog to show available add-on updates
+ 'extensions.update.notifyUser': False,
+ # Enable test mode to run multiple tests in parallel
+ 'focusmanager.testmode': True,
+ # Enable test mode to not raise an OS level dialog for location sharing
+ 'geo.provider.testing': True,
+ # Suppress delay for main action in popup notifications
+ 'security.notification_enable_delay': 0,
+ # Suppress automatic safe mode after crashes
+ 'toolkit.startup.max_resumed_crashes': -1,
+ # Don't report telemetry information
+ 'toolkit.telemetry.enabled': False,
+ # Don't send Telemetry reports to the production server. This is
+ # needed as Telemetry sends pings also if FHR upload is enabled.
+ 'toolkit.telemetry.server': 'http://%(server)s/telemetry-dummy/',
+ }
+
class MetroFirefoxProfile(Profile):
"""Specialized Profile subclass for Firefox Metro"""
- preferences = {# Don't automatically update the application for desktop and metro build
- 'app.update.enabled' : False,
- 'app.update.metro.enabled' : False,
- # Dismiss first run content overlay
- 'browser.firstrun-content.dismissed' : True,
- # Don't restore the last open set of tabs if the browser has crashed
- 'browser.sessionstore.resume_from_crash': False,
- # Don't check for the default web browser during startup
- 'browser.shell.checkDefaultBrowser' : False,
- # Don't send Firefox health reports to the production server
- 'datareporting.healthreport.documentServerURI' : 'http://%(server)s/healthreport/',
- # Enable extensions
- 'extensions.defaultProviders.enabled' : True,
- # Only install add-ons from the profile and the application scope
- # Also ensure that those are not getting disabled.
- # see: https://developer.mozilla.org/en/Installing_extensions
- 'extensions.enabledScopes' : 5,
- 'extensions.autoDisableScopes' : 10,
- # Don't send the list of installed addons to AMO
- 'extensions.getAddons.cache.enabled' : False,
- # Don't install distribution add-ons from the app folder
- 'extensions.installDistroAddons' : False,
- # Dont' run the add-on compatibility check during start-up
- 'extensions.showMismatchUI' : False,
- # Disable strict compatibility checks to allow add-ons enabled by default
- 'extensions.strictCompatibility' : False,
- # Don't automatically update add-ons
- 'extensions.update.enabled' : False,
- # Don't open a dialog to show available add-on updates
- 'extensions.update.notifyUser' : False,
- # Enable test mode to run multiple tests in parallel
- 'focusmanager.testmode' : True,
- # Suppress delay for main action in popup notifications
- 'security.notification_enable_delay' : 0,
- # Suppress automatic safe mode after crashes
- 'toolkit.startup.max_resumed_crashes' : -1,
- # Don't report telemetry information
- 'toolkit.telemetry.enabled' : False,
- # Don't send Telemetry reports to the production server. This is
- # needed as Telemetry sends pings also if FHR upload is enabled.
- 'toolkit.telemetry.server' : 'http://%(server)s/telemetry-dummy/',
- }
+ preferences = { # Don't automatically update the application for desktop and metro build
+ 'app.update.enabled': False,
+ 'app.update.metro.enabled': False,
+ # Dismiss first run content overlay
+ 'browser.firstrun-content.dismissed': True,
+ # Don't restore the last open set of tabs if the browser has crashed
+ 'browser.sessionstore.resume_from_crash': False,
+ # Don't check for the default web browser during startup
+ 'browser.shell.checkDefaultBrowser': False,
+ # Don't send Firefox health reports to the production server
+ 'datareporting.healthreport.documentServerURI': 'http://%(server)s/healthreport/',
+ # Enable extensions
+ 'extensions.defaultProviders.enabled': True,
+ # Only install add-ons from the profile and the application scope
+ # Also ensure that those are not getting disabled.
+ # see: https://developer.mozilla.org/en/Installing_extensions
+ 'extensions.enabledScopes': 5,
+ 'extensions.autoDisableScopes': 10,
+ # Don't send the list of installed addons to AMO
+ 'extensions.getAddons.cache.enabled': False,
+ # Don't install distribution add-ons from the app folder
+ 'extensions.installDistroAddons': False,
+ # Don't run the add-on compatibility check during start-up
+ 'extensions.showMismatchUI': False,
+ # Disable strict compatibility checks to allow add-ons enabled by default
+ 'extensions.strictCompatibility': False,
+ # Don't automatically update add-ons
+ 'extensions.update.enabled': False,
+ # Don't open a dialog to show available add-on updates
+ 'extensions.update.notifyUser': False,
+ # Enable test mode to run multiple tests in parallel
+ 'focusmanager.testmode': True,
+ # Suppress delay for main action in popup notifications
+ 'security.notification_enable_delay': 0,
+ # Suppress automatic safe mode after crashes
+ 'toolkit.startup.max_resumed_crashes': -1,
+ # Don't report telemetry information
+ 'toolkit.telemetry.enabled': False,
+ # Don't send Telemetry reports to the production server. This is
+ # needed as Telemetry sends pings also if FHR upload is enabled.
+ 'toolkit.telemetry.server': 'http://%(server)s/telemetry-dummy/',
+ }
+
class ThunderbirdProfile(Profile):
"""Specialized Profile subclass for Thunderbird"""
- preferences = {'extensions.update.enabled' : False,
- 'extensions.update.notifyUser' : False,
- 'browser.shell.checkDefaultBrowser' : False,
- 'browser.tabs.warnOnClose' : False,
+ preferences = {'extensions.update.enabled': False,
+ 'extensions.update.notifyUser': False,
+ 'browser.shell.checkDefaultBrowser': False,
+ 'browser.tabs.warnOnClose': False,
'browser.warnOnQuit': False,
'browser.sessionstore.resume_from_crash': False,
# prevents the 'new e-mail address' wizard on new profile
'mail.provider.enabled': False,
}
--- a/testing/mozbase/mozprofile/mozprofile/view.py
+++ b/testing/mozbase/mozprofile/mozprofile/view.py
@@ -6,16 +6,17 @@ script to view mozilla profiles
import mozprofile
import optparse
import os
import sys
__all__ = ['view_profile']
+
def view_profile(args=sys.argv[1:]):
usage = '%prog [options] profile_path <...>'
parser = optparse.OptionParser(usage=usage, description=__doc__)
options, args = parser.parse_args(args)
if not args:
parser.print_usage()
parser.exit()
--- a/testing/mozbase/mozprofile/mozprofile/webapps.py
+++ b/testing/mozbase/mozprofile/mozprofile/webapps.py
@@ -7,32 +7,32 @@ Handles installing open webapps (https:/
to a profile. A webapp object is a dict that contains some metadata about
the webapp and must at least include a name, description and manifestURL.
Each webapp has a manifest (https://developer.mozilla.org/en-US/docs/Apps/Manifest).
Additionally there is a separate json manifest that keeps track of the installed
webapps, their manifestURLs and their permissions.
"""
-__all__ = ["Webapp", "WebappCollection", "WebappFormatException", "APP_STATUS_NOT_INSTALLED",
- "APP_STATUS_INSTALLED", "APP_STATUS_PRIVILEGED", "APP_STATUS_CERTIFIED"]
-
from string import Template
import json
import os
import shutil
import mozfile
+__all__ = ["Webapp", "WebappCollection", "WebappFormatException", "APP_STATUS_NOT_INSTALLED",
+ "APP_STATUS_INSTALLED", "APP_STATUS_PRIVILEGED", "APP_STATUS_CERTIFIED"]
+
# from http://hg.mozilla.org/mozilla-central/file/add0b94c2c0b/caps/idl/nsIPrincipal.idl#l163
APP_STATUS_NOT_INSTALLED = 0
-APP_STATUS_INSTALLED = 1
-APP_STATUS_PRIVILEGED = 2
-APP_STATUS_CERTIFIED = 3
+APP_STATUS_INSTALLED = 1
+APP_STATUS_PRIVILEGED = 2
+APP_STATUS_CERTIFIED = 3
class WebappFormatException(Exception):
"""thrown for invalid webapp objects"""
class Webapp(dict):
"""A webapp definition"""
@@ -100,17 +100,18 @@ class WebappCollection(object):
"icons": {
}
}
""")
def __init__(self, profile, apps=None, json_template=None, manifest_template=None):
"""
:param profile: the file path to a profile
- :param apps: [optional] a list of webapp objects or file paths to json files describing webapps
+ :param apps: [optional] a list of webapp objects or file paths to json files describing
+ webapps
:param json_template: [optional] string template describing the webapp json format
:param manifest_template: [optional] string template describing the webapp manifest format
"""
if not isinstance(profile, basestring):
raise TypeError("Must provide path to a profile, received '%s'" % type(profile))
self.profile = profile
self.webapps_dir = os.path.join(self.profile, 'webapps')
self.backup_dir = os.path.join(self.profile, '.mozprofile_backup', 'webapps')
--- a/testing/mozbase/mozprofile/setup.py
+++ b/testing/mozbase/mozprofile/setup.py
@@ -37,10 +37,9 @@ setup(name=PACKAGE_NAME,
extras_require={'manifest': ['manifestparser >= 0.6']},
tests_require=['mozhttpd'],
entry_points="""
# -*- Entry points: -*-
[console_scripts]
mozprofile = mozprofile:cli
view-profile = mozprofile:view_profile
diff-profiles = mozprofile:diff_profiles
- """,
- )
+ """, )
--- a/testing/mozbase/mozprofile/tests/addon_stubs.py
+++ b/testing/mozbase/mozprofile/tests/addon_stubs.py
@@ -29,17 +29,17 @@ def generate_addon(addon_id, path=None,
:param addon_id: id of an addon to generate from the stubs dictionary
:param path: path where addon and .xpi should be generated
:param name: name for the addon folder or .xpi file
:param xpi: Flag if an XPI or folder should be generated
Returns the file-path of the addon's .xpi file
"""
- if not addon_id in stubs.keys():
+ if addon_id not in stubs.keys():
raise IOError('Requested addon stub "%s" does not exist' % addon_id)
# Generate directory structure for addon
try:
tmpdir = path or tempfile.mkdtemp()
addon_dir = os.path.join(tmpdir, name or addon_id)
os.mkdir(addon_dir)
except IOError:
--- a/testing/mozbase/mozprofile/tests/addonid.py
+++ b/testing/mozbase/mozprofile/tests/addonid.py
@@ -38,25 +38,26 @@ class AddonIDTest(unittest.TestCase):
def get_test_list(self):
""" This just returns a hardcoded list of install.rdf snippets for testing.
When adding snippets for testing, remember that the id we're looking for
is "winning" (no quotes). So, make sure you have that id in your snippet
if you want it to pass.
"""
tests = [
-"""<?xml version="1.0"?>
+ """<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:id>winning</em:id>
<em:name>MozMill</em:name>
<em:version>2.0a</em:version>
<em:creator>Adam Christian</em:creator>
- <em:description>A testing extension based on the Windmill Testing Framework client source</em:description>
+ <em:description>A testing extension based on the
+ Windmill Testing Framework client source</em:description>
<em:unpack>true</em:unpack>
<em:targetApplication>
<!-- Firefox -->
<Description>
<em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
<em:minVersion>3.5</em:minVersion>
<em:maxVersion>8.*</em:maxVersion>
</Description>
@@ -97,64 +98,67 @@ class AddonIDTest(unittest.TestCase):
<Description>
<em:id>toolkit@mozilla.org</em:id>
<em:minVersion>1.9.1</em:minVersion>
<em:maxVersion>2.0*</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>""",
-"""<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ """<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:targetApplication>
<!-- Firefox -->
<Description>
<em:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</em:id>
<em:minVersion>3.5</em:minVersion>
<em:maxVersion>8.*</em:maxVersion>
</Description>
</em:targetApplication>
<em:id>winning</em:id>
<em:name>MozMill</em:name>
<em:version>2.0a</em:version>
<em:creator>Adam Christian</em:creator>
- <em:description>A testing extension based on the Windmill Testing Framework client source</em:description>
+ <em:description>A testing extension based on the
+ Windmill Testing Framework client source</em:description>
<em:unpack>true</em:unpack>
</Description>
</RDF>""",
-"""<RDF xmlns="http://www.mozilla.org/2004/em-rdf#"
+ """<RDF xmlns="http://www.mozilla.org/2004/em-rdf#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<rdf:Description about="urn:mozilla:install-manifest">
<id>winning</id>
<name>foo</name>
<version>42</version>
- <description>A testing extension based on the Windmill Testing Framework client source</description>
+ <description>A testing extension based on the
+ Windmill Testing Framework client source</description>
</rdf:Description>
</RDF>""",
-"""<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ """<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foobar="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<foobar:targetApplication>
<!-- Firefox -->
<Description>
<foobar:id>{ec8030f7-c20a-464f-9b0e-13a3a9e97384}</foobar:id>
<foobar:minVersion>3.5</foobar:minVersion>
<foobar:maxVersion>8.*</foobar:maxVersion>
</Description>
</foobar:targetApplication>
<foobar:id>winning</foobar:id>
<foobar:name>MozMill</foobar:name>
<foobar:version>2.0a</foobar:version>
<foobar:creator>Adam Christian</foobar:creator>
- <foobar:description>A testing extension based on the Windmill Testing Framework client source</foobar:description>
+ <foobar:description>A testing extension based on the
+ Windmill Testing Framework client source</foobar:description>
<foobar:unpack>true</foobar:unpack>
</Description>
</RDF>""",
-"""<?xml version="1.0"?>
+ """<?xml version="1.0"?>
<!--
-->
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest"
em:id="winning"
--- a/testing/mozbase/mozprofile/tests/bug785146.py
+++ b/testing/mozbase/mozprofile/tests/bug785146.py
@@ -7,16 +7,17 @@
import mozfile
import os
import shutil
import sqlite3
import tempfile
import unittest
from mozprofile.permissions import Permissions
+
class PermissionsTest(unittest.TestCase):
locations = """http://mochi.test:8888 primary,privileged
http://127.0.0.1:80 noxul
http://127.0.0.1:8888 privileged
"""
def setUp(self):
--- a/testing/mozbase/mozprofile/tests/permissions.py
+++ b/testing/mozbase/mozprofile/tests/permissions.py
@@ -7,16 +7,17 @@
import mozfile
import os
import shutil
import sqlite3
import tempfile
import unittest
from mozprofile.permissions import Permissions
+
class PermissionsTest(unittest.TestCase):
locations = """http://mochi.test:8888 primary,privileged
http://127.0.0.1:80 noxul
http://127.0.0.1:8888 privileged
"""
profile_dir = None
@@ -138,17 +139,18 @@ http://127.0.0.1:8888 privileg
self.assertEqual(len(user_prefs), 0)
self.assertEqual(len(prefs), 0)
prefs, user_prefs = perms.network_prefs(True)
self.assertEqual(len(user_prefs), 2)
self.assertEqual(user_prefs[0], ('network.proxy.type', 2))
self.assertEqual(user_prefs[1][0], 'network.proxy.autoconfig_url')
- origins_decl = "var knownOrigins = (function () { return ['http://mochi.test:8888', 'http://127.0.0.1:80', 'http://127.0.0.1:8888'].reduce"
+ origins_decl = "var knownOrigins = (function () { return ['http://mochi.test:8888', " \
+ "'http://127.0.0.1:80', 'http://127.0.0.1:8888'].reduce"
self.assertTrue(origins_decl in user_prefs[1][1])
proxy_check = ("'http': 'PROXY mochi.test:8888'",
"'https': 'PROXY mochi.test:4443'",
"'ws': 'PROXY mochi.test:4443'",
"'wss': 'PROXY mochi.test:4443'")
self.assertTrue(all(c in user_prefs[1][1] for c in proxy_check))
--- a/testing/mozbase/mozprofile/tests/server_locations.py
+++ b/testing/mozbase/mozprofile/tests/server_locations.py
@@ -5,16 +5,17 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
import mozfile
import unittest
from mozprofile.permissions import ServerLocations, \
MissingPrimaryLocationError, MultiplePrimaryLocationsError, \
DuplicateLocationError, BadPortLocationError, LocationsSyntaxError
+
class ServerLocationsTest(unittest.TestCase):
"""test server locations"""
locations = """# This is the primary location from which tests run.
#
http://mochi.test:8888 primary,privileged
# a few test locations
--- a/testing/mozbase/mozprofile/tests/test_addons.py
+++ b/testing/mozbase/mozprofile/tests/test_addons.py
@@ -253,20 +253,20 @@ class TestAddonsManager(unittest.TestCas
self.assertEqual(os.listdir(staged_path),
['test-addon-1@mozilla.org.xpi'])
self.am.clean()
def test_install_from_path_invalid_addons(self):
# Generate installer stubs for all possible types of addons
addons = []
addons.append(generate_addon('test-addon-invalid-no-manifest@mozilla.org',
- path=self.tmpdir,
- xpi=False))
+ path=self.tmpdir,
+ xpi=False))
addons.append(generate_addon('test-addon-invalid-no-id@mozilla.org',
- path=self.tmpdir))
+ path=self.tmpdir))
self.am.install_from_path(self.tmpdir)
self.assertEqual(self.am.installed_addons, [])
@unittest.skip("Feature not implemented as part of AddonManger")
def test_install_from_path_error(self):
""" Check install_from_path raises an error with an invalid addon"""
--- a/testing/mozbase/mozprofile/tests/test_clone_cleanup.py
+++ b/testing/mozbase/mozprofile/tests/test_clone_cleanup.py
@@ -19,18 +19,18 @@ class CloneCleanupTest(unittest.TestCase
see https://bugzilla.mozilla.org/show_bug.cgi?id=642843
"""
def setUp(self):
# make a profile with one preference
path = tempfile.mktemp()
self.addCleanup(mozfile.remove, path)
self.profile = Profile(path,
- preferences={'foo': 'bar'},
- restore=False)
+ preferences={'foo': 'bar'},
+ restore=False)
user_js = os.path.join(self.profile.profile, 'user.js')
self.assertTrue(os.path.exists(user_js))
def test_restore_true(self):
# make a clone of this profile with restore=True
clone = Profile.clone(self.profile.profile, restore=True)
self.addCleanup(mozfile.remove, clone.profile)
@@ -56,9 +56,8 @@ class CloneCleanupTest(unittest.TestCase
self.assertTrue(os.path.exists(profile_dir))
del clone
# clone should be deleted
self.assertFalse(os.path.exists(profile_dir))
if __name__ == '__main__':
unittest.main()
-
--- a/testing/mozbase/mozprofile/tests/test_nonce.py
+++ b/testing/mozbase/mozprofile/tests/test_nonce.py
@@ -7,16 +7,17 @@ see https://bugzilla.mozilla.org/show_bu
import os
import tempfile
import unittest
import mozfile
from mozprofile.prefs import Preferences
from mozprofile.profile import Profile
+
class PreferencesNonceTest(unittest.TestCase):
def test_nonce(self):
# make a profile with one preference
path = tempfile.mktemp()
self.addCleanup(mozfile.remove, path)
profile = Profile(path,
--- a/testing/mozbase/mozprofile/tests/test_preferences.py
+++ b/testing/mozbase/mozprofile/tests/test_preferences.py
@@ -11,16 +11,17 @@ import shutil
import tempfile
import unittest
from mozprofile.cli import MozProfileCLI
from mozprofile.prefs import Preferences
from mozprofile.profile import Profile
here = os.path.dirname(os.path.abspath(__file__))
+
class PreferencesTest(unittest.TestCase):
"""test mozprofile preference handling"""
# preferences from files/prefs_with_comments.js
_prefs_with_comments = {'browser.startup.homepage': 'http://planet.mozilla.org',
'zoom.minPercent': 30,
'zoom.maxPercent': 300,
'webgl.verbose': 'false'}
@@ -116,17 +117,17 @@ browser.startup.homepage = http://github
general.warnOnAboutConfig = False
"""
try:
fd, name = tempfile.mkstemp(suffix='.ini')
os.write(fd, _ini)
os.close(fd)
commandline = ["--preferences", name]
- # test the [DEFAULT] section
+ # test the [DEFAULT] section
_prefs = {'general.warnOnAboutConfig': 'False'}
self.compare_generated(_prefs, commandline)
finally:
# cleanup
os.remove(name)
def test_reset_should_remove_added_prefs(self):
@@ -186,28 +187,28 @@ general.warnOnAboutConfig = False
# we shouldn't have any initial preferences
initial_prefs = Preferences.read_prefs(prefs_file)
self.assertFalse(initial_prefs)
initial_prefs = file(prefs_file).read().strip()
self.assertFalse(initial_prefs)
# add some preferences
prefs1 = [("browser.startup.homepage", "http://planet.mozilla.org/"),
- ("zoom.minPercent", 30)]
+ ("zoom.minPercent", 30)]
profile.set_preferences(prefs1)
self.assertEqual(prefs1, Preferences.read_prefs(prefs_file))
lines = file(prefs_file).read().strip().splitlines()
self.assertTrue(bool([line for line in lines
if line.startswith('#MozRunner Prefs Start')]))
self.assertTrue(bool([line for line in lines
if line.startswith('#MozRunner Prefs End')]))
# add some more preferences
prefs2 = [("zoom.maxPercent", 300),
- ("webgl.verbose", 'false')]
+ ("webgl.verbose", 'false')]
profile.set_preferences(prefs2)
self.assertEqual(prefs1 + prefs2, Preferences.read_prefs(prefs_file))
lines = file(prefs_file).read().strip().splitlines()
self.assertTrue(len([line for line in lines
if line.startswith('#MozRunner Prefs Start')]) == 2)
self.assertTrue(len([line for line in lines
if line.startswith('#MozRunner Prefs End')]) == 2)
@@ -337,21 +338,21 @@ user_pref("webgl.force-enabled", true);
"""test reading preferences from a prefs.js file whose values
require interpolation"""
expected_prefs = {
"browser.foo": "http://server-name",
"zoom.minPercent": 30,
"webgl.verbose": "false",
"browser.bar": "somethingxyz"
- }
+ }
values = {
"server": "server-name",
"abc": "something"
- }
+ }
path = os.path.join(here, 'files', 'prefs_with_interpolation.js')
read_prefs = Preferences.read_prefs(path, interpolation=values)
self.assertEqual(dict(read_prefs), expected_prefs)
def test_read_prefs_ttw(self):
"""test reading preferences through the web via mozhttpd"""
# create a MozHttpd instance
--- a/testing/mozbase/mozprofile/tests/test_profile.py
+++ b/testing/mozbase/mozprofile/tests/test_profile.py
@@ -5,25 +5,26 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
import os
from mozprofile import Profile
class TestProfile(unittest.TestCase):
+
def test_with_profile_should_cleanup(self):
with Profile() as profile:
self.assertTrue(os.path.exists(profile.profile))
# profile is cleaned
self.assertFalse(os.path.exists(profile.profile))
def test_with_profile_should_cleanup_even_on_exception(self):
with self.assertRaises(ZeroDivisionError):
with Profile() as profile:
self.assertTrue(os.path.exists(profile.profile))
- 1/0 # will raise ZeroDivisionError
+ 1 / 0 # will raise ZeroDivisionError
# profile is cleaned
self.assertFalse(os.path.exists(profile.profile))
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozprofile/tests/test_profile_view.py
+++ b/testing/mozbase/mozprofile/tests/test_profile_view.py
@@ -7,25 +7,26 @@
import mozfile
import mozprofile
import os
import tempfile
import unittest
here = os.path.dirname(os.path.abspath(__file__))
+
class TestProfilePrint(unittest.TestCase):
def test_profileprint(self):
"""
test the summary function
"""
keys = set(['Files', 'Path', 'user.js'])
- ff_prefs = mozprofile.FirefoxProfile.preferences # shorthand
+ ff_prefs = mozprofile.FirefoxProfile.preferences # shorthand
pref_string = '\n'.join(['%s: %s' % (key, ff_prefs[key])
for key in sorted(ff_prefs.keys())])
tempdir = tempfile.mkdtemp()
try:
profile = mozprofile.FirefoxProfile(tempdir)
parts = profile.summary(return_parts=True)
parts = dict(parts)
--- a/testing/mozbase/mozprofile/tests/test_webapps.py
+++ b/testing/mozbase/mozprofile/tests/test_webapps.py
@@ -8,16 +8,17 @@ import os
import shutil
import unittest
from tempfile import mkdtemp
from mozprofile.webapps import WebappCollection, Webapp, WebappFormatException
here = os.path.dirname(os.path.abspath(__file__))
+
class WebappTest(unittest.TestCase):
"""Tests reading, installing and cleaning webapps
from a profile.
"""
manifest_path_1 = os.path.join(here, 'files', 'webapps1.json')
manifest_path_2 = os.path.join(here, 'files', 'webapps2.json')
def setUp(self):
@@ -45,34 +46,34 @@ class WebappTest(unittest.TestCase):
self.assertIsInstance(app, Webapp)
for key in Webapp.required_keys:
self.assertIn(key, app)
def test_invalid_webapp(self):
"""Tests a webapp with a missing required key"""
webapps = WebappCollection(self.profile)
# Missing the required key "description", exception should be raised
- self.assertRaises(WebappFormatException, webapps.append, { 'name': 'foo' })
+ self.assertRaises(WebappFormatException, webapps.append, {'name': 'foo'})
def test_webapp_collection(self):
"""Tests the methods of the WebappCollection object"""
- webapp_1 = { 'name': 'test_app_1',
- 'description': 'a description',
- 'manifestURL': 'http://example.com/1/manifest.webapp',
- 'appStatus': 1 }
+ webapp_1 = {'name': 'test_app_1',
+ 'description': 'a description',
+ 'manifestURL': 'http://example.com/1/manifest.webapp',
+ 'appStatus': 1}
- webapp_2 = { 'name': 'test_app_2',
- 'description': 'another description',
- 'manifestURL': 'http://example.com/2/manifest.webapp',
- 'appStatus': 2 }
+ webapp_2 = {'name': 'test_app_2',
+ 'description': 'another description',
+ 'manifestURL': 'http://example.com/2/manifest.webapp',
+ 'appStatus': 2}
- webapp_3 = { 'name': 'test_app_2',
- 'description': 'a third description',
- 'manifestURL': 'http://example.com/3/manifest.webapp',
- 'appStatus': 3 }
+ webapp_3 = {'name': 'test_app_2',
+ 'description': 'a third description',
+ 'manifestURL': 'http://example.com/3/manifest.webapp',
+ 'appStatus': 3}
webapps = WebappCollection(self.profile)
self.assertEqual(len(webapps), 0)
# WebappCollection should behave like a list
def invalid_index():
webapps[0]
self.assertRaises(IndexError, invalid_index)
@@ -140,22 +141,24 @@ class WebappTest(unittest.TestCase):
self.assertTrue(os.path.isfile(self.webapps_json_path))
webapps_json = webapps.read_json(self.webapps_json_path, description="a description")
self.assertEqual(len(webapps_json), 11)
# The new apps should be added
for app in webapps_json:
self.assertIsInstance(app, Webapp)
- self.assertTrue(os.path.isfile(os.path.join(self.webapps_dir, app['name'], 'manifest.webapp')))
+ self.assertTrue(os.path.isfile(os.path.join(self.webapps_dir, app['name'],
+ 'manifest.webapp')))
# The removed app should not exist in the manifest
self.assertNotIn(removed_app, webapps_json)
self.assertFalse(os.path.exists(os.path.join(self.webapps_dir, removed_app['name'])))
- # Cleaning should delete the webapps directory entirely since there was nothing there before
+ # Cleaning should delete the webapps directory entirely
+ # since there was nothing there before
webapps.clean()
self.assertFalse(os.path.isdir(self.webapps_dir))
def test_install_webapps_preexisting(self):
"""Tests installing webapps when the webapps directory already exists"""
manifest_json_2 = WebappCollection.read_json(self.manifest_path_2)
# Synthesize a pre-existing webapps directory
@@ -175,23 +178,25 @@ class WebappTest(unittest.TestCase):
# A backup should be created
self.assertTrue(os.path.isdir(os.path.join(self.profile, webapps.backup_dir)))
# Both manifests should remain installed
webapps_json = webapps.read_json(self.webapps_json_path, description='a fake description')
self.assertEqual(len(webapps_json), 12)
for app in webapps_json:
self.assertIsInstance(app, Webapp)
- self.assertTrue(os.path.isfile(os.path.join(self.webapps_dir, app['name'], 'manifest.webapp')))
+ self.assertTrue(os.path.isfile(os.path.join(self.webapps_dir, app['name'],
+ 'manifest.webapp')))
# Upon cleaning the backup should be restored
webapps.clean()
self.assertFalse(os.path.isdir(os.path.join(self.profile, webapps.backup_dir)))
# The original webapps should still be installed
webapps_json = webapps.read_json(self.webapps_json_path)
for app in webapps_json:
self.assertIsInstance(app, Webapp)
- self.assertTrue(os.path.isfile(os.path.join(self.webapps_dir, app['name'], 'manifest.webapp')))
+ self.assertTrue(os.path.isfile(os.path.join(self.webapps_dir, app['name'],
+ 'manifest.webapp')))
self.assertEqual(webapps_json, manifest_json_2)
if __name__ == '__main__':
unittest.main()
--- a/testing/mozbase/mozrunner/mozrunner/__init__.py
+++ b/testing/mozbase/mozrunner/mozrunner/__init__.py
@@ -1,8 +1,9 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .cli import *
from .errors import *
from .runners import *
import base
--- a/testing/mozbase/mozrunner/mozrunner/application.py
+++ b/testing/mozbase/mozrunner/mozrunner/application.py
@@ -15,22 +15,22 @@ from mozprofile import (
MetroFirefoxProfile,
ThunderbirdProfile
)
here = os.path.abspath(os.path.dirname(__file__))
def get_app_context(appname):
- context_map = { 'default': DefaultContext,
- 'b2g': B2GContext,
- 'firefox': FirefoxContext,
- 'thunderbird': ThunderbirdContext,
- 'metro': MetroContext,
- 'fennec': FennecContext}
+ context_map = {'default': DefaultContext,
+ 'b2g': B2GContext,
+ 'firefox': FirefoxContext,
+ 'thunderbird': ThunderbirdContext,
+ 'metro': MetroContext,
+ 'fennec': FennecContext}
if appname not in context_map:
raise KeyError("Application '%s' not supported!" % appname)
return context_map[appname]
class DefaultContext(object):
profile_class = Profile
@@ -61,17 +61,17 @@ class RemoteContext(object):
def adb(self):
if not self._adb:
paths = [os.environ.get('ADB'),
os.environ.get('ADB_PATH'),
self.which('adb')]
paths = [p for p in paths if p is not None if os.path.isfile(p)]
if not paths:
raise OSError(
- 'Could not find the adb binary, make sure it is on your' \
+ 'Could not find the adb binary, make sure it is on your'
'path or set the $ADB_PATH environment variable.')
self._adb = paths[0]
return self._adb
@property
def dm(self):
if not self._dm:
self._dm = self.dm_class(adbPath=self.adb, autoconnect=False)
--- a/testing/mozbase/mozrunner/mozrunner/base/__init__.py
+++ b/testing/mozbase/mozrunner/mozrunner/base/__init__.py
@@ -1,3 +1,5 @@
from .runner import BaseRunner
from .device import DeviceRunner, FennecRunner
from .browser import GeckoRuntimeRunner
+
+__all__ = ['BaseRunner', 'DeviceRunner', 'FennecRunner', 'GeckoRuntimeRunner']
--- a/testing/mozbase/mozrunner/mozrunner/base/browser.py
+++ b/testing/mozbase/mozrunner/mozrunner/base/browser.py
@@ -1,15 +1,14 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import mozinfo
import os
-import platform
import sys
from .runner import BaseRunner
class GeckoRuntimeRunner(BaseRunner):
"""
The base runner class used for local gecko runtime binaries,
--- a/testing/mozbase/mozrunner/mozrunner/base/device.py
+++ b/testing/mozbase/mozrunner/mozrunner/base/device.py
@@ -11,45 +11,46 @@ import sys
import tempfile
import time
import mozfile
from .runner import BaseRunner
from ..devices import BaseEmulator
+
class DeviceRunner(BaseRunner):
"""
The base runner class used for running gecko on
remote devices (or emulators), such as B2G.
"""
- env = { 'MOZ_CRASHREPORTER': '1',
- 'MOZ_CRASHREPORTER_NO_REPORT': '1',
- 'MOZ_CRASHREPORTER_SHUTDOWN': '1',
- 'MOZ_HIDE_RESULTS_TABLE': '1',
- 'MOZ_LOG': 'signaling:5,mtransport:5,datachannel:5,jsep:5,MediaPipelineFactory:5',
- 'R_LOG_LEVEL': '6',
- 'R_LOG_DESTINATION': 'stderr',
- 'R_LOG_VERBOSE': '1',
- 'NO_EM_RESTART': '1', }
+ env = {'MOZ_CRASHREPORTER': '1',
+ 'MOZ_CRASHREPORTER_NO_REPORT': '1',
+ 'MOZ_CRASHREPORTER_SHUTDOWN': '1',
+ 'MOZ_HIDE_RESULTS_TABLE': '1',
+ 'MOZ_LOG': 'signaling:5,mtransport:5,datachannel:5,jsep:5,MediaPipelineFactory:5',
+ 'R_LOG_LEVEL': '6',
+ 'R_LOG_DESTINATION': 'stderr',
+ 'R_LOG_VERBOSE': '1',
+ 'NO_EM_RESTART': '1', }
def __init__(self, device_class, device_args=None, **kwargs):
process_log = tempfile.NamedTemporaryFile(suffix='pidlog')
# the env will be passed to the device, it is not a *real* env
self._device_env = dict(DeviceRunner.env)
self._device_env['MOZ_PROCESS_LOG'] = process_log.name
# be sure we do not pass env to the parent class ctor
env = kwargs.pop('env', None)
if env:
self._device_env.update(env)
process_args = {'stream': sys.stdout,
'processOutputLine': self.on_output,
'onFinish': self.on_finish,
- 'onTimeout': self.on_timeout }
+ 'onTimeout': self.on_timeout}
process_args.update(kwargs.get('process_args') or {})
kwargs['process_args'] = process_args
BaseRunner.__init__(self, **kwargs)
device_args = device_args or {}
self.device = device_class(**device_args)
@@ -75,17 +76,17 @@ class DeviceRunner(BaseRunner):
# to see if we have the homescreen running, or something, before waiting here
self.device.wait_for_net()
if not self.device.wait_for_net():
raise Exception("Network did not come up when starting device")
pid = BaseRunner.start(self, *args, **kwargs)
- timeout = 10 # seconds
+ timeout = 10 # seconds
starttime = datetime.datetime.now()
while datetime.datetime.now() - starttime < datetime.timedelta(seconds=timeout):
if self.is_running():
break
time.sleep(1)
else:
print("timed out waiting for '%s' process to start" % self.app_ctx.remote_process)
@@ -156,16 +157,17 @@ class DeviceRunner(BaseRunner):
return crashed
def cleanup(self, *args, **kwargs):
BaseRunner.cleanup(self, *args, **kwargs)
self.device.cleanup()
class FennecRunner(DeviceRunner):
+
def __init__(self, cmdargs=None, **kwargs):
super(FennecRunner, self).__init__(**kwargs)
self.cmdargs = cmdargs or []
@property
def command(self):
cmd = [self.app_ctx.adb]
if self.app_ctx.dm._deviceSerial:
@@ -173,11 +175,11 @@ class FennecRunner(DeviceRunner):
cmd.append("shell")
app = "%s/org.mozilla.gecko.BrowserApp" % self.app_ctx.remote_process
am_subcommand = ["am", "start", "-a", "android.activity.MAIN", "-n", app]
app_params = ["-no-remote", "-profile", self.app_ctx.remote_profile]
app_params.extend(self.cmdargs)
am_subcommand.extend(["--es", "args", "'%s'" % " ".join(app_params)])
# Append env variables in the form |--es env0 MOZ_CRASHREPORTER=1|
for (count, (k, v)) in enumerate(self._device_env.iteritems()):
- am_subcommand.extend(["--es", "env%d" % count, "%s=%s" % (k,v)])
+ am_subcommand.extend(["--es", "env%d" % count, "%s=%s" % (k, v)])
cmd.append("%s" % " ".join(am_subcommand))
return cmd
--- a/testing/mozbase/mozrunner/mozrunner/base/runner.py
+++ b/testing/mozbase/mozrunner/mozrunner/base/runner.py
@@ -33,17 +33,18 @@ class BaseRunner(object):
process_class=None, process_args=None, symbols_path=None,
dump_save_path=None, addons=None):
self.app_ctx = app_ctx or DefaultContext()
if isinstance(profile, basestring):
self.profile = self.app_ctx.profile_class(profile=profile,
addons=addons)
else:
- self.profile = profile or self.app_ctx.profile_class(**getattr(self.app_ctx, 'profile_args', {}))
+ self.profile = profile or self.app_ctx.profile_class(**getattr(self.app_ctx,
+ 'profile_args', {}))
# process environment
if env is None:
self.env = os.environ.copy()
else:
self.env = env.copy()
self.clean_profile = clean_profile
--- a/testing/mozbase/mozrunner/mozrunner/cli.py
+++ b/testing/mozbase/mozrunner/mozrunner/cli.py
@@ -9,21 +9,22 @@ from mozprofile import MozProfileCLI
from .application import get_app_context
from .runners import runners
from .utils import findInPath
# Map of debugging programs to information about them
# from http://mxr.mozilla.org/mozilla-central/source/build/automationutils.py#59
DEBUGGERS = {'gdb': {'interactive': True,
- 'args': ['-q', '--args'],},
+ 'args': ['-q', '--args'], },
'valgrind': {'interactive': False,
'args': ['--leak-check=full']}
}
+
def debugger_arguments(debugger, arguments=None, interactive=None):
"""Finds debugger arguments from debugger given and defaults
:param debugger: name or path to debugger
:param arguments: arguments for the debugger, or None to use defaults
:param interactive: whether the debugger should run in interactive mode
"""
@@ -85,17 +86,17 @@ class CLI(MozProfileCLI):
help="run under a debugger, e.g. gdb or valgrind")
parser.add_option('--debugger-args', dest='debugger_args',
action='store',
help="arguments to the debugger")
parser.add_option('--interactive', dest='interactive',
action='store_true',
help="run the program interactively")
- ### methods for running
+ # methods for running
def command_args(self):
"""additional arguments for the mozilla application"""
return map(os.path.expanduser, self.options.appArgs)
def runner_args(self):
"""arguments to instantiate the runner class"""
return dict(cmdargs=self.command_args(),
@@ -117,17 +118,18 @@ class CLI(MozProfileCLI):
(debugger_arguments, interactive)
"""
debug_args = self.options.debugger_args
if debug_args is not None:
debug_args = debug_args.split()
interactive = self.options.interactive
if self.options.debugger:
- debug_args, interactive = debugger_arguments(self.options.debugger, debug_args, interactive)
+ debug_args, interactive = debugger_arguments(self.options.debugger, debug_args,
+ interactive)
return debug_args, interactive
def start(self, runner):
"""Starts the runner and waits for the application to exit
It can also happen via a keyboard interrupt. It should be
overwritten to provide custom running of the runner instance.
--- a/testing/mozbase/mozrunner/mozrunner/devices/__init__.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/__init__.py
@@ -3,8 +3,11 @@
# You can obtain one at http://mozilla.org/MPL/2.0/.
from emulator import BaseEmulator, Emulator, EmulatorAVD
from base import Device
import emulator_battery
import emulator_geo
import emulator_screen
+
+__all__ = ['BaseEmulator', 'Emulator', 'EmulatorAVD', 'Device',
+ 'emulator_battery', 'emulator_geo', 'emulator_screen']
--- a/testing/mozbase/mozrunner/mozrunner/devices/android_device.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/android_device.py
@@ -2,17 +2,16 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import fileinput
import glob
import os
import platform
import psutil
-import re
import shutil
import signal
import sys
import telnetlib
import time
import urlparse
import urllib2
from distutils.spawn import find_executable
@@ -27,20 +26,22 @@ EMULATOR_AUTH_FILE = os.path.join(os.pat
TOOLTOOL_URL = 'https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py'
TRY_URL = 'https://hg.mozilla.org/try/raw-file/default'
MANIFEST_PATH = 'testing/config/tooltool-manifests'
verbose_logging = False
+
class AvdInfo(object):
"""
Simple class to contain an AVD description.
"""
+
def __init__(self, description, name, tooltool_manifest, extra_args,
port):
self.description = description
self.name = name
self.tooltool_manifest = tooltool_manifest
self.extra_args = extra_args
self.port = port
@@ -68,16 +69,17 @@ AVD_DICT = {
'mozemulator-x86',
'testing/config/tooltool-manifests/androidx86/releng.manifest',
['-debug',
'init,console,gles,memcheck,adbserver,adbclient,adb,avd_config,socket',
'-qemu', '-m', '1024', '-enable-kvm'],
5554)
}
+
def verify_android_device(build_obj, install=False, xre=False, debugger=False):
"""
Determine if any Android device is connected via adb.
If no device is found, prompt to start an emulator.
If a device is found or an emulator started and 'install' is
specified, also check whether Firefox is installed on the
device; if not, prompt to install Firefox.
If 'xre' is specified, also check with MOZ_HOST_BIN is set
@@ -96,17 +98,17 @@ def verify_android_device(build_obj, ins
elif emulator.is_available():
response = raw_input(
"No Android devices connected. Start an emulator? (Y/n) ").strip()
if response.lower().startswith('y') or response == '':
if not emulator.check_avd():
_log_info("Fetching AVD. This may take a while...")
emulator.update_avd()
_log_info("Starting emulator running %s..." %
- emulator.get_avd_description())
+ emulator.get_avd_description())
emulator.start()
emulator.wait_for_start()
device_verified = True
if device_verified and install:
# Determine if Firefox is installed on the device; if not,
# prompt to install. This feature allows a test command to
# launch an emulator, install Firefox, and proceed with testing
@@ -115,33 +117,34 @@ def verify_android_device(build_obj, ins
# been forgotten.
# If Firefox is installed, there is no way to determine whether
# the current build is installed, and certainly no way to
# determine if the installed build is the desired build.
# Installing every time is problematic because:
# - it prevents testing against other builds (downloaded apk)
# - installation may take a couple of minutes.
installed = emulator.dm.shellCheckOutput(['pm', 'list',
- 'packages', 'org.mozilla.'])
- if not 'fennec' in installed and not 'firefox' in installed:
+ 'packages', 'org.mozilla.'])
+ if 'fennec' not in installed and 'firefox' not in installed:
response = raw_input(
"It looks like Firefox is not installed on this device.\n"
"Install Firefox? (Y/n) ").strip()
if response.lower().startswith('y') or response == '':
_log_info("Installing Firefox. This may take a while...")
build_obj._run_make(directory=".", target='install',
- ensure_exit_code=False)
+ ensure_exit_code=False)
if device_verified and xre:
# Check whether MOZ_HOST_BIN has been set to a valid xre; if not,
# prompt to install one.
xre_path = os.environ.get('MOZ_HOST_BIN')
err = None
if not xre_path:
- err = 'environment variable MOZ_HOST_BIN is not set to a directory containing host xpcshell'
+ err = "environment variable MOZ_HOST_BIN is not set to a directory" \
+ "containing host xpcshell"
elif not os.path.isdir(xre_path):
err = '$MOZ_HOST_BIN does not specify a directory'
elif not os.path.isfile(os.path.join(xre_path, 'xpcshell')):
err = '$MOZ_HOST_BIN/xpcshell does not exist'
if err:
xre_path = glob.glob(os.path.join(EMULATOR_HOME_DIR, 'host-utils*'))
for path in xre_path:
if os.path.isdir(path) and os.path.isfile(os.path.join(path, 'xpcshell')):
@@ -152,28 +155,30 @@ def verify_android_device(build_obj, ins
_log_info("Host utilities not found: %s" % err)
response = raw_input(
"Download and setup your host utilities? (Y/n) ").strip()
if response.lower().startswith('y') or response == '':
_log_info("Installing host utilities. This may take a while...")
host_platform = _get_host_platform()
if host_platform:
path = os.path.join(MANIFEST_PATH, host_platform, 'hostutils.manifest')
- _get_tooltool_manifest(build_obj.substs, path, EMULATOR_HOME_DIR, 'releng.manifest')
+ _get_tooltool_manifest(build_obj.substs, path, EMULATOR_HOME_DIR,
+ 'releng.manifest')
_tooltool_fetch()
xre_path = glob.glob(os.path.join(EMULATOR_HOME_DIR, 'host-utils*'))
for path in xre_path:
if os.path.isdir(path) and os.path.isfile(os.path.join(path, 'xpcshell')):
os.environ['MOZ_HOST_BIN'] = path
err = None
break
if err:
_log_warning("Unable to install host utilities.")
else:
- _log_warning("Unable to install host utilities -- your platform is not supported!")
+ _log_warning(
+ "Unable to install host utilities -- your platform is not supported!")
if debugger:
# Optionally set up JimDB. See https://wiki.mozilla.org/Mobile/Fennec/Android/GDB.
build_platform = _get_device_platform(build_obj.substs)
jimdb_path = os.path.join(EMULATOR_HOME_DIR, 'jimdb-%s' % build_platform)
jimdb_utils_path = os.path.join(jimdb_path, 'utils')
gdb_path = os.path.join(jimdb_path, 'bin', 'gdb')
err = None
@@ -183,61 +188,70 @@ def verify_android_device(build_obj, ins
err = '%s not found' % gdb_path
if err:
_log_info("JimDB (%s) not found: %s" % (build_platform, err))
response = raw_input(
"Download and setup JimDB (%s)? (Y/n) " % build_platform).strip()
if response.lower().startswith('y') or response == '':
host_platform = _get_host_platform()
if host_platform:
- _log_info("Installing JimDB (%s/%s). This may take a while..." % (host_platform, build_platform))
- path = os.path.join(MANIFEST_PATH, host_platform, 'jimdb-%s.manifest' % build_platform)
- _get_tooltool_manifest(build_obj.substs, path, EMULATOR_HOME_DIR, 'releng.manifest')
+ _log_info(
+ "Installing JimDB (%s/%s). This may take a while..." % (host_platform,
+ build_platform))
+ path = os.path.join(MANIFEST_PATH, host_platform,
+ 'jimdb-%s.manifest' % build_platform)
+ _get_tooltool_manifest(build_obj.substs, path,
+ EMULATOR_HOME_DIR, 'releng.manifest')
_tooltool_fetch()
if os.path.isfile(gdb_path):
# Get JimDB utilities from git repository
proc = ProcessHandler(['git', 'pull'], cwd=jimdb_utils_path)
proc.run()
git_pull_complete = False
try:
proc.wait()
if proc.proc.returncode == 0:
git_pull_complete = True
except:
if proc.poll() is None:
proc.kill(signal.SIGTERM)
if not git_pull_complete:
- _log_warning("Unable to update JimDB utils from git -- some JimDB features may be unavailable.")
+ _log_warning("Unable to update JimDB utils from git -- "
+ "some JimDB features may be unavailable.")
else:
_log_warning("Unable to install JimDB -- unable to fetch from tooltool.")
else:
_log_warning("Unable to install JimDB -- your platform is not supported!")
if os.path.isfile(gdb_path):
# sync gdbinit.local with build settings
_update_gdbinit(build_obj.substs, os.path.join(jimdb_utils_path, "gdbinit.local"))
# ensure JimDB is in system path, so that mozdebug can find it
bin_path = os.path.join(jimdb_path, 'bin')
os.environ['PATH'] = "%s:%s" % (bin_path, os.environ['PATH'])
return device_verified
+
def run_firefox_for_android(build_obj, params):
"""
Launch Firefox for Android on the connected device.
Optional 'params' allow parameters to be passed to Firefox.
"""
adb_path = _find_sdk_exe(build_obj.substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1)
try:
#
# Construct an adb command similar to:
#
- # adb shell am start -a android.activity.MAIN -n org.mozilla.fennec_$USER -d <url param> --es args "<params>"
+ # $ adb shell am start -a android.activity.MAIN \
+ # -n org.mozilla.fennec_$USER \
+ # -d <url param> \
+ # --es args "<params>"
#
app = "%s/org.mozilla.gecko.BrowserApp" % build_obj.substs['ANDROID_PACKAGE_NAME']
cmd = ['am', 'start', '-a', 'android.activity.MAIN', '-n', app]
if params:
for p in params:
if urlparse.urlparse(p).scheme != "":
cmd.extend(['-d', p])
params.remove(p)
@@ -247,19 +261,21 @@ def run_firefox_for_android(build_obj, p
_log_debug(cmd)
output = dm.shellCheckOutput(cmd, timeout=10)
_log_info(output)
except DMError:
_log_warning("unable to launch Firefox for Android")
return 1
return 0
+
def grant_runtime_permissions(build_obj):
"""
- Grant required runtime permissions to the specified app (typically org.mozilla.fennec_$USER).
+ Grant required runtime permissions to the specified app
+ (typically org.mozilla.fennec_$USER).
"""
app = build_obj.substs['ANDROID_PACKAGE_NAME']
adb_path = _find_sdk_exe(build_obj.substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1)
dm.default_timeout = 10
try:
@@ -269,16 +285,17 @@ def grant_runtime_permissions(build_obj)
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.WRITE_EXTERNAL_STORAGE'])
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.READ_EXTERNAL_STORAGE'])
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.ACCESS_FINE_LOCATION'])
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.CAMERA'])
dm.shellCheckOutput(['pm', 'grant', app, 'android.permission.WRITE_CONTACTS'])
except DMError:
_log_warning("Unable to grant runtime permissions to %s" % app)
+
class AndroidEmulator(object):
"""
Support running the Android emulator with an AVD from Mozilla
test automation.
Example usage:
emulator = AndroidEmulator()
@@ -298,17 +315,17 @@ class AndroidEmulator(object):
verbose_logging = verbose
self.substs = substs
self.avd_type = self._get_avd_type(avd_type)
self.avd_info = AVD_DICT[self.avd_type]
adb_path = _find_sdk_exe(substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
self.dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1,
- deviceSerial=device_serial)
+ deviceSerial=device_serial)
self.dm.default_timeout = 10
_log_debug("Emulator created with type %s" % self.avd_type)
def __del__(self):
if self.emulator_log:
self.emulator_log.close()
def is_running(self):
@@ -396,25 +413,25 @@ class AndroidEmulator(object):
if self.avd_info.extra_args:
# -enable-kvm option is not valid on OSX
if _get_host_platform() == 'macosx64' and '-enable-kvm' in self.avd_info.extra_args:
self.avd_info.extra_args.remove('-enable-kvm')
command += self.avd_info.extra_args
log_path = os.path.join(EMULATOR_HOME_DIR, 'emulator.log')
self.emulator_log = open(log_path, 'w')
_log_debug("Starting the emulator with this command: %s" %
- ' '.join(command))
+ ' '.join(command))
_log_debug("Emulator output will be written to '%s'" %
- log_path)
+ log_path)
self.proc = ProcessHandler(
command, storeOutput=False, processOutputLine=outputHandler,
env=env)
self.proc.run()
_log_debug("Emulator started with pid %d" %
- int(self.proc.proc.pid))
+ int(self.proc.proc.pid))
def wait_for_start(self):
"""
Verify that the emulator is running, the emulator device is visible
to adb, and Android has booted.
"""
if not self.proc:
_log_warning("Emulator not started!")
@@ -509,25 +526,25 @@ class AndroidEmulator(object):
def _verify_emulator(self):
telnet_ok = False
tn = None
while(not telnet_ok):
try:
tn = telnetlib.Telnet('localhost', self.avd_info.port, 10)
if tn is not None:
- res = tn.read_until('OK', 10)
+ tn.read_until('OK', 10)
self._telnet_cmd(tn, 'avd status')
self._telnet_cmd(tn, 'redir list')
self._telnet_cmd(tn, 'network status')
tn.write('quit\n')
tn.read_all()
telnet_ok = True
else:
- _log_warning("Unable to connect to port %d" % port)
+ _log_warning("Unable to connect to port %d" % self.avd_info.port)
except:
_log_warning("Trying again after unexpected exception")
finally:
if tn is not None:
tn.close()
if not telnet_ok:
time.sleep(10)
if self.proc.proc.poll() is not None:
@@ -538,16 +555,17 @@ class AndroidEmulator(object):
def _get_avd_type(self, requested):
if requested in AVD_DICT.keys():
return requested
if self.substs:
if not self.substs['TARGET_CPU'].startswith('arm'):
return 'x86'
return '4.3'
+
def _find_sdk_exe(substs, exe, tools):
if tools:
subdir = 'tools'
else:
subdir = 'platform-tools'
found = False
if not found and substs:
@@ -580,17 +598,17 @@ def _find_sdk_exe(substs, exe, tools):
_log_debug(
"Unable to find executable at %s" % exe_path)
except KeyError:
_log_debug("ANDROID_SDK_ROOT not set")
if not found:
# Can exe be found in the default bootstrap location?
mozbuild_path = os.environ.get('MOZBUILD_STATE_PATH',
- os.path.expanduser(os.path.join('~', '.mozbuild')))
+ os.path.expanduser(os.path.join('~', '.mozbuild')))
exe_path = os.path.join(
mozbuild_path, 'android-sdk-linux', subdir, exe)
if os.path.exists(exe_path):
found = True
else:
_log_debug(
"Unable to find executable at %s" % exe_path)
@@ -603,40 +621,45 @@ def _find_sdk_exe(substs, exe, tools):
_log_debug("Unable to find executable on PATH")
if found:
_log_debug("%s found at %s" % (exe, exe_path))
else:
exe_path = None
return exe_path
+
def _log_debug(text):
if verbose_logging:
print "DEBUG: %s" % text
+
def _log_warning(text):
print "WARNING: %s" % text
+
def _log_info(text):
print "%s" % text
+
def _download_file(url, filename, path):
f = urllib2.urlopen(url)
if not os.path.isdir(path):
try:
os.makedirs(path)
except Exception, e:
_log_warning(str(e))
return False
local_file = open(os.path.join(path, filename), 'wb')
local_file.write(f.read())
local_file.close()
_log_debug("Downloaded %s to %s/%s" % (url, path, filename))
return True
+
def _get_tooltool_manifest(substs, src_path, dst_path, filename):
if not os.path.isdir(dst_path):
try:
os.makedirs(dst_path)
except Exception, e:
_log_warning(str(e))
copied = False
if substs and 'top_srcdir' in substs:
@@ -645,43 +668,46 @@ def _get_tooltool_manifest(substs, src_p
dst = os.path.join(dst_path, filename)
shutil.copy(src, dst)
copied = True
_log_debug("Copied tooltool manifest %s to %s" % (src, dst))
if not copied:
url = os.path.join(TRY_URL, src_path)
_download_file(url, filename, dst_path)
+
def _tooltool_fetch():
def outputHandler(line):
_log_debug(line)
_download_file(TOOLTOOL_URL, 'tooltool.py', EMULATOR_HOME_DIR)
command = [sys.executable, 'tooltool.py',
'fetch', '-o', '-m', 'releng.manifest']
proc = ProcessHandler(
command, processOutputLine=outputHandler, storeOutput=False,
cwd=EMULATOR_HOME_DIR)
proc.run()
try:
proc.wait()
except:
if proc.poll() is None:
proc.kill(signal.SIGTERM)
+
def _get_host_platform():
plat = None
if 'darwin' in str(sys.platform).lower():
plat = 'macosx64'
elif 'linux' in str(sys.platform).lower():
if '64' in platform.architecture()[0]:
plat = 'linux64'
else:
plat = 'linux32'
return plat
+
def _get_device_platform(substs):
# PIE executables are required when SDK level >= 21 - important for gdbserver
adb_path = _find_sdk_exe(substs, 'adb', False)
if not adb_path:
adb_path = 'adb'
dm = DeviceManagerADB(autoconnect=False, adbPath=adb_path, retryLimit=1)
sdk_level = None
try:
@@ -694,16 +720,17 @@ def _get_device_platform(substs):
_log_warning("unable to determine Android sdk level")
pie = ''
if sdk_level and sdk_level >= 21:
pie = '-pie'
if substs['TARGET_CPU'].startswith('arm'):
return 'arm%s' % pie
return 'x86%s' % pie
+
def _update_gdbinit(substs, path):
if os.path.exists(path):
obj_replaced = False
src_replaced = False
# update existing objdir/srcroot in place
for line in fileinput.input(path, inplace=True):
if "feninit.default.objdir" in line and substs and 'MOZ_BUILD_ROOT' in substs:
print("python feninit.default.objdir = '%s'" % substs['MOZ_BUILD_ROOT'])
--- a/testing/mozbase/mozrunner/mozrunner/devices/autophone.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/autophone.py
@@ -20,166 +20,174 @@ from mozbuild.virtualenv import Virtuale
from mozdevice import DeviceManagerADB
from mozprocess import ProcessHandler
class AutophoneRunner(object):
"""
Supporting the mach 'autophone' command: configure, run autophone.
"""
- config = {'base-dir' : None,
- 'requirements-installed' : False,
- 'devices-configured' : False,
- 'test-manifest' : None }
+ config = {'base-dir': None,
+ 'requirements-installed': False,
+ 'devices-configured': False,
+ 'test-manifest': None}
CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.mozbuild', 'autophone.json')
def __init__(self, build_obj, verbose):
self.build_obj = build_obj
self.verbose = verbose
self.autophone_options = []
self.httpd = None
self.webserver_required = False
def reset_to_clean(self):
"""
If confirmed, remove the autophone directory and configuration.
"""
dir = self.config['base-dir']
if dir and os.path.exists(dir) and os.path.exists(self.CONFIG_FILE):
self.build_obj.log(logging.WARN, "autophone", {},
- "*** This will delete %s and reset your 'mach autophone' configuration! ***" % dir)
+ "*** This will delete %s and reset your "
+ "'mach autophone' configuration! ***" % dir)
response = raw_input(
"Proceed with deletion? (y/N) ").strip()
if response.lower().startswith('y'):
os.remove(self.CONFIG_FILE)
shutil.rmtree(dir)
else:
self.build_obj.log(logging.INFO, "autophone", {},
- "Already clean -- nothing to do!")
+ "Already clean -- nothing to do!")
def save_config(self):
"""
Persist self.config to a file.
"""
try:
with open(self.CONFIG_FILE, 'w') as f:
json.dump(self.config, f)
if self.verbose:
print("saved configuration: %s" % self.config)
except:
self.build_obj.log(logging.ERROR, "autophone", {},
- "unable to save 'mach autophone' configuration to %s" % self.CONFIG_FILE)
+ "unable to save 'mach autophone' "
+ "configuration to %s" % self.CONFIG_FILE)
if self.verbose:
self.build_obj.log(logging.ERROR, "autophone", {},
- str(sys.exc_info()[0]))
+ str(sys.exc_info()[0]))
def load_config(self):
"""
Import the configuration info saved by save_config().
"""
if os.path.exists(self.CONFIG_FILE):
try:
with open(self.CONFIG_FILE, 'r') as f:
self.config = json.load(f)
if self.verbose:
print("loaded configuration: %s" % self.config)
except:
self.build_obj.log(logging.ERROR, "autophone", {},
- "unable to load 'mach autophone' configuration from %s" % self.CONFIG_FILE)
+ "unable to load 'mach autophone' "
+ "configuration from %s" % self.CONFIG_FILE)
if self.verbose:
self.build_obj.log(logging.ERROR, "autophone", {},
- str(sys.exc_info()[0]))
+ str(sys.exc_info()[0]))
def setup_directory(self):
"""
Find the autophone source code location, or download if necessary.
"""
keep_going = True
dir = self.config['base-dir']
if not dir:
dir = os.path.join(os.path.expanduser('~'), 'mach-autophone')
if os.path.exists(os.path.join(dir, '.git')):
response = raw_input(
"Run autophone from existing directory, %s (Y/n) " % dir).strip()
- if not 'n' in response.lower():
+ if 'n' not in response.lower():
self.build_obj.log(logging.INFO, "autophone", {},
- "Configuring and running autophone at %s" % dir)
+ "Configuring and running autophone at %s" % dir)
return keep_going
self.build_obj.log(logging.INFO, "autophone", {},
- "Unable to find an existing autophone directory. Let's setup a new one...")
+ "Unable to find an existing autophone directory. "
+ "Let's setup a new one...")
response = raw_input(
"Enter location of new autophone directory: [%s] " % dir).strip()
if response != '':
dir = response
self.config['base-dir'] = dir
if not os.path.exists(os.path.join(dir, '.git')):
self.build_obj.log(logging.INFO, "autophone", {},
- "Cloning autophone repository to '%s'..." % dir)
+ "Cloning autophone repository to '%s'..." % dir)
self.config['requirements-installed'] = False
self.config['devices-configured'] = False
self.run_process(['git', 'clone', 'https://github.com/mozilla/autophone', dir])
self.run_process(['git', 'submodule', 'update', '--init', '--remote'], cwd=dir)
if not os.path.exists(os.path.join(dir, '.git')):
# git not installed? File permission problem? github not available?
self.build_obj.log(logging.ERROR, "autophone", {},
- "Unable to clone autophone directory.")
+ "Unable to clone autophone directory.")
if not self.verbose:
self.build_obj.log(logging.ERROR, "autophone", {},
- "Try re-running this command with --verbose to get more info.")
+ "Try re-running this command with --verbose to get more info.")
keep_going = False
return keep_going
def install_requirements(self):
"""
Install required python modules in a virtualenv rooted at <autophone>/_virtualenv.
"""
keep_going = True
dir = self.config['base-dir']
vdir = os.path.join(dir, '_virtualenv')
self.auto_virtualenv_manager = VirtualenvManager(self.build_obj.topsrcdir,
- self.build_obj.topobjdir, vdir, sys.stdout,
- os.path.join(self.build_obj.topsrcdir, 'build', 'virtualenv_packages.txt'))
+ self.build_obj.topobjdir,
+ vdir, sys.stdout,
+ os.path.join(self.build_obj.topsrcdir,
+ 'build',
+ 'virtualenv_packages.txt'))
if not self.config['requirements-installed'] or not os.path.exists(vdir):
self.build_obj.log(logging.INFO, "autophone", {},
- "Installing required modules in a virtualenv...")
+ "Installing required modules in a virtualenv...")
self.auto_virtualenv_manager.build()
self.auto_virtualenv_manager._run_pip(['install', '-r',
- os.path.join(dir, 'requirements.txt')])
+ os.path.join(dir, 'requirements.txt')])
self.config['requirements-installed'] = True
return keep_going
def configure_devices(self):
"""
Ensure devices.ini is set up.
"""
keep_going = True
device_ini = os.path.join(self.config['base-dir'], 'devices.ini')
if os.path.exists(device_ini):
response = raw_input(
"Use existing device configuration at %s? (Y/n) " % device_ini).strip()
- if not 'n' in response.lower():
+ if 'n' not in response.lower():
self.build_obj.log(logging.INFO, "autophone", {},
- "Using device configuration at %s" % device_ini)
+ "Using device configuration at %s" % device_ini)
return keep_going
keep_going = False
self.build_obj.log(logging.INFO, "autophone", {},
- "You must configure at least one Android device before running autophone.")
+ "You must configure at least one Android device "
+ "before running autophone.")
response = raw_input(
"Configure devices now? (Y/n) ").strip()
if response.lower().startswith('y') or response == '':
response = raw_input(
"Connect your rooted Android test device(s) with usb and press Enter ")
adb_path = 'adb'
try:
if os.path.exists(self.build_obj.substs["ADB"]):
adb_path = self.build_obj.substs["ADB"]
except:
if self.verbose:
self.build_obj.log(logging.ERROR, "autophone", {},
- str(sys.exc_info()[0]))
+ str(sys.exc_info()[0]))
# No build environment?
try:
adb_path = which.which('adb')
except which.WhichError:
adb_path = raw_input(
"adb not found. Enter path to adb: ").strip()
if self.verbose:
print("Using adb at %s" % adb_path)
@@ -188,59 +196,61 @@ class AutophoneRunner(object):
try:
with open(os.path.join(self.config['base-dir'], 'devices.ini'), 'w') as f:
for device in dm.devices():
serial = device[0]
if self.verify_device(adb_path, serial):
f.write("[device-%d]\nserialno=%s\n" % (device_index, serial))
device_index += 1
self.build_obj.log(logging.INFO, "autophone", {},
- "Added '%s' to device configuration." % serial)
+ "Added '%s' to device configuration." % serial)
keep_going = True
else:
self.build_obj.log(logging.WARNING, "autophone", {},
- "Device '%s' is not rooted - skipping" % serial)
+ "Device '%s' is not rooted - skipping" % serial)
except:
self.build_obj.log(logging.ERROR, "autophone", {},
- "Failed to get list of connected Android devices.")
+ "Failed to get list of connected Android devices.")
if self.verbose:
self.build_obj.log(logging.ERROR, "autophone", {},
- str(sys.exc_info()[0]))
+ str(sys.exc_info()[0]))
keep_going = False
if device_index <= 1:
self.build_obj.log(logging.ERROR, "autophone", {},
- "No devices configured! (Can you see your rooted test device(s) in 'adb devices'?")
+ "No devices configured! (Can you see your rooted test device(s)"
+ " in 'adb devices'?")
keep_going = False
if keep_going:
self.config['devices-configured'] = True
return keep_going
def configure_tests(self):
"""
Determine the required autophone --test-path option.
"""
dir = self.config['base-dir']
self.build_obj.log(logging.INFO, "autophone", {},
- "Autophone must be started with a 'test manifest' describing the type(s) of test(s) to run.")
+ "Autophone must be started with a 'test manifest' "
+ "describing the type(s) of test(s) to run.")
test_options = []
for ini in glob.glob(os.path.join(dir, 'tests', '*.ini')):
with open(ini, 'r') as f:
content = f.readlines()
for line in content:
if line.startswith('# @mach@ '):
webserver = False
if '@webserver@' in line:
webserver = True
line = line.replace('@webserver@', '')
test_options.append((line[9:].strip(), ini, webserver))
break
if len(test_options) >= 1:
test_options.sort()
self.build_obj.log(logging.INFO, "autophone", {},
- "These test manifests are available:")
+ "These test manifests are available:")
index = 1
for option in test_options:
print("%d. %s" % (index, option[0]))
index += 1
highest = index - 1
path = None
while not path:
path = None
@@ -251,25 +261,27 @@ class AutophoneRunner(object):
path = response
self.config['test-manifest'] = path
# Assume a webserver is required; if it isn't, user can provide a dummy url.
self.webserver_required = True
else:
try:
choice = int(response)
if choice >= 1 and choice <= highest:
- path = test_options[choice-1][1]
- if test_options[choice-1][2]:
+ path = test_options[choice - 1][1]
+ if test_options[choice - 1][2]:
self.webserver_required = True
else:
self.build_obj.log(logging.ERROR, "autophone", {},
- "'%s' invalid: Enter a number between 1 and %d!" % (response, highest))
+ "'%s' invalid: Enter a number between "
+ "1 and %d!" % (response, highest))
except ValueError:
self.build_obj.log(logging.ERROR, "autophone", {},
- "'%s' unrecognized: Enter a number between 1 and %d!" % (response, highest))
+ "'%s' unrecognized: Enter a number between "
+ "1 and %d!" % (response, highest))
self.autophone_options.extend(['--test-path', path])
else:
# Provide a simple backup for the unusual case where test manifests
# cannot be found.
response = ""
default = self.config['test-manifest'] or ""
while not os.path.isfile(response):
response = raw_input(
@@ -297,54 +309,57 @@ xre_path = %s
utility_path = %s
console_level = DEBUG
log_level = DEBUG
time_out = 300""" % (xre_path, xre_path))
if self.verbose:
print("Created %s with host utilities path %s" % (defaults_path, xre_path))
except:
self.build_obj.log(logging.ERROR, "autophone", {},
- "Unable to create %s" % defaults_path)
+ "Unable to create %s" % defaults_path)
if self.verbose:
self.build_obj.log(logging.ERROR, "autophone", {},
- str(sys.exc_info()[0]))
+ str(sys.exc_info()[0]))
def configure_unittests(self):
"""
Ensure unittest-defaults.ini is set up.
"""
defaults_path = os.path.join(self.config['base-dir'], 'configs', 'unittest-defaults.ini')
if os.path.isfile(defaults_path):
response = raw_input(
"Use existing unit test configuration at %s? (Y/n) " % defaults_path).strip()
if 'n' in response.lower():
os.remove(defaults_path)
if not os.path.isfile(defaults_path):
xre_path = os.environ.get('MOZ_HOST_BIN')
if not xre_path or not os.path.isdir(xre_path):
- emulator_path = os.path.join(os.path.expanduser('~'), '.mozbuild', 'android-device')
+ emulator_path = os.path.join(os.path.expanduser('~'), '.mozbuild',
+ 'android-device')
xre_paths = glob.glob(os.path.join(emulator_path, 'host-utils*'))
for xre_path in xre_paths:
if os.path.isdir(xre_path):
break
if not xre_path or not os.path.isdir(xre_path) or \
not os.path.isfile(os.path.join(xre_path, 'xpcshell')):
self.build_obj.log(logging.INFO, "autophone", {},
- "Some tests require access to 'host utilities' such as xpcshell.")
+ "Some tests require access to 'host utilities' "
+ "such as xpcshell.")
xre_path = raw_input(
"Enter path to host utilities directory: ").strip()
if not xre_path or not os.path.isdir(xre_path) or \
not os.path.isfile(os.path.join(xre_path, 'xpcshell')):
- self.build_obj.log(logging.ERROR, "autophone", {},
+ self.build_obj.log(
+ logging.ERROR, "autophone", {},
"Unable to configure unit tests - no path to host utilities.")
return False
self.write_unittest_defaults(defaults_path, xre_path)
if os.path.isfile(defaults_path):
self.build_obj.log(logging.INFO, "autophone", {},
- "Using unit test configuration at %s" % defaults_path)
+ "Using unit test configuration at %s" % defaults_path)
return True
def configure_ip(self):
"""
Determine what IP should be used for the autophone --ipaddr option.
"""
# Take a guess at the IP to suggest. This won't always get the "right" IP,
# but will save some typing, sometimes.
@@ -360,44 +375,50 @@ time_out = 300""" % (xre_path, xre_path)
return True
def configure_webserver(self):
"""
Determine the autophone --webserver-url option.
"""
if self.webserver_required:
self.build_obj.log(logging.INFO, "autophone", {},
- "Some of your selected tests require a webserver.")
+ "Some of your selected tests require a webserver.")
response = raw_input("Start a webserver now? [Y/n] ").strip()
parts = []
while len(parts) != 2:
response2 = raw_input(
"Webserver address? [%s:8100] " % self.ipaddr).strip()
if response2 == "":
parts = [self.ipaddr, "8100"]
else:
parts = response2.split(":")
if len(parts) == 2:
addr = parts[0]
try:
port = int(parts[1])
if port <= 0:
- self.build_obj.log(logging.ERROR, "autophone", {},
- "Port must be > 0. Enter webserver address in the format <ip>:<port>")
+ self.build_obj.log(
+ logging.ERROR, "autophone", {},
+ "Port must be > 0. "
+ "Enter webserver address in the format <ip>:<port>")
parts = []
except ValueError:
- self.build_obj.log(logging.ERROR, "autophone", {},
- "Port must be a number. Enter webserver address in the format <ip>:<port>")
+ self.build_obj.log(
+ logging.ERROR, "autophone", {},
+ "Port must be a number. "
+ "Enter webserver address in the format <ip>:<port>")
parts = []
else:
- self.build_obj.log(logging.ERROR, "autophone", {},
+ self.build_obj.log(
+ logging.ERROR, "autophone", {},
"Enter webserver address in the format <ip>:<port>")
if not ('n' in response.lower()):
self.launch_webserver(addr, port)
- self.autophone_options.extend(['--webserver-url', 'http://%s:%d' % (addr,port)])
+ self.autophone_options.extend(['--webserver-url',
+ 'http://%s:%d' % (addr, port)])
return True
def configure_other(self):
"""
Advanced users may set up additional options in autophone.ini.
Find and handle that case silently.
"""
path = os.path.join(self.config['base-dir'], 'autophone.ini')
@@ -405,75 +426,78 @@ time_out = 300""" % (xre_path, xre_path)
self.autophone_options.extend(['--config', path])
return True
def configure(self):
"""
Ensure all configuration files are set up and determine autophone options.
"""
return self.configure_devices() and \
- self.configure_unittests() and \
- self.configure_tests() and \
- self.configure_ip() and \
- self.configure_webserver() and \
- self.configure_other()
+ self.configure_unittests() and \
+ self.configure_tests() and \
+ self.configure_ip() and \
+ self.configure_webserver() and \
+ self.configure_other()
def verify_device(self, adb_path, device):
"""
Check that the specified device is available and rooted.
"""
try:
dm = DeviceManagerADB(adbPath=adb_path, retryLimit=1, deviceSerial=device)
if dm._haveSu or dm._haveRootShell:
return True
except:
- self.build_obj.log(logging.WARN, "autophone", {},
+ self.build_obj.log(
+ logging.WARN, "autophone", {},
"Unable to verify root on device.")
if self.verbose:
self.build_obj.log(logging.ERROR, "autophone", {},
- str(sys.exc_info()[0]))
+ str(sys.exc_info()[0]))
return False
def launch_autophone(self):
"""
Launch autophone in its own thread and wait for autophone startup.
"""
self.build_obj.log(logging.INFO, "autophone", {},
- "Launching autophone...")
+ "Launching autophone...")
self.thread = threading.Thread(target=self.run_autophone)
self.thread.start()
# Wait for startup, so that autophone startup messages do not get mixed
# in with our interactive command prompts.
dir = self.config['base-dir']
started = False
for seconds in [5, 5, 3, 3, 1, 1, 1, 1]:
time.sleep(seconds)
if self.run_process(['./ap.sh', 'autophone-status'], cwd=dir, dump=False):
started = True
break
time.sleep(1)
if not started:
self.build_obj.log(logging.WARN, "autophone", {},
- "Autophone is taking longer than expected to start.")
+ "Autophone is taking longer than expected to start.")
def run_autophone(self):
dir = self.config['base-dir']
cmd = [self.auto_virtualenv_manager.python_path, 'autophone.py']
cmd.extend(self.autophone_options)
self.run_process(cmd, cwd=dir, dump=True)
def command_prompts(self):
"""
Interactive command prompts: Provide access to ap.sh and trigger_runs.py.
"""
dir = self.config['base-dir']
if self.thread.isAlive():
- self.build_obj.log(logging.INFO, "autophone", {},
+ self.build_obj.log(
+ logging.INFO, "autophone", {},
"Use 'trigger' to select builds to test using the current test manifest.")
- self.build_obj.log(logging.INFO, "autophone", {},
+ self.build_obj.log(
+ logging.INFO, "autophone", {},
"Type 'trigger', 'help', 'quit', or an autophone command.")
quitting = False
while self.thread.isAlive() and not quitting:
response = raw_input(
"autophone command? ").strip().lower()
if response == "help":
self.run_process(['./ap.sh', 'autophone-help'], cwd=dir, dump=True)
print("""\
@@ -490,104 +514,112 @@ quit
""")
continue
if response == "trigger":
self.trigger_prompts()
continue
if response == "quit":
self.build_obj.log(logging.INFO, "autophone", {},
- "Quitting...")
+ "Quitting...")
response = "autophone-shutdown"
if response == "autophone-shutdown":
quitting = True
self.run_process(['./ap.sh', response], cwd=dir, dump=True)
if self.httpd:
self.httpd.shutdown()
self.thread.join()
def trigger_prompts(self):
"""
Sub-prompts for the "trigger" command.
"""
dir = self.config['base-dir']
- self.build_obj.log(logging.INFO, "autophone", {},
+ self.build_obj.log(
+ logging.INFO, "autophone", {},
"Tests will be run against a build or collection of builds, selected by:")
print("""\
1. The latest build
2. Build URL
3. Build ID
4. Date/date-time range\
""")
highest = 4
choice = 0
while (choice < 1 or choice > highest):
response = raw_input(
"Build selection type? (1-%d) " % highest).strip()
try:
choice = int(response)
except ValueError:
self.build_obj.log(logging.ERROR, "autophone", {},
- "Enter a number between 1 and %d" % highest)
+ "Enter a number between 1 and %d" % highest)
choice = 0
if choice == 1:
options = ["latest"]
elif choice == 2:
url = raw_input(
"Enter url of build to test; may be an http or file schema ").strip()
options = ["--build-url=%s" % url]
elif choice == 3:
response = raw_input(
"Enter Build ID, eg 20120403063158 ").strip()
options = [response]
elif choice == 4:
start = raw_input(
- "Enter start build date or date-time, e.g. 2012-04-03 or 2012-04-03T06:31:58 ").strip()
+ "Enter start build date or date-time, "
+ "e.g. 2012-04-03 or 2012-04-03T06:31:58 ").strip()
end = raw_input(
- "Enter end build date or date-time, e.g. 2012-04-03 or 2012-04-03T06:31:58 ").strip()
+ "Enter end build date or date-time, "
+ "e.g. 2012-04-03 or 2012-04-03T06:31:58 ").strip()
options = [start, end]
- self.build_obj.log(logging.INFO, "autophone", {},
+ self.build_obj.log(
+ logging.INFO, "autophone", {},
"You may optionally specify a repository name like 'mozilla-inbound' or 'try'.")
- self.build_obj.log(logging.INFO, "autophone", {},
+ self.build_obj.log(
+ logging.INFO, "autophone", {},
"If not specified, 'mozilla-central' is assumed.")
repo = raw_input(
"Enter repository name: ").strip()
if len(repo) > 0:
options.extend(["--repo=%s" % repo])
if repo == "mozilla-central" or repo == "mozilla-aurora" or len(repo) < 1:
- self.build_obj.log(logging.INFO, "autophone", {},
+ self.build_obj.log(
+ logging.INFO, "autophone", {},
"You may optionally specify the build location, like 'nightly' or 'tinderbox'.")
location = raw_input(
"Enter build location: ").strip()
if len(location) > 0:
options.extend(["--build-location=%s" % location])
else:
options.extend(["--build-location=tinderbox"])
cmd = [self.auto_virtualenv_manager.python_path, "trigger_runs.py"]
cmd.extend(options)
- self.build_obj.log(logging.INFO, "autophone", {},
+ self.build_obj.log(
+ logging.INFO, "autophone", {},
"Triggering...Tests will run once builds have been downloaded.")
self.build_obj.log(logging.INFO, "autophone", {},
- "Use 'autophone-status' to check progress.")
+ "Use 'autophone-status' to check progress.")
self.run_process(cmd, cwd=dir, dump=True)
def launch_webserver(self, addr, port):
"""
Launch the webserver (in a separate thread).
"""
self.build_obj.log(logging.INFO, "autophone", {},
- "Launching webserver...")
+ "Launching webserver...")
self.webserver_addr = addr
self.webserver_port = port
self.threadweb = threading.Thread(target=self.run_webserver)
self.threadweb.start()
def run_webserver(self):
class AutoHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
# A simple request handler with logging suppressed.
+
def log_message(self, format, *args):
pass
os.chdir(self.config['base-dir'])
address = (self.webserver_addr, self.webserver_port)
self.httpd = BaseHTTPServer.HTTPServer(address, AutoHTTPRequestHandler)
try:
self.httpd.serve_forever()
@@ -596,19 +628,19 @@ 4. Date/date-time range\
def run_process(self, cmd, cwd=None, dump=False):
def _processOutput(line):
if self.verbose or dump:
print(line)
if self.verbose:
self.build_obj.log(logging.INFO, "autophone", {},
- "Running '%s' in '%s'" % (cmd, cwd))
+ "Running '%s' in '%s'" % (cmd, cwd))
proc = ProcessHandler(cmd, cwd=cwd, processOutputLine=_processOutput,
- processStderrLine=_processOutput)
+ processStderrLine=_processOutput)
proc.run()
proc_complete = False
try:
proc.wait()
if proc.proc.returncode == 0:
proc_complete = True
except:
if proc.poll() is None:
--- a/testing/mozbase/mozrunner/mozrunner/devices/base.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/base.py
@@ -11,16 +11,17 @@ import socket
import subprocess
import tempfile
import time
import traceback
from mozdevice import DMError
from mozprocess import ProcessHandler
+
class Device(object):
connected = False
logcat_proc = None
def __init__(self, app_ctx, logdir=None, serial=None, restore=True):
self.app_ctx = app_ctx
self.dm = self.app_ctx.dm
self.restore = restore
@@ -42,18 +43,18 @@ class Device(object):
self.dm.getFile(remote_ini, local_ini.name)
cfg = ConfigParser()
cfg.read(local_ini.name)
profiles = []
for section in cfg.sections():
if cfg.has_option(section, 'Path'):
if cfg.has_option(section, 'IsRelative') and cfg.getint(section, 'IsRelative'):
- profiles.append(posixpath.join(posixpath.dirname(remote_ini), \
- cfg.get(section, 'Path')))
+ profiles.append(posixpath.join(posixpath.dirname(remote_ini),
+ cfg.get(section, 'Path')))
else:
profiles.append(cfg.get(section, 'Path'))
return profiles
def pull_minidumps(self):
"""
Saves any minidumps found in the remote profile on the local filesystem.
@@ -76,17 +77,17 @@ class Device(object):
"""
self.dm.remount()
if self.dm.dirExists(self.app_ctx.remote_profile):
self.dm.shellCheckOutput(['rm', '-r', self.app_ctx.remote_profile])
self.dm.pushDir(profile.profile, self.app_ctx.remote_profile)
- timeout = 5 # seconds
+ timeout = 5 # seconds
starttime = datetime.datetime.now()
while datetime.datetime.now() - starttime < datetime.timedelta(seconds=timeout):
if self.dm.fileExists(self.app_ctx.remote_profiles_ini):
break
time.sleep(1)
else:
print "timed out waiting for profiles.ini"
@@ -109,32 +110,35 @@ class Device(object):
# Ideally all applications would read the profile the same way, but in practice
# this isn't true. Perform application specific profile-related setup if necessary.
if hasattr(self.app_ctx, 'setup_profile'):
for remote_path in self.app_ctx.remote_backup_files:
self.backup_file(remote_path)
self.app_ctx.setup_profile(profile)
def _get_online_devices(self):
- return [d[0] for d in self.dm.devices() if d[1] != 'offline' if not d[0].startswith('emulator')]
+ return [d[0] for d in self.dm.devices()
+ if d[1] != 'offline'
+ if not d[0].startswith('emulator')]
def connect(self):
"""
Connects to a running device. If no serial was specified in the
constructor, defaults to the first entry in `adb devices`.
"""
if self.connected:
return
if self.serial:
serial = self.serial
else:
online_devices = self._get_online_devices()
if not online_devices:
- raise IOError("No devices connected. Ensure the device is on and remote debugging via adb is enabled in the settings.")
+ raise IOError("No devices connected. Ensure the device is on and "
+ "remote debugging via adb is enabled in the settings.")
serial = online_devices[0]
self.dm._deviceSerial = serial
self.dm.connect()
self.connected = True
if self.logdir:
# save logcat
@@ -171,17 +175,17 @@ class Device(object):
:param busybox: Path to busybox binary to install.
"""
self.dm.remount()
print 'pushing %s' % self.app_ctx.remote_busybox
self.dm.pushFile(busybox, self.app_ctx.remote_busybox, retryLimit=10)
# TODO for some reason using dm.shellCheckOutput doesn't work,
# while calling adb shell directly does.
args = [self.app_ctx.adb, '-s', self.dm._deviceSerial,
- 'shell', 'cd /system/bin; chmod 555 busybox;' \
+ 'shell', 'cd /system/bin; chmod 555 busybox;'
'for x in `./busybox --list`; do ln -s ./busybox $x; done']
adb = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
adb.wait()
self.dm._verifyZip()
def setup_port_forwarding(self, local_port=None, remote_port=2828):
"""
Set up TCP port forwarding to the specified port on the device,
@@ -189,29 +193,30 @@ class Device(object):
:param local_port: The local port to forward from, if unspecified a
random port is chosen.
:param remote_port: The remote port to forward to, defaults to 2828.
:returns: The local_port being forwarded.
"""
if not local_port:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(("",0))
+ s.bind(("", 0))
local_port = s.getsockname()[1]
s.close()
self.dm.forward('tcp:%d' % int(local_port), 'tcp:%d' % int(remote_port))
return local_port
def wait_for_net(self):
active = False
time_out = 0
while not active and time_out < 40:
- proc = subprocess.Popen([self.app_ctx.adb, 'shell', '/system/bin/netcfg'], stdout=subprocess.PIPE)
- proc.stdout.readline() # ignore first line
+ proc = subprocess.Popen([self.app_ctx.adb, 'shell', '/system/bin/netcfg'],
+ stdout=subprocess.PIPE)
+ proc.stdout.readline() # ignore first line
line = proc.stdout.readline()
while line != "":
if (re.search(r'UP\s+[1-9]\d{0,2}\.\d{1,3}\.\d{1,3}\.\d{1,3}', line)):
active = True
break
line = proc.stdout.readline()
time_out += 1
time.sleep(1)
@@ -255,17 +260,18 @@ class Device(object):
return
self.dm.remount()
# Restore the original profile
for added_file in self.added_files:
self.dm.removeFile(added_file)
for backup_file in self.backup_files:
- if self.dm.fileExists('%s.orig' % backup_file) or self.dm.dirExists('%s.orig' % backup_file):
+ if self.dm.fileExists('%s.orig' % backup_file) or \
+ self.dm.dirExists('%s.orig' % backup_file):
self.dm.moveTree('%s.orig' % backup_file, backup_file)
# Perform application specific profile cleanup if necessary
if hasattr(self.app_ctx, 'cleanup_profile'):
self.app_ctx.cleanup_profile()
# Remove the test profile
self.dm.removeDir(self.app_ctx.remote_profile)
@@ -281,21 +287,20 @@ class Device(object):
basename = basename[:-len('.1')]
basename = '%s.%d.log' % (basename, index)
destlog = os.path.join(self.logdir, basename)
if os.path.isfile(destlog):
if index == 3:
os.remove(destlog)
else:
- self._rotate_log(destlog, index+1)
+ self._rotate_log(destlog, index + 1)
shutil.move(srclog, destlog)
-
class ProfileConfigParser(RawConfigParser):
"""
Class to create profiles.ini config files
Subclass of RawConfigParser that outputs .ini files in the exact
format expected for profiles.ini, which is slightly different
than the default format.
"""
@@ -313,9 +318,8 @@ class ProfileConfigParser(RawConfigParse
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = "=".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
-
--- a/testing/mozbase/mozrunner/mozrunner/devices/emulator.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/emulator.py
@@ -15,18 +15,19 @@ from mozprocess import ProcessHandler
from .base import Device
from .emulator_battery import EmulatorBattery
from .emulator_geo import EmulatorGeo
from .emulator_screen import EmulatorScreen
from ..errors import TimeoutException
class ArchContext(object):
+
def __init__(self, arch, context, binary=None, avd=None, extra_args=None):
- homedir = getattr(context,'homedir', '')
+ homedir = getattr(context, 'homedir', '')
kernel = os.path.join(homedir, 'prebuilts', 'qemu-kernel', '%s', '%s')
sysdir = os.path.join(homedir, 'out', 'target', 'product', '%s')
self.extra_args = []
self.binary = os.path.join(context.bindir or '', 'emulator')
if arch == 'x86':
self.binary = os.path.join(context.bindir or '', 'emulator-x86')
self.kernel = kernel % ('x86', 'kernel-qemu')
self.sysdir = sysdir % 'generic_x86'
@@ -44,16 +45,17 @@ class ArchContext(object):
if binary:
self.binary = binary
if extra_args:
self.extra_args.extend(extra_args)
class SDCard(object):
+
def __init__(self, emulator, size):
self.emulator = emulator
self.path = self.create_sdcard(size)
def create_sdcard(self, sdcard_size):
"""
Creates an sdcard partition in the emulator.
@@ -133,17 +135,17 @@ class BaseEmulator(Device):
'timed out waiting for emulator to start')
devices = set(self._get_online_devices())
devices = devices - original_devices
self.serial = devices.pop()
self.connect()
def _get_online_devices(self):
return [d[0] for d in self.dm.devices() if d[1] != 'offline' if
- d[0].startswith('emulator')]
+ d[0].startswith('emulator')]
def connect(self):
"""
Connects to a running device. If no serial was specified in the
constructor, defaults to the first entry in `adb devices`.
"""
if self.connected:
return
@@ -186,16 +188,17 @@ class BaseEmulator(Device):
def __del__(self):
if self.telnet:
self.telnet.write('exit\n')
self.telnet.read_all()
class Emulator(BaseEmulator):
+
def __init__(self, app_ctx, arch, resolution=None, sdcard=None, userdata=None,
no_window=None, binary=None, **kwargs):
super(Emulator, self).__init__(app_ctx, arch=arch, binary=binary, **kwargs)
# emulator args
self.resolution = resolution or '320x480'
self._sdcard_size = sdcard
self._sdcard = None
@@ -212,21 +215,21 @@ class Emulator(BaseEmulator):
@property
def args(self):
"""
Arguments to pass into the emulator binary.
"""
qemu_args = super(Emulator, self).args
qemu_args.extend([
- '-kernel', self.arch.kernel,
- '-sysdir', self.arch.sysdir,
- '-data', self.userdata.name,
- '-initdata', self.initdata,
- '-wipe-data'])
+ '-kernel', self.arch.kernel,
+ '-sysdir', self.arch.sysdir,
+ '-data', self.userdata.name,
+ '-initdata', self.initdata,
+ '-wipe-data'])
if self.no_window:
qemu_args.append('-no-window')
if self.sdcard:
qemu_args.extend(['-sdcard', self.sdcard])
qemu_args.extend(['-memory', '512',
'-partition-size', '512',
'-verbose',
'-skin', self.resolution,
@@ -252,17 +255,19 @@ class Emulator(BaseEmulator):
def cleanup(self):
"""
Cleans up and kills the emulator, if it was started by mozrunner.
"""
super(Emulator, self).cleanup()
# Remove temporary files
self.userdata.close()
+
class EmulatorAVD(BaseEmulator):
+
def __init__(self, app_ctx, binary, avd, port=5554, **kwargs):
super(EmulatorAVD, self).__init__(app_ctx, binary=binary, avd=avd, **kwargs)
self.port = port
@property
def args(self):
"""
Arguments to pass into the emulator binary.
--- a/testing/mozbase/mozrunner/mozrunner/devices/emulator_battery.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/emulator_battery.py
@@ -1,12 +1,13 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
class EmulatorBattery(object):
def __init__(self, emulator):
self.emulator = emulator
def get_state(self):
status = {}
state = {}
@@ -45,9 +46,8 @@ class EmulatorBattery(object):
if charging:
cmd = 'power ac on'
else:
cmd = 'power ac off'
self.emulator._run_telnet(cmd)
charging = property(get_charging, set_charging)
level = property(get_level, set_level)
-
--- a/testing/mozbase/mozrunner/mozrunner/devices/emulator_geo.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/emulator_geo.py
@@ -1,17 +1,17 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
class EmulatorGeo(object):
def __init__(self, emulator):
self.emulator = emulator
def set_default_location(self):
self.lon = -122.08769
self.lat = 37.41857
self.set_location(self.lon, self.lat)
def set_location(self, lon, lat):
self.emulator._run_telnet('geo fix %0.5f %0.5f' % (self.lon, self.lat))
-
--- a/testing/mozbase/mozrunner/mozrunner/devices/emulator_screen.py
+++ b/testing/mozbase/mozrunner/mozrunner/devices/emulator_screen.py
@@ -1,12 +1,13 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
class EmulatorScreen(object):
"""Class for screen related emulator commands."""
SO_PORTRAIT_PRIMARY = 'portrait-primary'
SO_PORTRAIT_SECONDARY = 'portrait-secondary'
SO_LANDSCAPE_PRIMARY = 'landscape-primary'
SO_LANDSCAPE_SECONDARY = 'landscape-secondary'
@@ -81,9 +82,8 @@ class EmulatorScreen(object):
SCREEN_ORIENTATIONS = {"portrait": EmulatorScreen.SO_PORTRAIT_PRIMARY,
"landscape": EmulatorScreen.SO_LANDSCAPE_PRIMARY,
"portrait-primary": EmulatorScreen.SO_PORTRAIT_PRIMARY,
"landscape-primary": EmulatorScreen.SO_LANDSCAPE_PRIMARY,
"portrait-secondary": EmulatorScreen.SO_PORTRAIT_SECONDARY,
"landscape-secondary": EmulatorScreen.SO_LANDSCAPE_SECONDARY}
-
--- a/testing/mozbase/mozrunner/mozrunner/errors.py
+++ b/testing/mozbase/mozrunner/mozrunner/errors.py
@@ -1,13 +1,16 @@
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
+
class RunnerException(Exception):
"""Base exception handler for mozrunner related errors"""
+
class RunnerNotStartedError(RunnerException):
"""Exception handler in case the runner hasn't been started"""
+
class TimeoutException(RunnerException):
"""Raised on timeout waiting for targets to start."""
--- a/testing/mozbase/mozrunner/mozrunner/runners.py
+++ b/testing/mozbase/mozrunner/mozrunner/runners.py
@@ -100,36 +100,36 @@ def FennecEmulatorRunner(avd='mozemulato
binary=None,
app='org.mozilla.fennec',
**kwargs):
"""
Create a Fennec emulator runner. This can either start a new emulator
(which will use an avd), or connect to an already-running emulator.
:param avd: name of an AVD available in your environment.
- Typically obtained via tooltool: either 'mozemulator-4.3' or 'mozemulator-x86'. Defaults to 'mozemulator-4.3'
+ Typically obtained via tooltool: either 'mozemulator-4.3' or 'mozemulator-x86'.
+ Defaults to 'mozemulator-4.3'
:param avd_home: Path to avd parent directory
:param logdir: Path to save logfiles such as logcat and qemu output.
:param serial: Serial of emulator to connect to as seen in `adb devices`.
Defaults to the first entry in `adb devices`.
:param binary: Path to emulator binary.
Defaults to None, which causes the device_class to guess based on PATH.
:param app: Name of Fennec app (often org.mozilla.fennec_$USER)
Defaults to 'org.mozilla.fennec'
:param cmdargs: Arguments to pass into binary.
:returns: A DeviceRunner for Android emulators.
"""
kwargs['app_ctx'] = get_app_context('fennec')(app, adb_path=adb_path,
avd_home=avd_home)
- device_args = { 'app_ctx': kwargs['app_ctx'],
- 'avd': avd,
- 'binary': binary,
- 'serial': serial,
- 'logdir': logdir
- }
+ device_args = {'app_ctx': kwargs['app_ctx'],
+ 'avd': avd,
+ 'binary': binary,
+ 'serial': serial,
+ 'logdir': logdir}
return FennecRunner(device_class=EmulatorAVD,
device_args=device_args,
**kwargs)
def B2GEmulatorRunner(arch='arm',
b2g_home=None,
adb_path=None,
@@ -154,28 +154,29 @@ def B2GEmulatorRunner(arch='arm',
:param env: Environment variables to pass into the b2g.sh process.
:param clean_profile: If True, restores profile back to original state.
:param process_class: Class used to launch the b2g.sh process.
:param process_args: Arguments to pass into the b2g.sh process.
:param symbols_path: Path to symbol files used for crash analysis.
:returns: A DeviceRunner for B2G emulators.
"""
kwargs['app_ctx'] = get_app_context('b2g')(b2g_home, adb_path=adb_path)
- device_args = { 'app_ctx': kwargs['app_ctx'],
- 'arch': arch,
- 'binary': binary,
- 'resolution': resolution,
- 'sdcard': sdcard,
- 'userdata': userdata,
- 'no_window': no_window,
- 'logdir': logdir }
+ device_args = {'app_ctx': kwargs['app_ctx'],
+ 'arch': arch,
+ 'binary': binary,
+ 'resolution': resolution,
+ 'sdcard': sdcard,
+ 'userdata': userdata,
+ 'no_window': no_window,
+ 'logdir': logdir}
return DeviceRunner(device_class=Emulator,
device_args=device_args,
**kwargs)
+
def B2GDeviceRunner(b2g_home=None,
adb_path=None,
logdir=None,
serial=None,
**kwargs):
"""
Create a B2G device runner.
@@ -186,26 +187,25 @@ def B2GDeviceRunner(b2g_home=None,
:param env: Environment variables to pass into the b2g.sh process.
:param clean_profile: If True, restores profile back to original state.
:param process_class: Class used to launch the b2g.sh process.
:param process_args: Arguments to pass into the b2g.sh process.
:param symbols_path: Path to symbol files used for crash analysis.
:returns: A DeviceRunner for B2G devices.
"""
kwargs['app_ctx'] = get_app_context('b2g')(b2g_home, adb_path=adb_path)
- device_args = { 'app_ctx': kwargs['app_ctx'],
- 'logdir': logdir,
- 'serial': serial }
+ device_args = {'app_ctx': kwargs['app_ctx'],
+ 'logdir': logdir,
+ 'serial': serial}
return DeviceRunner(device_class=Device,
device_args=device_args,
**kwargs)
runners = {
- 'default': Runner,
- 'b2g_desktop': B2GDesktopRunner,
- 'b2g_emulator': B2GEmulatorRunner,
- 'b2g_device': B2GDeviceRunner,
- 'firefox': FirefoxRunner,
- 'thunderbird': ThunderbirdRunner,
- 'fennec': FennecEmulatorRunner
+ 'default': Runner,
+ 'b2g_desktop': B2GDesktopRunner,
+ 'b2g_emulator': B2GEmulatorRunner,
+ 'b2g_device': B2GDeviceRunner,
+ 'firefox': FirefoxRunner,
+ 'thunderbird': ThunderbirdRunner,
+ 'fennec': FennecEmulatorRunner
}
-
--- a/testing/mozbase/mozrunner/mozrunner/utils.py
+++ b/testing/mozbase/mozrunner/mozrunner/utils.py
@@ -1,36 +1,36 @@
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""Utility functions for mozrunner"""
-__all__ = ['findInPath', 'get_metadata_from_egg', 'uses_marionette']
-
-
-from functools import wraps
import mozinfo
import os
import sys
+__all__ = ['findInPath', 'get_metadata_from_egg']
-### python package method metadata by introspection
+
+# python package method metadata by introspection
try:
import pkg_resources
+
def get_metadata_from_egg(module):
ret = {}
try:
dist = pkg_resources.get_distribution(module)
except pkg_resources.DistributionNotFound:
return {}
if dist.has_metadata("PKG-INFO"):
key = None
+ value = ""
for line in dist.get_metadata("PKG-INFO").splitlines():
# see http://www.python.org/dev/peps/pep-0314/
if key == 'Description':
# descriptions can be long
if not line or line[0].isspace():
value += '\n' + line
continue
else:
@@ -181,17 +181,17 @@ def test_environment(xrePath, env=None,
else:
message = message % 'default memory'
if lsanPath:
log.info("LSan enabled.")
asanOptions.append('detect_leaks=1')
lsanOptions = ["exitcode=0"]
# Uncomment out the next line to report the addresses of leaked objects.
- #lsanOptions.append("report_objects=1")
+ # lsanOptions.append("report_objects=1")
suppressionsFile = os.path.join(
lsanPath, 'lsan_suppressions.txt')
if os.path.exists(suppressionsFile):
log.info("LSan using suppression file " + suppressionsFile)
lsanOptions.append("suppressions=" + suppressionsFile)
else:
log.info("WARNING | runtests.py | LSan suppressions file"
" does not exist! " + suppressionsFile)
@@ -231,44 +231,49 @@ def get_stack_fixer_function(utilityPath
A stack fixing function checks if a line conforms to the output from
MozFormatCodeAddressDetails. If the line does not, the line is returned
unchanged. If the line does, an attempt is made to convert the
file+offset into something human-readable (e.g. a function name).
"""
if not mozinfo.info.get('debug'):
return None
- stack_fixer_function = None
-
def import_stack_fixer_module(module_name):
sys.path.insert(0, utilityPath)
module = __import__(module_name, globals(), locals(), [])
sys.path.pop(0)
return module
if symbolsPath and os.path.exists(symbolsPath):
- # Run each line through a function in fix_stack_using_bpsyms.py (uses breakpad symbol files).
+ # Run each line through a function in fix_stack_using_bpsyms.py (uses breakpad
+ # symbol files).
# This method is preferred for Tinderbox builds, since native
# symbols may have been stripped.
stack_fixer_module = import_stack_fixer_module(
'fix_stack_using_bpsyms')
- stack_fixer_function = lambda line: stack_fixer_module.fixSymbols(
- line, symbolsPath)
+
+ def stack_fixer_function(line):
+ return stack_fixer_module.fixSymbols(line, symbolsPath)
elif mozinfo.isMac:
# Run each line through fix_macosx_stack.py (uses atos).
# This method is preferred for developer machines, so we don't
# have to run "make buildsymbols".
stack_fixer_module = import_stack_fixer_module(
'fix_macosx_stack')
- stack_fixer_function = lambda line: stack_fixer_module.fixSymbols(
- line)
+
+ def stack_fixer_function(line):
+ return stack_fixer_module.fixSymbols(line)
elif mozinfo.isLinux:
# Run each line through fix_linux_stack.py (uses addr2line).
# This method is preferred for developer machines, so we don't
# have to run "make buildsymbols".
stack_fixer_module = import_stack_fixer_module(
'fix_linux_stack')
- stack_fixer_function = lambda line: stack_fixer_module.fixSymbols(
- line)
+
+ def stack_fixer_function(line):
+ return stack_fixer_module.fixSymbols(line)
+
+ else:
+ return None
return stack_fixer_function
--- a/testing/mozbase/mozrunner/setup.py
+++ b/testing/mozbase/mozrunner/setup.py
@@ -37,19 +37,18 @@ setup(name=PACKAGE_NAME,
],
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL 2.0',
packages=find_packages(),
package_data={'mozrunner': [
- 'resources/metrotestharness.exe'
+ 'resources/metrotestharness.exe'
]},
zip_safe=False,
install_requires=deps,
extras_require=EXTRAS_REQUIRE,
entry_points="""
# -*- Entry points: -*-
[console_scripts]
mozrunner = mozrunner:cli
- """,
- )
+ """)
--- a/testing/mozbase/mozrunner/tests/mozrunnertest.py
+++ b/testing/mozbase/mozrunner/tests/mozrunnertest.py
@@ -10,17 +10,17 @@ import mozrunner
@unittest.skipIf(not os.environ.get('BROWSER_PATH'),
'No binary has been specified.')
class MozrunnerTestCase(unittest.TestCase):
def setUp(self):
self.pids = []
- self.threads = [ ]
+ self.threads = []
self.profile = mozprofile.FirefoxProfile()
self.runner = mozrunner.FirefoxRunner(os.environ['BROWSER_PATH'],
profile=self.profile)
def tearDown(self):
for thread in self.threads:
thread.join()
--- a/testing/mozbase/mozrunner/tests/test_interactive.py
+++ b/testing/mozbase/mozrunner/tests/test_interactive.py
@@ -2,16 +2,17 @@
import threading
from time import sleep
import mozrunnertest
class RunnerThread(threading.Thread):
+
def __init__(self, runner, timeout=10):
threading.Thread.__init__(self)
self.runner = runner
self.timeout = timeout
def run(self):
sleep(self.timeout)
self.runner.stop()
--- a/testing/mozbase/mozrunner/tests/test_threads.py
+++ b/testing/mozbase/mozrunner/tests/test_threads.py
@@ -5,16 +5,17 @@
import threading
from time import sleep
import mozrunnertest
class RunnerThread(threading.Thread):
+
def __init__(self, runner, do_start, timeout=10):
threading.Thread.__init__(self)
self.runner = runner
self.timeout = timeout
self.do_start = do_start
def run(self):
sleep(self.timeout)
--- a/testing/mozbase/mozscreenshot/setup.py
+++ b/testing/mozbase/mozscreenshot/setup.py
@@ -9,17 +9,17 @@ PACKAGE_NAME = 'mozscreenshot'
PACKAGE_VERSION = '0.1'
setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description="Library for taking screenshots in tests harness",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=['mozscreenshot'],
zip_safe=False,
install_requires=['mozlog', 'mozinfo'],
--- a/testing/mozbase/mozsystemmonitor/mozsystemmonitor/resourcemonitor.py
+++ b/testing/mozbase/mozsystemmonitor/mozsystemmonitor/resourcemonitor.py
@@ -1,45 +1,52 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
+from contextlib import contextmanager
import multiprocessing
import sys
import time
import warnings
from collections import (
OrderedDict,
namedtuple,
)
+
class PsutilStub(object):
+
def __init__(self):
self.sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
'sout'])
self.sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
'read_bytes', 'write_bytes',
'read_time', 'write_time'])
self.pcputimes = namedtuple('pcputimes', ['user', 'system'])
self.svmem = namedtuple(
'svmem', ['total', 'available', 'percent', 'used', 'free',
'active', 'inactive', 'buffers', 'cached'])
def cpu_percent(self, a, b):
return [0]
+
def cpu_times(self, percpu):
if percpu:
return [self.pcputimes(0, 0)]
else:
return self.pcputimes(0, 0)
+
def disk_io_counters(self):
return self.sdiskio(0, 0, 0, 0, 0, 0)
+
def swap_memory(self):
return self.sswap(0, 0, 0, 0, 0, 0)
+
def virtual_memory(self):
return self.svmem(0, 0, 0, 0, 0, 0, 0, 0, 0)
# psutil will raise NotImplementedError if the platform is not supported.
try:
import psutil
have_psutil = True
@@ -47,17 +54,16 @@ except Exception:
try:
# The PsutilStub should get us time intervals, at least
psutil = PsutilStub()
except Exception:
psutil = None
have_psutil = False
-from contextlib import contextmanager
def get_disk_io_counters():
try:
io_counters = psutil.disk_io_counters()
except RuntimeError:
io_counters = []
return io_counters
@@ -98,42 +104,43 @@ def _collect(pipe, poll_interval):
# TODO Does this wrap? At 32 bits? At 64 bits?
# TODO Consider patching "delta" API to upstream.
io_diff = [v - io_last[i] for i, v in enumerate(io)]
io_last = io
cpu_diff = []
for core, values in enumerate(cpu_times):
cpu_diff.append([v - cpu_last[core][i] for i, v in
- enumerate(values)])
+ enumerate(values)])
cpu_last = cpu_times
swap_entry = list(swap_mem)
swap_entry[sin_index] = swap_mem.sin - swap_last.sin
swap_entry[sout_index] = swap_mem.sout - swap_last.sout
swap_last = swap_mem
data.append((last_time, measured_end_time, io_diff, cpu_diff,
- cpu_percent, list(virt_mem), swap_entry))
+ cpu_percent, list(virt_mem), swap_entry))
collection_overhead = time.time() - last_time - poll_interval
last_time = measured_end_time
sleep_interval = max(0, poll_interval - collection_overhead)
for entry in data:
pipe.send(entry)
pipe.send(('done', None, None, None, None, None, None))
pipe.close()
sys.exit(0)
SystemResourceUsage = namedtuple('SystemResourceUsage',
- ['start', 'end', 'cpu_times', 'cpu_percent', 'io', 'virt', 'swap'])
+ ['start', 'end',
+ 'cpu_times', 'cpu_percent', 'io', 'virt', 'swap'])
class SystemResourceMonitor(object):
"""Measures system resources.
Each instance measures system resources from the time it is started
until it is finished. It does this on a separate process so it doesn't
impact execution of the main Python process.
@@ -242,17 +249,17 @@ class SystemResourceMonitor(object):
self._virt_type = type(virt)
self._virt_len = len(virt)
self._swap_type = type(swap)
self._swap_len = len(swap)
self._pipe, child_pipe = multiprocessing.Pipe(True)
self._process = multiprocessing.Process(None, _collect,
- args=(child_pipe, poll_interval))
+ args=(child_pipe, poll_interval))
def __del__(self):
if self._running:
self._pipe.send(('terminate',))
self._process.join()
# Methods to control monitoring.
@@ -305,17 +312,17 @@ class SystemResourceMonitor(object):
break
io = self._io_type(*io_diff)
virt = self._virt_type(*virt_mem)
swap = self._swap_type(*swap_mem)
cpu_times = [self._cpu_times_type(*v) for v in cpu_diff]
self.measurements.append(SystemResourceUsage(start_time, end_time,
- cpu_times, cpu_percent, io, virt, swap))
+ cpu_times, cpu_percent, io, virt, swap))
# We establish a timeout so we don't hang forever if the child
# process has crashed.
self._process.join(10)
if self._process.is_alive():
self._process.terminate()
self._process.join(10)
else:
@@ -423,17 +430,17 @@ class SystemResourceMonitor(object):
raise Exception('Could not find start event: %s' % start_event)
if end_time is None:
raise Exception('Could not find end event: %s' % end_event)
return self.range_usage(start_time, end_time)
def aggregate_cpu_percent(self, start=None, end=None, phase=None,
- per_cpu=True):
+ per_cpu=True):
"""Obtain the aggregate CPU percent usage for a range.
Returns a list of floats representing average CPU usage percentage per
core if per_cpu is True (the default). If per_cpu is False, return a
single percentage value.
By default this will return data for the entire instrumented interval.
If phase is defined, data for a named phase will be returned. If start
@@ -458,17 +465,17 @@ class SystemResourceMonitor(object):
if per_cpu:
return [sum(x) / samples for x in cpu]
cores = [sum(x) for x in cpu]
return sum(cores) / len(cpu) / samples
def aggregate_cpu_times(self, start=None, end=None, phase=None,
- per_cpu=True):
+ per_cpu=True):
"""Obtain the aggregate CPU times for a range.
If per_cpu is True (the default), this returns a list of named tuples.
Each tuple is as if it were returned by psutil.cpu_times(). If per_cpu
is False, this returns a single named tuple of the aforementioned type.
"""
empty = [0 for i in range(0, self._cpu_times_len)]
cpu = [list(empty) for i in range(0, self._cpu_cores)]
--- a/testing/mozbase/mozsystemmonitor/mozsystemmonitor/test/test_resource_monitor.py
+++ b/testing/mozbase/mozsystemmonitor/mozsystemmonitor/test/test_resource_monitor.py
@@ -14,16 +14,17 @@ except ImportError:
from mozsystemmonitor.resourcemonitor import (
SystemResourceMonitor,
SystemResourceUsage,
)
@unittest.skipIf(psutil is None, 'Resource monitor requires psutil.')
class TestResourceMonitor(unittest.TestCase):
+
def test_basic(self):
monitor = SystemResourceMonitor(poll_interval=0.5)
monitor.start()
time.sleep(3)
monitor.stop()
--- a/testing/mozbase/moztest/moztest/__init__.py
+++ b/testing/mozbase/moztest/moztest/__init__.py
@@ -1,5 +1,7 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import adapters
+
+__all__ = ['adapters']
--- a/testing/mozbase/moztest/moztest/adapters/__init__.py
+++ b/testing/mozbase/moztest/moztest/adapters/__init__.py
@@ -1,5 +1,7 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unit
+
+__all__ = ['unit']
--- a/testing/mozbase/moztest/moztest/adapters/unit.py
+++ b/testing/mozbase/moztest/moztest/adapters/unit.py
@@ -11,40 +11,44 @@ try:
from unittest import TextTestResult
except ImportError:
# bug 971243 - python 2.6 compatibilty
from unittest import _TextTestResult as TextTestResult
"""Adapter used to output structuredlog messages from unittest
testsuites"""
+
def get_test_class_name(test):
"""
This method is used to return the full class name from a
:class:`unittest.TestCase` instance.
It is used as a default to define the "class_name" extra value
passed in structured loggers. You can override the default by
implementing a "get_test_class_name" method on you TestCase subclass.
"""
return "%s.%s" % (test.__class__.__module__,
test.__class__.__name__)
+
def get_test_method_name(test):
"""
This method is used to return the full method name from a
:class:`unittest.TestCase` instance.
It is used as a default to define the "method_name" extra value
passed in structured loggers. You can override the default by
implementing a "get_test_method_name" method on you TestCase subclass.
"""
return test._testMethodName
+
class StructuredTestResult(TextTestResult):
+
def __init__(self, *args, **kwargs):
self.logger = kwargs.pop('logger')
self.test_list = kwargs.pop("test_list", [])
self.result_callbacks = kwargs.pop('result_callbacks', [])
self.passed = 0
self.testsRun = 0
TextTestResult.__init__(self, *args, **kwargs)
@@ -128,31 +132,31 @@ class StructuredTestResult(TextTestResul
expected="PASS",
stack=self._extract_stacktrace(err, test),
extra=extra)
def addFailure(self, test, err):
extra = self.call_callbacks(test, "ERROR")
extra.update(self._get_class_method_name(test))
self.logger.test_end(test.id(),
- "FAIL",
+ "FAIL",
message=self._extract_err_message(err),
expected="PASS",
stack=self._extract_stacktrace(err, test),
extra=extra)
def addSuccess(self, test):
extra = self._get_class_method_name(test)
self.logger.test_end(test.id(), "PASS", expected="PASS", extra=extra)
def addExpectedFailure(self, test, err):
extra = self.call_callbacks(test, "ERROR")
extra.update(self._get_class_method_name(test))
self.logger.test_end(test.id(),
- "FAIL",
+ "FAIL",
message=self._extract_err_message(err),
expected="FAIL",
stack=self._extract_stacktrace(err, test),
extra=extra)
def addUnexpectedSuccess(self, test):
extra = self.call_callbacks(test, "ERROR")
extra.update(self._get_class_method_name(test))
--- a/testing/mozbase/moztest/moztest/output/autolog.py
+++ b/testing/mozbase/moztest/moztest/output/autolog.py
@@ -29,45 +29,45 @@ class AutologOutput(Output):
passed = coll.tests_with_result('PASS')
failed = coll.tests_with_result('UNEXPECTED-FAIL')
unexpected_passes = coll.tests_with_result('UNEXPECTED-PASS')
errors = coll.tests_with_result('ERROR')
skipped = coll.tests_with_result('SKIPPED')
known_fails = coll.tests_with_result('KNOWN-FAIL')
testgroup = RESTfulAutologTestGroup(
- testgroup=context.testgroup,
- os=context.os,
- platform=context.arch,
- harness=context.harness,
- server=self.es_server,
- restserver=self.rest_server,
- machine=context.hostname,
- logfile=context.logfile,
- )
+ testgroup=context.testgroup,
+ os=context.os,
+ platform=context.arch,
+ harness=context.harness,
+ server=self.es_server,
+ restserver=self.rest_server,
+ machine=context.hostname,
+ logfile=context.logfile,
+ )
testgroup.add_test_suite(
testsuite=results_collection.suite_name,
elapsedtime=coll.time_taken,
passed=count(passed),
failed=count(failed) + count(errors) + count(unexpected_passes),
todo=count(skipped) + count(known_fails),
- )
+ )
testgroup.set_primary_product(
tree=context.tree,
revision=context.revision,
productname=context.product,
buildtype=context.buildtype,
- )
+ )
# need to call this again since we already used the generator
for f in coll.tests_with_result('UNEXPECTED-FAIL'):
testgroup.add_test_failure(
test=long_name(f),
text='\n'.join(f.output),
status=f.result,
- )
+ )
testgroups.append(testgroup)
return testgroups
def post(self, data):
msg = "Must pass in a list returned by make_testgroups."
for d in data:
assert isinstance(d, RESTfulAutologTestGroup), msg
d.submit()
--- a/testing/mozbase/moztest/moztest/output/base.py
+++ b/testing/mozbase/moztest/moztest/output/base.py
@@ -10,21 +10,24 @@ from StringIO import StringIO
try:
from abc import abstractmethod
except ImportError:
# abc is python 2.6+
# from https://github.com/mozilla/mozbase/blob/master/mozdevice/mozdevice/devicemanager.py
def abstractmethod(method):
line = method.func_code.co_firstlineno
filename = method.func_code.co_filename
+
def not_implemented(*args, **kwargs):
- raise NotImplementedError('Abstract method %s at File "%s", line %s should be implemented by a concrete class' %
- (repr(method), filename,line))
+ raise NotImplementedError('Abstract method %s at File "%s", '
+ 'line %s should be implemented by a concrete class' %
+ (repr(method), filename, line))
return not_implemented
+
class Output(object):
""" Abstract base class for outputting test results """
@abstractmethod
def serialize(self, results_collection, file_obj):
""" Writes the string representation of the results collection
to the given file object"""
--- a/testing/mozbase/moztest/moztest/output/xunit.py
+++ b/testing/mozbase/moztest/moztest/output/xunit.py
@@ -49,17 +49,17 @@ class XUnitOutput(Output):
t.appendChild(f)
cls = classes[cls_name]
cls.appendChild(t)
doc = dom.Document()
failed = sum([count(results_collection.tests_with_result(t))
- for t in TestResult.FAIL_RESULTS])
+ for t in TestResult.FAIL_RESULTS])
passed = count(results_collection.tests_with_result('PASS'))
skipped = count(results_collection.tests_with_result('SKIPPED'))
assembly = doc.createElement('assembly')
assembly.setAttribute('name', results_collection.suite_name)
assembly.setAttribute('time', str(results_collection.time_taken))
assembly.setAttribute('total', str(len(results_collection)))
assembly.setAttribute('passed', str(passed))
--- a/testing/mozbase/moztest/moztest/results.py
+++ b/testing/mozbase/moztest/moztest/results.py
@@ -78,17 +78,17 @@ class TestResult(object):
test_class = the class that the test belongs to
time_start = timestamp (seconds since UNIX epoch) of when the test started
running; if not provided, defaults to the current time
! Provide 0 if you only have the duration
context = TestContext instance; can be None
result_expected = string representing the expected outcome of the test"""
msg = "Result '%s' not in possible results: %s" %\
- (result_expected, ', '.join(self.POSSIBLE_RESULTS))
+ (result_expected, ', '.join(self.POSSIBLE_RESULTS))
assert isinstance(name, basestring), "name has to be a string"
assert result_expected in self.POSSIBLE_RESULTS, msg
self.name = name
self.test_class = test_class
self.context = context
self.time_start = time_start if time_start is not None else time.time()
self.time_end = None
@@ -154,24 +154,24 @@ class TestResult(object):
def finish(self, result, time_end=None, output=None, reason=None):
""" Marks the test as finished, storing its end time and status
! Provide the duration as time_end if you only have that. """
if result in self.POSSIBLE_RESULTS:
self._result_actual = result
self.result = self.calculate_result(self._result_expected,
- self._result_actual)
+ self._result_actual)
elif result in self.COMPUTED_RESULTS:
self.infer_results(result)
self.result = result
else:
valid = self.POSSIBLE_RESULTS + self.COMPUTED_RESULTS
msg = "Result '%s' not valid. Need one of: %s" %\
- (result, ', '.join(valid))
+ (result, ', '.join(valid))
raise ValueError(msg)
# use lists instead of multiline strings
if isinstance(output, basestring):
output = output.splitlines()
self.time_end = time_end if time_end is not None else time.time()
self.output = output or self.output
@@ -202,17 +202,17 @@ class TestResultCollection(list):
list.__init__(self)
self.suite_name = suite_name
self.time_taken = time_taken
if resultClass is not None:
self.resultClass = resultClass
def __str__(self):
return "%s (%.2fs)\n%s" % (self.suite_name, self.time_taken,
- list.__str__(self))
+ list.__str__(self))
def subset(self, predicate):
tests = self.filter(predicate)
duration = 0
sub = TestResultCollection(self.suite_name)
for t in tests:
sub.append(t)
duration += t.duration
@@ -227,33 +227,33 @@ class TestResultCollection(list):
def filter(self, predicate):
""" Returns a generator of TestResults that satisfy a given predicate """
return (tr for tr in self if predicate(tr))
def tests_with_result(self, result):
""" Returns a generator of TestResults with the given result """
msg = "Result '%s' not in possible results: %s" %\
- (result, ', '.join(self.resultClass.COMPUTED_RESULTS))
+ (result, ', '.join(self.resultClass.COMPUTED_RESULTS))
assert result in self.resultClass.COMPUTED_RESULTS, msg
return self.filter(lambda t: t.result == result)
@property
def tests(self):
""" Generator of all tests in the collection """
return (t for t in self)
def add_result(self, test, result_expected='PASS',
- result_actual='PASS', output='', context=None):
+ result_actual='PASS', output='', context=None):
def get_class(test):
return test.__class__.__module__ + '.' + test.__class__.__name__
t = self.resultClass(name=str(test).split()[0], test_class=get_class(test),
- time_start=0, result_expected=result_expected,
- context=context)
+ time_start=0, result_expected=result_expected,
+ context=context)
t.finish(result_actual, time_end=0, reason=relevant_line(output),
output=output)
self.append(t)
@property
def num_failures(self):
fails = 0
for t in self:
--- a/testing/mozbase/moztest/setup.py
+++ b/testing/mozbase/moztest/setup.py
@@ -8,17 +8,17 @@ PACKAGE_VERSION = '0.7'
# dependencies
deps = ['mozinfo']
setup(name='moztest',
version=PACKAGE_VERSION,
description="Package for storing and outputting Mozilla test results",
long_description="see http://mozbase.readthedocs.org/",
- classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='mozilla',
author='Mozilla Automation and Tools team',
author_email='tools@lists.mozilla.org',
url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
license='MPL',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
--- a/testing/mozbase/mozversion/mozversion/__init__.py
+++ b/testing/mozbase/mozversion/mozversion/__init__.py
@@ -1,6 +1,7 @@
+# flake8: noqa
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .errors import *
from .mozversion import cli, get_version
--- a/testing/mozbase/mozversion/mozversion/errors.py
+++ b/testing/mozbase/mozversion/mozversion/errors.py
@@ -1,26 +1,30 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
class VersionError(Exception):
+
def __init__(self, message):
Exception.__init__(self, message)
class AppNotFoundError(VersionError):
"""Exception for the application not found"""
+
def __init__(self, message):
VersionError.__init__(self, message)
class LocalAppNotFoundError(AppNotFoundError):
"""Exception for local application not found"""
+
def __init__(self, path):
AppNotFoundError.__init__(self, 'Application not found at: %s' % path)
class RemoteAppNotFoundError(AppNotFoundError):
"""Exception for remote application not found"""
+
def __init__(self, message):
AppNotFoundError.__init__(self, message)
--- a/testing/mozbase/mozversion/mozversion/mozversion.py
+++ b/testing/mozbase/mozversion/mozversion/mozversion.py
@@ -144,19 +144,19 @@ class B2GVersion(Version):
'-d', tempdir])
with open(gaia_commit) as f:
changeset, date = f.read().splitlines()
self._info['gaia_changeset'] = re.match(
'^\w{40}$', changeset) and changeset or None
self._info['gaia_date'] = date
except KeyError:
- self._logger.warning(
- 'Unable to find resources/gaia_commit.txt in '
- 'application.zip')
+ self._logger.warning(
+ 'Unable to find resources/gaia_commit.txt in '
+ 'application.zip')
finally:
mozfile.remove(tempdir)
class LocalB2GVersion(B2GVersion):
def __init__(self, binary, sources=None, **kwargs):
B2GVersion.__init__(self, sources, **kwargs)
--- a/testing/mozbase/mozversion/tests/test_apk.py
+++ b/testing/mozbase/mozversion/tests/test_apk.py
@@ -8,18 +8,18 @@ import mozfile
import unittest
import zipfile
from mozversion import get_version
class ApkTest(unittest.TestCase):
"""test getting version information from an android .apk"""
- application_changeset = 'a'*40
- platform_changeset = 'b'*40
+ application_changeset = 'a' * 40
+ platform_changeset = 'b' * 40
def create_apk_zipfiles(self, zfile):
zfile.writestr('application.ini',
"""[App]\nSourceStamp=%s\n""" % self.application_changeset)
zfile.writestr('platform.ini',
"""[Build]\nSourceStamp=%s\n""" % self.platform_changeset)
zfile.writestr('AndroidManifest.xml', '')
--- a/testing/mozbase/setup_development.py
+++ b/testing/mozbase/setup_development.py
@@ -25,60 +25,67 @@ except ImportError:
# directory containing this file
here = os.path.dirname(os.path.abspath(__file__))
# all python packages
mozbase_packages = [i for i in os.listdir(here)
if os.path.exists(os.path.join(here, i, 'setup.py'))]
-test_packages = [ "mock" # testing: https://wiki.mozilla.org/Auto-tools/Projects/Mozbase#Tests
- ]
-extra_packages = [ "sphinx" # documentation: https://wiki.mozilla.org/Auto-tools/Projects/Mozbase#Documentation
- ]
+
+# testing: https://wiki.mozilla.org/Auto-tools/Projects/Mozbase#Tests
+test_packages = ["mock"]
+
+# documentation: https://wiki.mozilla.org/Auto-tools/Projects/Mozbase#Documentation
+extra_packages = ["sphinx"]
+
def cycle_check(order, dependencies):
"""ensure no cyclic dependencies"""
order_dict = dict([(j, i) for i, j in enumerate(order)])
for package, deps in dependencies.items():
index = order_dict[package]
for d in deps:
assert index > order_dict[d], "Cyclic dependencies detected"
+
def info(directory):
"get the package setup.py information"
assert os.path.exists(os.path.join(directory, 'setup.py'))
# setup the egg info
try:
- call([sys.executable, 'setup.py', 'egg_info'], cwd=directory, stdout=PIPE)
+ call([sys.executable, 'setup.py', 'egg_info'],
+ cwd=directory, stdout=PIPE)
except subprocess.CalledProcessError:
print "Error running setup.py in %s" % directory
raise
# get the .egg-info directory
egg_info = [entry for entry in os.listdir(directory)
if entry.endswith('.egg-info')]
- assert len(egg_info) == 1, 'Expected one .egg-info directory in %s, got: %s' % (directory, egg_info)
+ assert len(egg_info) == 1, 'Expected one .egg-info directory in %s, got: %s' % (directory,
+ egg_info)
egg_info = os.path.join(directory, egg_info[0])
assert os.path.isdir(egg_info), "%s is not a directory" % egg_info
# read the package information
pkg_info = os.path.join(egg_info, 'PKG-INFO')
info_dict = {}
for line in file(pkg_info).readlines():
if not line or line[0].isspace():
- continue # XXX neglects description
+ continue # XXX neglects description
assert ':' in line
key, value = [i.strip() for i in line.split(':', 1)]
info_dict[key] = value
return info_dict
+
def get_dependencies(directory):
"returns the package name and dependencies given a package directory"
# get the package metadata
info_dict = info(directory)
# get the .egg-info directory
egg_info = [entry for entry in os.listdir(directory)
@@ -94,30 +101,32 @@ def get_dependencies(directory):
# Also lines like [device] are sections to mark optional
# dependencies, we don't want those sections.
if line and not (line.startswith('[') and line.endswith(']')):
dependencies.append(line)
# return the information
return info_dict['Name'], dependencies
+
def dependency_info(dep):
"return dictionary of dependency information from a dependency string"
retval = dict(Name=None, Type=None, Version=None)
for joiner in ('==', '<=', '>='):
if joiner in dep:
retval['Type'] = joiner
name, version = [i.strip() for i in dep.split(joiner, 1)]
retval['Name'] = name
retval['Version'] = version
break
else:
retval['Name'] = dep.strip()
return retval
+
def unroll_dependencies(dependencies):
"""
unroll a set of dependencies to a flat list
dependencies = {'packageA': set(['packageB', 'packageC', 'packageF']),
'packageB': set(['packageC', 'packageD', 'packageE', 'packageG']),
'packageC': set(['packageE']),
'packageE': set(['packageF', 'packageG']),
@@ -136,17 +145,17 @@ def unroll_dependencies(dependencies):
for package in packages.difference(order):
if set(dependencies.get(package, set())).issubset(order):
order.append(package)
break
else:
raise AssertionError("Cyclic dependencies detected")
- cycle_check(order, dependencies) # sanity check
+ cycle_check(order, dependencies) # sanity check
return order
def main(args=sys.argv[1:]):
# parse command line options
usage = '%prog [options] [package] [package] [...]'
@@ -160,29 +169,30 @@ def main(args=sys.argv[1:]):
help="installs extra supporting packages as well as core mozbase ones")
options, packages = parser.parse_args(args)
if not packages:
# install all packages
packages = sorted(mozbase_packages)
# ensure specified packages are in the list
- assert set(packages).issubset(mozbase_packages), "Packages should be in %s (You gave: %s)" % (mozbase_packages, packages)
+ assert set(packages).issubset(mozbase_packages), \
+ "Packages should be in %s (You gave: %s)" % (mozbase_packages, packages)
if options.list_dependencies:
# list the package dependencies
for package in packages:
print '%s: %s' % get_dependencies(os.path.join(here, package))
parser.exit()
# gather dependencies
# TODO: version conflict checking
deps = {}
alldeps = {}
- mapping = {} # mapping from subdir name to package name
+ mapping = {} # mapping from subdir name to package name
# core dependencies
for package in packages:
key, value = get_dependencies(os.path.join(here, package))
deps[key] = [dependency_info(dep)['Name'] for dep in value]
mapping[package] = key
# keep track of all dependencies for non-mozbase packages
for dep in value:
@@ -212,17 +222,17 @@ def main(args=sys.argv[1:]):
continue
key, value = get_dependencies(os.path.join(here, package))
mapping[package] = key
# unroll dependencies
unrolled = unroll_dependencies(deps)
# make a reverse mapping: package name -> subdirectory
- reverse_mapping = dict([(j,i) for i, j in mapping.items()])
+ reverse_mapping = dict([(j, i) for i, j in mapping.items()])
# we only care about dependencies in mozbase
unrolled = [package for package in unrolled if package in reverse_mapping]
if options.list:
# list what will be installed
for package in unrolled:
print package
@@ -239,17 +249,17 @@ def main(args=sys.argv[1:]):
os.environ['PATH'] = '%s%s%s' % (os.path.dirname(os.path.abspath(sys.executable)),
os.path.pathsep,
os.environ.get('PATH', '').strip(os.path.pathsep))
# install non-mozbase dependencies
# these need to be installed separately and the --no-deps flag
# subsequently used due to a bug in setuptools; see
# https://bugzilla.mozilla.org/show_bug.cgi?id=759836
- pypi_deps = dict([(i, j) for i,j in alldeps.items()
+ pypi_deps = dict([(i, j) for i, j in alldeps.items()
if i not in unrolled])
for package, version in pypi_deps.items():
# easy_install should be available since we rely on setuptools
call(['easy_install', version])
# install packages required for unit testing
for package in test_packages:
call(['easy_install', package])
--- a/testing/mozbase/test.py
+++ b/testing/mozbase/test.py
@@ -18,41 +18,43 @@ import sys
import unittest
import mozlog
from moztest.results import TestResultCollection
from moztest.adapters.unit import StructuredTestRunner
here = os.path.dirname(os.path.abspath(__file__))
+
def unittests(path):
"""return the unittests in a .py file"""
path = os.path.abspath(path)
unittests = []
assert os.path.exists(path)
directory = os.path.dirname(path)
- sys.path.insert(0, directory) # insert directory into path for top-level imports
+ sys.path.insert(0, directory) # insert directory into path for top-level imports
modname = os.path.splitext(os.path.basename(path))[0]
module = imp.load_source(modname, path)
- sys.path.pop(0) # remove directory from global path
+ sys.path.pop(0) # remove directory from global path
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(module)
for test in suite:
unittests.append(test)
return unittests
+
def main(args=sys.argv[1:]):
# parse command line options
usage = '%prog [options] manifest.ini <manifest.ini> <...>'
parser = optparse.OptionParser(usage=usage, description=__doc__)
parser.add_option('-b', "--binary",
- dest="binary", help="Binary path",
- metavar=None, default=None)
+ dest="binary", help="Binary path",
+ metavar=None, default=None)
parser.add_option('--list', dest='list_tests',
action='store_true', default=False,
help="list paths of tests to be run")
mozlog.commandline.add_logging_group(parser)
options, args = parser.parse_args(args)
logger = mozlog.commandline.setup_logging("mozbase", options,
{"tbpl": sys.stdout})
--- a/testing/mozbase/versioninfo.py
+++ b/testing/mozbase/versioninfo.py
@@ -15,16 +15,17 @@ import argparse
import os
import subprocess
import sys
import setup_development
here = os.path.abspath(os.path.dirname(__file__))
+
def run_hg(command):
command = command[:]
if not isinstance(command, Iterable):
command = command.split()
command.insert(0, 'hg')
try:
output = subprocess.check_output(command, cwd=here)
except subprocess.CalledProcessError:
@@ -68,17 +69,19 @@ def changelog(args):
to_ref = args.to_ref or 'tip'
if '.' in from_ref:
from_ref = get_version_rev(from_ref)
if '.' in to_ref:
to_ref = get_version_rev(to_ref)
delim = '\x12\x59\x52\x99\x05'
- changelog = run_hg(['log', '-r', '%s:children(%s)' % (to_ref, from_ref), '--template={desc}%s' % delim, '-M', args.module]).split(delim)[:-1]
+ changelog = run_hg(['log', '-r', '%s:children(%s)' % (to_ref, from_ref),
+ '--template={desc}%s' % delim,
+ '-M', args.module]).split(delim)[:-1]
def prettify(desc):
lines = desc.splitlines()
lines = [('* %s' if i == 0 else ' %s') % l for i, l in enumerate(lines)]
return '\n'.join(lines)
changelog = map(prettify, changelog)
print '\n'.join(changelog)
@@ -106,19 +109,21 @@ def main(args=sys.argv[1:]):
subcommands = parser.add_subparsers(help="Sub-commands")
p_deps = subcommands.add_parser('dependencies', help="Print dependencies.")
p_deps.set_defaults(func=dependencies)
p_changelog = subcommands.add_parser('changelog', help="Print a changelog.")
p_changelog.add_argument('module', help="Module to get changelog from.")
p_changelog.add_argument('--from', dest='from_ref', default=None,
- help="Starting version or revision to list changes from. [defaults to latest version]")
+ help="Starting version or revision to list "
+ "changes from. [defaults to latest version]")
p_changelog.add_argument('--to', dest='to_ref', default=None,
- help="Ending version or revision to list changes to. [defaults to tip]")
+ help="Ending version or revision to list "
+ "changes to. [defaults to tip]")
p_changelog.set_defaults(func=changelog)
# default to showing dependencies
if args == []:
args.append('dependencies')
args = parser.parse_args(args)
args.func(args)
--- a/tools/lint/flake8.lint
+++ b/tools/lint/flake8.lint
@@ -130,18 +130,20 @@ LINTER = {
'name': "flake8",
'description': "Python linter",
'include': [
'python/mozlint',
'taskcluster',
'testing/firefox-ui',
'testing/marionette/client',
'testing/marionette/harness',
+ 'testing/mozbase',
'testing/mochitest',
'testing/puppeteer',
'testing/talos/',
'tools/lint',
],
- 'exclude': ['testing/mochitest/pywebsocket'],
+ 'exclude': ["testing/mozbase/mozdevice/mozdevice/Zeroconf.py",
+ 'testing/mochitest/pywebsocket'],
'extensions': EXTENSIONS,
'type': 'external',
'payload': lint,
}