Commit 7566c9a8 authored by Masood Malekghassemi

Make Python testing predictable again

This reorganizes the Python code, scraps the current testing
infrastructure, and implements a simple test discovery and run script
based on the standard Python unittest library so we can trust that our
tests are running.
parent 31c16e52
Showing changed files with 1043 additions and 120 deletions
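In broad strokes, the new flow wires a custom loader and runner into setuptools (see the setup.py changes below), so python setup.py test discovers *_test modules under the tests package with the standard unittest machinery. A minimal sketch of what that entry point amounts to, using the names declared in the diff (the direct invocation here is illustrative, not part of the change):

# Illustrative sketch only: this drives the new loader/runner the same way the
# setuptools 'test' command does once setup.py declares test_suite='tests',
# test_loader='tests:Loader' and test_runner='tests:Runner'.
from tests import Loader, Runner

loader = Loader()
suite = loader.loadTestsFromNames(['tests'])  # imports 'tests', walks its packages
Runner().run(suite)                           # colored terminal output + report.xml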
@@ -5,5 +5,10 @@ dist/
*.egg
*.egg/
*.eggs/
*_pb2.py
.coverage
.coverage.*
.cache/
nosetests.xml
doc/
_grpcio_metadata.py
graft grpc
graft tests
include commands.py
include requirements.txt
@@ -29,14 +29,18 @@
"""Provides distutils command classes for the GRPC Python setup process."""
import distutils
import os
import os.path
import re
import subprocess
import sys
import setuptools
from setuptools.command import build_py
from setuptools.command import test
_CONF_PY_ADDENDUM = """
CONF_PY_ADDENDUM = """
extensions.append('sphinx.ext.napoleon')
napoleon_google_docstring = True
napoleon_numpy_docstring = True
@@ -48,7 +52,7 @@ html_theme = 'sphinx_rtd_theme'
class SphinxDocumentation(setuptools.Command):
"""Command to generate documentation via sphinx."""
description = ''
description = 'generate sphinx documentation'
user_options = []
def initialize_options(self):
@@ -72,14 +76,61 @@ class SphinxDocumentation(setuptools.Command):
'-o', os.path.join('doc', 'src'), src_dir])
conf_filepath = os.path.join('doc', 'src', 'conf.py')
with open(conf_filepath, 'a') as conf_file:
conf_file.write(_CONF_PY_ADDENDUM)
conf_file.write(CONF_PY_ADDENDUM)
sphinx.main(['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])
class BuildProtoModules(setuptools.Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = 'build protobuf modules'
user_options = [
('include=', None, 'path patterns to include in protobuf generation'),
('exclude=', None, 'path patterns to exclude from protobuf generation')
]
def initialize_options(self):
self.exclude = None
self.include = r'.*\.proto$'
self.protoc_command = None
self.grpc_python_plugin_command = None
def finalize_options(self):
self.protoc_command = distutils.spawn.find_executable('protoc')
self.grpc_python_plugin_command = distutils.spawn.find_executable(
'grpc_python_plugin')
def run(self):
include_regex = re.compile(self.include)
exclude_regex = re.compile(self.exclude) if self.exclude else None
paths = []
root_directory = os.getcwd()
for walk_root, directories, filenames in os.walk(root_directory):
for filename in filenames:
path = os.path.join(walk_root, filename)
if include_regex.match(path) and not (
exclude_regex and exclude_regex.match(path)):
paths.append(path)
command = [
self.protoc_command,
'--plugin=protoc-gen-python-grpc={}'.format(
self.grpc_python_plugin_command),
'-I {}'.format(root_directory),
'--python_out={}'.format(root_directory),
'--python-grpc_out={}'.format(root_directory),
] + paths
try:
subprocess.check_output(' '.join(command), cwd=root_directory, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise Exception('Command:\n{}\nMessage:\n{}\nOutput:\n{}'.format(
command, e.message, e.output))
class BuildProjectMetadata(setuptools.Command):
"""Command to generate project metadata in a module."""
description = ''
description = 'build grpcio project metadata files'
user_options = []
def initialize_options(self):
@@ -98,5 +149,73 @@ class BuildPy(build_py.build_py):
"""Custom project build command."""
def run(self):
self.run_command('build_proto_modules')
self.run_command('build_project_metadata')
build_py.build_py.run(self)
class Gather(setuptools.Command):
"""Command to gather project dependencies."""
description = 'gather dependencies for grpcio'
user_options = [
('test', 't', 'flag indicating to gather test dependencies'),
('install', 'i', 'flag indicating to gather install dependencies')
]
def initialize_options(self):
self.test = False
self.install = False
def finalize_options(self):
# distutils requires this override.
pass
def run(self):
if self.install and self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.test and self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
class RunInterop(test.test):
description = 'run interop test client/server'
user_options = [
('args=', 'a', 'pass-thru arguments for the client/server'),
('client', 'c', 'flag indicating to run the client'),
('server', 's', 'flag indicating to run the server')
]
def initialize_options(self):
self.args = ''
self.client = False
self.server = False
def finalize_options(self):
if self.client and self.server:
raise DistutilsOptionError('you may only specify one of client or server')
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.client:
self.run_client()
elif self.server:
self.run_server()
def run_server(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.interop import server
sys.argv[1:] = self.args.split()
server.serve()
def run_client(self):
# We import here to ensure that our setuptools parent has had a chance to
# edit the Python system path.
from tests.interop import client
sys.argv[1:] = self.args.split()
client.test_interoperability()
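A hedged usage note: once these command classes are registered in setup.py (see the cmdclass mapping below), they follow the usual distutils option syntax, e.g. python setup.py run_interop --server, or python setup.py run_interop --client --args='...' with --args split and passed through to the client/server as sys.argv; python setup.py gather --test pre-fetches the test dependency eggs.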
enum34>=1.0.4
futures>=2.2.0
cython>=0.23
coverage>=4.0
[coverage:run]
plugins = Cython.Coverage
[build_ext]
inplace=1
[build_proto_modules]
exclude=.*protoc_plugin/protoc_plugin_test\.proto$
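The [build_proto_modules] section above feeds the exclude option of the BuildProtoModules command defined in commands.py; distutils reads per-command defaults from setup.cfg. A small standalone sketch of the resulting filter, using the same regex semantics as the command's run() method (the sample paths are made up):

import re

include_regex = re.compile(r'.*\.proto$')  # the command's default include pattern
exclude_regex = re.compile(r'.*protoc_plugin/protoc_plugin_test\.proto$')

candidates = [
    '/src/tests/interop/test.proto',                      # hypothetical path: kept
    '/src/tests/protoc_plugin/protoc_plugin_test.proto',  # hypothetical path: skipped
]
kept = [path for path in candidates
        if include_regex.match(path) and not exclude_regex.match(path)]
print kept  # only the interop proto survives the filter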
@@ -43,12 +43,21 @@ os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Break import-style to ensure we can actually find our commands module.
import commands
# Use environment variables to determine whether or not the Cython extension
# should *use* Cython or use the generated C files. Note that this requires the
# C files to have been generated by building first *with* Cython support.
_BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
_C_EXTENSION_SOURCES = (
# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = os.environ.get(
'GRPC_PYTHON_ENABLE_CYTHON_TRACING', False)
# Environment variable to determine whether or not to include the test files in
# the installation.
INSTALL_TESTS = os.environ.get('GRPC_PYTHON_INSTALL_TESTS', False)
C_EXTENSION_SOURCES = (
'grpc/_adapter/_c/module.c',
'grpc/_adapter/_c/types.c',
'grpc/_adapter/_c/utility.c',
@@ -61,9 +70,9 @@ _C_EXTENSION_SOURCES = (
'grpc/_adapter/_c/types/server.c',
)
_CYTHON_EXTENSION_PACKAGE_NAMES = ()
CYTHON_EXTENSION_PACKAGE_NAMES = ()
_CYTHON_EXTENSION_MODULE_NAMES = (
CYTHON_EXTENSION_MODULE_NAMES = (
'grpc._cython.cygrpc',
'grpc._cython._cygrpc.call',
'grpc._cython._cygrpc.channel',
@@ -73,24 +82,24 @@ _CYTHON_EXTENSION_MODULE_NAMES = (
'grpc._cython._cygrpc.server',
)
_EXTENSION_INCLUDE_DIRECTORIES = (
EXTENSION_INCLUDE_DIRECTORIES = (
'.',
)
_EXTENSION_LIBRARIES = (
EXTENSION_LIBRARIES = (
'grpc',
'gpr',
)
if not "darwin" in sys.platform:
_EXTENSION_LIBRARIES += ('rt',)
EXTENSION_LIBRARIES += ('rt',)
_C_EXTENSION_MODULE = _core.Extension(
'grpc._adapter._c', sources=list(_C_EXTENSION_SOURCES),
include_dirs=list(_EXTENSION_INCLUDE_DIRECTORIES),
libraries=list(_EXTENSION_LIBRARIES),
C_EXTENSION_MODULE = _core.Extension(
'grpc._adapter._c', sources=list(C_EXTENSION_SOURCES),
include_dirs=list(EXTENSION_INCLUDE_DIRECTORIES),
libraries=list(EXTENSION_LIBRARIES)
)
_EXTENSION_MODULES = [_C_EXTENSION_MODULE]
EXTENSION_MODULES = [C_EXTENSION_MODULE]
def cython_extensions(package_names, module_names, include_dirs, libraries,
@@ -101,48 +110,89 @@ def cython_extensions(package_names, module_names, include_dirs, libraries,
extensions = [
_extension.Extension(
name=module_name, sources=[module_file],
include_dirs=include_dirs, libraries=libraries
include_dirs=include_dirs, libraries=libraries,
define_macros=[('CYTHON_TRACE_NOGIL', 1)] if ENABLE_CYTHON_TRACING else []
) for (module_name, module_file) in zip(module_names, module_files)
]
if build_with_cython:
import Cython.Build
return Cython.Build.cythonize(extensions)
return Cython.Build.cythonize(
extensions,
compiler_directives={'linetrace': bool(ENABLE_CYTHON_TRACING)})
else:
return extensions
_CYTHON_EXTENSION_MODULES = cython_extensions(
list(_CYTHON_EXTENSION_PACKAGE_NAMES), list(_CYTHON_EXTENSION_MODULE_NAMES),
list(_EXTENSION_INCLUDE_DIRECTORIES), list(_EXTENSION_LIBRARIES),
bool(_BUILD_WITH_CYTHON))
_PACKAGES = setuptools.find_packages('.')
CYTHON_EXTENSION_MODULES = cython_extensions(
list(CYTHON_EXTENSION_PACKAGE_NAMES), list(CYTHON_EXTENSION_MODULE_NAMES),
list(EXTENSION_INCLUDE_DIRECTORIES), list(EXTENSION_LIBRARIES),
bool(BUILD_WITH_CYTHON))
_PACKAGE_DIRECTORIES = {
PACKAGE_DIRECTORIES = {
'': '.',
}
_INSTALL_REQUIRES = (
INSTALL_REQUIRES = (
'enum34>=1.0.4',
'futures>=2.2.0',
)
_SETUP_REQUIRES = (
SETUP_REQUIRES = (
'sphinx>=1.3',
) + _INSTALL_REQUIRES
) + INSTALL_REQUIRES
_COMMAND_CLASS = {
COMMAND_CLASS = {
'doc': commands.SphinxDocumentation,
'build_proto_modules': commands.BuildProtoModules,
'build_project_metadata': commands.BuildProjectMetadata,
'build_py': commands.BuildPy,
'gather': commands.Gather,
'run_interop': commands.RunInterop,
}
TEST_PACKAGE_DATA = {
'tests.interop': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
'tests.protoc_plugin': [
'protoc_plugin_test.proto',
],
'tests.unit': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
}
TESTS_REQUIRE = (
'oauth2client>=1.4.7',
'protobuf==3.0.0a3',
'coverage>=4.0',
) + INSTALL_REQUIRES
TEST_SUITE = 'tests'
TEST_LOADER = 'tests:Loader'
TEST_RUNNER = 'tests:Runner'
PACKAGE_DATA = {}
if INSTALL_TESTS:
PACKAGE_DATA = dict(PACKAGE_DATA, **TEST_PACKAGE_DATA)
PACKAGES = setuptools.find_packages('.')
else:
PACKAGES = setuptools.find_packages('.', exclude=['tests', 'tests.*'])
setuptools.setup(
name='grpcio',
version='0.11.0b1',
ext_modules=_EXTENSION_MODULES + _CYTHON_EXTENSION_MODULES,
packages=list(_PACKAGES),
package_dir=_PACKAGE_DIRECTORIES,
install_requires=_INSTALL_REQUIRES,
setup_requires=_SETUP_REQUIRES,
cmdclass=_COMMAND_CLASS
version='0.11.0b2',
ext_modules=EXTENSION_MODULES + CYTHON_EXTENSION_MODULES,
packages=list(PACKAGES),
package_dir=PACKAGE_DIRECTORIES,
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
cmdclass=COMMAND_CLASS,
tests_require=TESTS_REQUIRE,
test_suite=TEST_SUITE,
test_loader=TEST_LOADER,
test_runner=TEST_RUNNER,
)
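A note on the three GRPC_PYTHON_* toggles read above: os.environ.get returns the raw string, so any non-empty value enables a flag (even '0'). A minimal sketch, assuming the variable names from this setup.py:

import os

os.environ['GRPC_PYTHON_INSTALL_TESTS'] = '1'  # any non-empty string is truthy
INSTALL_TESTS = os.environ.get('GRPC_PYTHON_INSTALL_TESTS', False)
assert bool(INSTALL_TESTS)  # test packages and their package data get installed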
@@ -27,68 +27,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A setup module for the GRPC Python interop testing package."""
from tests import _loader
from tests import _runner
import os
import os.path
import setuptools
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Break import-style to ensure we can actually find our commands module.
import commands
_PACKAGES = setuptools.find_packages('.')
_PACKAGE_DIRECTORIES = {
'': '.',
}
_PACKAGE_DATA = {
'grpc_interop': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
'grpc_protoc_plugin': [
'test.proto',
],
'grpc_test': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
}
_SETUP_REQUIRES = (
'pytest>=2.6',
'pytest-cov>=2.0',
'pytest-xdist>=1.11',
'pytest-timeout>=0.5',
)
_INSTALL_REQUIRES = (
'oauth2client>=1.4.7',
'grpcio>=0.11.0b0',
# TODO(issue 3321): Unpin protobuf dependency.
'protobuf==3.0.0a3',
)
_COMMAND_CLASS = {
'test': commands.RunTests,
'build_proto_modules': commands.BuildProtoModules,
'build_py': commands.BuildPy,
}
setuptools.setup(
name='grpcio_test',
version='0.11.0b0',
packages=_PACKAGES,
package_dir=_PACKAGE_DIRECTORIES,
package_data=_PACKAGE_DATA,
install_requires=_INSTALL_REQUIRES + _SETUP_REQUIRES,
setup_requires=_SETUP_REQUIRES,
cmdclass=_COMMAND_CLASS,
)
Loader = _loader.Loader
Runner = _runner.Runner
@@ -27,80 +27,101 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides distutils command classes for the GRPC Python test setup process."""
import distutils
import os
import os.path
import subprocess
import sys
import setuptools
from setuptools.command import build_py
class RunTests(setuptools.Command):
"""Command to run all tests via py.test."""
description = ''
user_options = [('pytest-args=', 'a', 'arguments to pass to py.test')]
def initialize_options(self):
self.pytest_args = []
def finalize_options(self):
pass
def run(self):
# We import here to ensure that setup.py has had a chance to install the
# relevant package eggs first.
import pytest
self.run_command('build_proto_modules')
result = pytest.main(self.pytest_args)
if result != 0:
raise SystemExit(result)
class BuildProtoModules(setuptools.Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = ''
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
self.protoc_command = distutils.spawn.find_executable('protoc')
self.grpc_python_plugin_command = distutils.spawn.find_executable(
'grpc_python_plugin')
def run(self):
paths = []
root_directory = os.getcwd()
for walk_root, directories, filenames in os.walk(root_directory):
for filename in filenames:
if filename.endswith('.proto'):
paths.append(os.path.join(walk_root, filename))
command = [
self.protoc_command,
'--plugin=protoc-gen-python-grpc={}'.format(
self.grpc_python_plugin_command),
'-I {}'.format(root_directory),
'--python_out={}'.format(root_directory),
'--python-grpc_out={}'.format(root_directory),
] + paths
import importlib
import pkgutil
import re
import unittest
import coverage
# Some global spooky-action-at-a-distance hackery to get around
# system-installation issues where the google namespace is defaulted to the
# system even though the egg is higher priority on sys.path. This inverts the
# path priority on package module paths thus giving any installed eggs higher
# priority and having little effect otherwise.
import google
google.__path__.reverse()
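# Illustrative example of the inversion above (paths hypothetical): if
# google.__path__ were ['/usr/lib/python2.7/site-packages/google',
# './.eggs/protobuf-3.0.0a3-py2.7.egg/google'], reversing it lets the egg's
# copy of the namespace package win the module search.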
TEST_MODULE_REGEX = r'^.*_test$'
class Loader(object):
"""Test loader for setuptools test suite support.
Attributes:
suite (unittest.TestSuite): All tests collected by the loader.
loader (unittest.TestLoader): Standard Python unittest loader to be run per
discovered module.
module_matcher (re.RegexObject): A regular expression object to match
against module names and determine whether or not the discovered module
contributes to the test suite.
"""
def __init__(self):
self.suite = unittest.TestSuite()
self.loader = unittest.TestLoader()
self.module_matcher = re.compile(TEST_MODULE_REGEX)
def loadTestsFromNames(self, names, module=None):
"""Function mirroring TestLoader::loadTestsFromNames, as expected by
setuptools.setup argument `test_loader`."""
# ensure that we capture decorators and definitions (else our coverage
# measure unnecessarily suffers)
coverage_context = coverage.Coverage(data_suffix=True)
coverage_context.start()
modules = [importlib.import_module(name) for name in names]
for module in modules:
self.visit_module(module)
for module in modules:
try:
subprocess.check_output(' '.join(command), cwd=root_directory, shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise Exception('{}\nOutput:\n{}'.format(e.message, e.output))
class BuildPy(build_py.build_py):
"""Custom project build command."""
def run(self):
self.run_command('build_proto_modules')
build_py.build_py.run(self)
package_paths = module.__path__
except:
continue
self.walk_packages(package_paths)
coverage_context.stop()
coverage_context.save()
return self.suite
def walk_packages(self, package_paths):
"""Walks over the packages, dispatching `visit_module` calls.
Args:
package_paths (list): A list of package paths to walk for modules.
"""
for importer, module_name, is_package in (
pkgutil.iter_modules(package_paths)):
module = importer.find_module(module_name).load_module(module_name)
self.visit_module(module)
if is_package:
self.walk_packages(module.__path__)
def visit_module(self, module):
"""Visits the module, adding discovered tests to the test suite.
Args:
module (module): Module to match against self.module_matcher; if matched
it has its tests loaded via self.loader into self.suite.
"""
if self.module_matcher.match(module.__name__):
module_suite = self.loader.loadTestsFromModule(module)
self.suite.addTest(module_suite)
def iterate_suite_cases(suite):
"""Generator over all unittest.TestCases in a unittest.TestSuite.
Args:
suite (unittest.TestSuite): Suite to iterate over in the generator.
Returns:
generator: A generator over all unittest.TestCases in `suite`.
"""
for item in suite:
if isinstance(item, unittest.TestSuite):
for child_item in iterate_suite_cases(item):
yield child_item
elif isinstance(item, unittest.TestCase):
yield item
else:
raise ValueError('unexpected suite item of type {}'.format(type(item)))
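A small sketch of how the generator above is meant to be consumed (the sample test case is hypothetical); the Runner below uses it the same way to assign a unique id to every case:

import unittest

class _ExampleTest(unittest.TestCase):  # hypothetical case, purely illustrative
  def test_nothing(self):
    pass

nested = unittest.TestSuite([unittest.TestSuite([_ExampleTest('test_nothing')])])
for case in iterate_suite_cases(nested):
  print case.id()  # nested suites are flattened into individual TestCases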
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cStringIO as StringIO
import collections
import itertools
import traceback
import unittest
from xml.etree import ElementTree
import coverage
from tests import _loader
class CaseResult(collections.namedtuple('CaseResult', [
'id', 'name', 'kind', 'stdout', 'stderr', 'skip_reason', 'traceback'])):
"""A serializable result of a single test case.
Attributes:
id (object): Any serializable object used to denote the identity of this
test case.
name (str or None): A human-readable name of the test case.
kind (CaseResult.Kind): The kind of test result.
stdout (object or None): Output on stdout, or None if nothing was captured.
stderr (object or None): Output on stderr, or None if nothing was captured.
skip_reason (object or None): The reason the test was skipped. Must be
something if self.kind is CaseResult.Kind.SKIP, else None.
traceback (object or None): The traceback of the test. Must be something if
self.kind is CaseResult.Kind.{ERROR, FAILURE, EXPECTED_FAILURE}, else
None.
"""
class Kind:
UNTESTED = 'untested'
RUNNING = 'running'
ERROR = 'error'
FAILURE = 'failure'
SUCCESS = 'success'
SKIP = 'skip'
EXPECTED_FAILURE = 'expected failure'
UNEXPECTED_SUCCESS = 'unexpected success'
def __new__(cls, id=None, name=None, kind=None, stdout=None, stderr=None,
skip_reason=None, traceback=None):
"""Helper keyword constructor for the namedtuple.
See this class' attributes for information on the arguments."""
assert id is not None
assert name is None or isinstance(name, str)
if kind is CaseResult.Kind.UNTESTED:
pass
elif kind is CaseResult.Kind.RUNNING:
pass
elif kind is CaseResult.Kind.ERROR:
assert traceback is not None
elif kind is CaseResult.Kind.FAILURE:
assert traceback is not None
elif kind is CaseResult.Kind.SUCCESS:
pass
elif kind is CaseResult.Kind.SKIP:
assert skip_reason is not None
elif kind is CaseResult.Kind.EXPECTED_FAILURE:
assert traceback is not None
elif kind is CaseResult.Kind.UNEXPECTED_SUCCESS:
pass
else:
assert False
return super(cls, CaseResult).__new__(
cls, id, name, kind, stdout, stderr, skip_reason, traceback)
def updated(self, name=None, kind=None, stdout=None, stderr=None,
skip_reason=None, traceback=None):
"""Get a new validated CaseResult with the fields updated.
See this class' attributes for information on the arguments."""
name = self.name if name is None else name
kind = self.kind if kind is None else kind
stdout = self.stdout if stdout is None else stdout
stderr = self.stderr if stderr is None else stderr
skip_reason = self.skip_reason if skip_reason is None else skip_reason
traceback = self.traceback if traceback is None else traceback
return CaseResult(id=self.id, name=name, kind=kind, stdout=stdout,
stderr=stderr, skip_reason=skip_reason,
traceback=traceback)
class AugmentedResult(unittest.TestResult):
"""unittest.Result that keeps track of additional information.
Uses CaseResult objects to store test-case results, providing additional
information beyond that of the standard Python unittest library, such as
standard output.
Attributes:
id_map (callable): A unary callable mapping unittest.TestCase objects to
unique identifiers.
cases (dict): A dictionary mapping from the identifiers returned by id_map
to CaseResult objects corresponding to those IDs.
"""
def __init__(self, id_map):
"""Initialize the object with an identifier mapping.
Arguments:
id_map (callable): Corresponds to the attribute `id_map`."""
super(AugmentedResult, self).__init__()
self.id_map = id_map
self.cases = None
def startTestRun(self):
"""See unittest.TestResult.startTestRun."""
super(AugmentedResult, self).startTestRun()
self.cases = dict()
def stopTestRun(self):
"""See unittest.TestResult.stopTestRun."""
super(AugmentedResult, self).stopTestRun()
def startTest(self, test):
"""See unittest.TestResult.startTest."""
super(AugmentedResult, self).startTest(test)
case_id = self.id_map(test)
self.cases[case_id] = CaseResult(
id=case_id, name=test.id(), kind=CaseResult.Kind.RUNNING)
def addError(self, test, error):
"""See unittest.TestResult.addError."""
super(AugmentedResult, self).addError(test, error)
case_id = self.id_map(test)
self.cases[case_id] = self.cases[case_id].updated(
kind=CaseResult.Kind.ERROR, traceback=error)
def addFailure(self, test, error):
"""See unittest.TestResult.addFailure."""
super(AugmentedResult, self).addFailure(test, error)
case_id = self.id_map(test)
self.cases[case_id] = self.cases[case_id].updated(
kind=CaseResult.Kind.FAILURE, traceback=error)
def addSuccess(self, test):
"""See unittest.TestResult.addSuccess."""
super(AugmentedResult, self).addSuccess(test)
case_id = self.id_map(test)
self.cases[case_id] = self.cases[case_id].updated(
kind=CaseResult.Kind.SUCCESS)
def addSkip(self, test, reason):
"""See unittest.TestResult.addSkip."""
super(AugmentedResult, self).addSkip(test, reason)
case_id = self.id_map(test)
self.cases[case_id] = self.cases[case_id].updated(
kind=CaseResult.Kind.SKIP, skip_reason=reason)
def addExpectedFailure(self, test, error):
"""See unittest.TestResult.addExpectedFailure."""
super(AugmentedResult, self).addExpectedFailure(test, error)
case_id = self.id_map(test)
self.cases[case_id] = self.cases[case_id].updated(
kind=CaseResult.Kind.EXPECTED_FAILURE, traceback=error)
def addUnexpectedSuccess(self, test):
"""See unittest.TestResult.addUnexpectedSuccess."""
super(AugmentedResult, self).addUnexpectedSuccess(test)
case_id = self.id_map(test)
self.cases[case_id] = self.cases[case_id].updated(
kind=CaseResult.Kind.UNEXPECTED_SUCCESS)
def set_output(self, test, stdout, stderr):
"""Set the output attributes for the CaseResult corresponding to a test.
Args:
test (unittest.TestCase): The TestCase to set the outputs of.
stdout (str): Output from stdout to assign to self.id_map(test).
stderr (str): Output from stderr to assign to self.id_map(test).
"""
case_id = self.id_map(test)
self.cases[case_id] = self.cases[case_id].updated(
stdout=stdout, stderr=stderr)
def augmented_results(self, filter):
"""Convenience method to retrieve filtered case results.
Args:
filter (callable): A unary predicate to filter over CaseResult objects.
"""
return (self.cases[case_id] for case_id in self.cases
if filter(self.cases[case_id]))
class CoverageResult(AugmentedResult):
"""Extension to AugmentedResult adding coverage.py support per test.\
Attributes:
coverage_context (coverage.Coverage): coverage.py management object.
"""
def __init__(self, id_map):
"""See AugmentedResult.__init__."""
super(CoverageResult, self).__init__(id_map=id_map)
self.coverage_context = None
def startTest(self, test):
"""See unittest.TestResult.startTest.
Additionally initializes and begins code coverage tracking."""
super(CoverageResult, self).startTest(test)
self.coverage_context = coverage.Coverage(data_suffix=True)
self.coverage_context.start()
def stopTest(self, test):
"""See unittest.TestResult.stopTest.
Additionally stops and deinitializes code coverage tracking."""
super(CoverageResult, self).stopTest(test)
self.coverage_context.stop()
self.coverage_context.save()
self.coverage_context = None
def stopTestRun(self):
"""See unittest.TestResult.stopTestRun."""
super(CoverageResult, self).stopTestRun()
# TODO(atash): Dig deeper into why the following line fails to properly
# combine coverage data from the Cython plugin.
#coverage.Coverage().combine()
class _Colors:
"""Namespaced constants for terminal color magic numbers."""
HEADER = '\033[95m'
INFO = '\033[94m'
OK = '\033[92m'
WARN = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
class TerminalResult(CoverageResult):
"""Extension to CoverageResult adding basic terminal reporting."""
def __init__(self, out, id_map):
"""Initialize the result object.
Args:
out (file-like): Output file to which terminal-colored live results will
be written.
id_map (callable): See AugmentedResult.__init__.
"""
super(TerminalResult, self).__init__(id_map=id_map)
self.out = out
def startTestRun(self):
"""See unittest.TestResult.startTestRun."""
super(TerminalResult, self).startTestRun()
self.out.write(
_Colors.HEADER +
'Testing gRPC Python...\n' +
_Colors.END)
def stopTestRun(self):
"""See unittest.TestResult.stopTestRun."""
super(TerminalResult, self).stopTestRun()
self.out.write(summary(self))
self.out.flush()
def addError(self, test, error):
"""See unittest.TestResult.addError."""
super(TerminalResult, self).addError(test, error)
self.out.write(
_Colors.FAIL +
'ERROR {}\n'.format(test.id()) +
_Colors.END)
self.out.flush()
def addFailure(self, test, error):
"""See unittest.TestResult.addFailure."""
super(TerminalResult, self).addFailure(test, error)
self.out.write(
_Colors.FAIL +
'FAILURE {}\n'.format(test.id()) +
_Colors.END)
self.out.flush()
def addSuccess(self, test):
"""See unittest.TestResult.addSuccess."""
super(TerminalResult, self).addSuccess(test)
self.out.write(
_Colors.OK +
'SUCCESS {}\n'.format(test.id()) +
_Colors.END)
self.out.flush()
def addSkip(self, test, reason):
"""See unittest.TestResult.addSkip."""
super(TerminalResult, self).addSkip(test, reason)
self.out.write(
_Colors.INFO +
'SKIP {}\n'.format(test.id()) +
_Colors.END)
self.out.flush()
def addExpectedFailure(self, test, error):
"""See unittest.TestResult.addExpectedFailure."""
super(TerminalResult, self).addExpectedFailure(test, error)
self.out.write(
_Colors.INFO +
'FAILURE_OK {}\n'.format(test.id()) +
_Colors.END)
self.out.flush()
def addUnexpectedSuccess(self, test):
"""See unittest.TestResult.addUnexpectedSuccess."""
super(TerminalResult, self).addUnexpectedSuccess(test)
self.out.write(
_Colors.INFO +
'UNEXPECTED_OK {}\n'.format(test.id()) +
_Colors.END)
self.out.flush()
def _traceback_string(type, value, trace):
"""Generate a descriptive string of a Python exception traceback.
Args:
type (class): The type of the exception.
value (Exception): The value of the exception.
trace (traceback): Traceback of the exception.
Returns:
str: Formatted exception descriptive string.
"""
buffer = StringIO.StringIO()
traceback.print_exception(type, value, trace, file=buffer)
return buffer.getvalue()
def summary(result):
"""A summary string of a result object.
Args:
result (AugmentedResult): The result object to get the summary of.
Returns:
str: The summary string.
"""
assert isinstance(result, AugmentedResult)
untested = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.UNTESTED))
running = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.RUNNING))
failures = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.FAILURE))
errors = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.ERROR))
successes = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.SUCCESS))
skips = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.SKIP))
expected_failures = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.EXPECTED_FAILURE))
unexpected_successes = list(result.augmented_results(
lambda case_result: case_result.kind is CaseResult.Kind.UNEXPECTED_SUCCESS))
running_names = [case.name for case in running]
finished_count = (len(failures) + len(errors) + len(successes) +
len(expected_failures) + len(unexpected_successes))
statistics = (
'{finished} tests finished:\n'
'\t{successful} successful\n'
'\t{unsuccessful} unsuccessful\n'
'\t{skipped} skipped\n'
'\t{expected_fail} expected failures\n'
'\t{unexpected_successful} unexpected successes\n'
'Interrupted Tests:\n'
'\t{interrupted}\n'
.format(finished=finished_count,
successful=len(successes),
unsuccessful=(len(failures)+len(errors)),
skipped=len(skips),
expected_fail=len(expected_failures),
unexpected_successful=len(unexpected_successes),
interrupted=str(running_names)))
tracebacks = '\n\n'.join([
(_Colors.FAIL + '{test_name}' + _Colors.END + '\n' +
_Colors.BOLD + 'traceback:' + _Colors.END + '\n' +
'{traceback}\n' +
_Colors.BOLD + 'stdout:' + _Colors.END + '\n' +
'{stdout}\n' +
_Colors.BOLD + 'stderr:' + _Colors.END + '\n' +
'{stderr}\n').format(
test_name=result.name,
traceback=_traceback_string(*result.traceback),
stdout=result.stdout, stderr=result.stderr)
for result in itertools.chain(failures, errors)
])
notes = 'Unexpected successes: {}\n'.format([
result.name for result in unexpected_successes])
return statistics + '\nErrors/Failures: \n' + tracebacks + '\n' + notes
def jenkins_junit_xml(result):
"""An XML tree object that when written is recognizable by Jenkins.
Args:
result (AugmentedResult): The result object to get the junit xml output of.
Returns:
ElementTree.ElementTree: The XML tree.
"""
assert isinstance(result, AugmentedResult)
root = ElementTree.Element('testsuites')
suite = ElementTree.SubElement(root, 'testsuite', {
'name': 'Python gRPC tests',
})
for case in result.cases.values():
if case.kind is CaseResult.Kind.SUCCESS:
ElementTree.SubElement(suite, 'testcase', {
'name': case.name,
})
elif case.kind in (CaseResult.Kind.ERROR, CaseResult.Kind.FAILURE):
case_xml = ElementTree.SubElement(suite, 'testcase', {
'name': case.name,
})
error_xml = ElementTree.SubElement(case_xml, 'error', {})
error_xml.text = '{}\n{}'.format(case.stderr, case.traceback)
return ElementTree.ElementTree(element=root)
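A hedged sketch of how these reporting helpers fit together outside the Runner (the loaded target is hypothetical; the report file name matches the one the Runner writes):

import sys
import unittest

# id_map must be stable per test case; the unittest id string works for that.
result = TerminalResult(sys.stdout, id_map=lambda case: case.id())
suite = unittest.TestLoader().loadTestsFromName('tests.unit')  # hypothetical target
result.startTestRun()
suite.run(result)
result.stopTestRun()  # writes summary(result) to stdout
with open('report.xml', 'w') as report_xml_file:
  jenkins_junit_xml(result).write(report_xml_file)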
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cStringIO as StringIO
import collections
import fcntl
import multiprocessing
import os
import select
import signal
import sys
import threading
import time
import unittest
import uuid
from tests import _loader
from tests import _result
class CapturePipe(object):
"""A context-manager pipe to redirect output to a byte array.
Attributes:
_redirect_fd (int): File descriptor of file to redirect writes from.
_saved_fd (int): A copy of the original value of the redirected file
descriptor.
_read_thread (threading.Thread or None): Thread upon which reads through the
pipe are performed. Only non-None when self is started.
_read_fd (int or None): File descriptor of the read end of the redirect
pipe. Only non-None when self is started.
_write_fd (int or None): File descriptor of the write end of the redirect
pipe. Only non-None when self is started.
output (bytearray or None): Redirected output from writes to the redirected
file descriptor. Only valid during and after self has started.
"""
def __init__(self, fd):
self._redirect_fd = fd
self._saved_fd = os.dup(self._redirect_fd)
self._read_thread = None
self._read_fd = None
self._write_fd = None
self.output = None
def start(self):
"""Start redirection of writes to the file descriptor."""
self._read_fd, self._write_fd = os.pipe()
os.dup2(self._write_fd, self._redirect_fd)
flags = fcntl.fcntl(self._read_fd, fcntl.F_GETFL)
fcntl.fcntl(self._read_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self._read_thread = threading.Thread(target=self._read)
self._read_thread.start()
def stop(self):
"""Stop redirection of writes to the file descriptor."""
os.close(self._write_fd)
os.dup2(self._saved_fd, self._redirect_fd) # auto-close self._redirect_fd
self._read_thread.join()
self._read_thread = None
# we waited for the read thread to finish, so _read_fd has been read and we
# can close it.
os.close(self._read_fd)
def _read(self):
"""Read-thread target for self."""
self.output = bytearray()
while True:
select.select([self._read_fd], [], [])
read_bytes = os.read(self._read_fd, 1024)
if read_bytes:
self.output.extend(read_bytes)
else:
break
def write_bypass(self, value):
"""Bypass the redirection and write directly to the original file.
Arguments:
value (str): What to write to the original file.
"""
if self._saved_fd is None:
os.write(self._redirect_fd, value)
else:
os.write(self._saved_fd, value)
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def close(self):
"""Close any resources used by self not closed by stop()."""
os.close(self._saved_fd)
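# Illustrative usage sketch (not part of the module): capture everything
# written to stdout's file descriptor while a block runs.
#
#   pipe = CapturePipe(sys.stdout.fileno())
#   with pipe:
#     os.write(sys.stdout.fileno(), 'captured\n')
#   # pipe.output now holds bytearray(b'captured\n'); call pipe.close() once
#   # the pipe is no longer needed to release the duplicated descriptor.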
class AugmentedCase(collections.namedtuple('AugmentedCase', [
'case', 'id'])):
"""A test case with a guaranteed unique externally specified identifier.
Attributes:
case (unittest.TestCase): TestCase we're decorating with an additional
identifier.
id (object): Any identifier that may be considered 'unique' for testing
purposes.
"""
def __new__(cls, case, id=None):
if id is None:
id = uuid.uuid4()
return super(cls, AugmentedCase).__new__(cls, case, id)
class Runner(object):
def run(self, suite):
"""See setuptools' test_runner setup argument for information."""
# Ensure that every test case has no collision with any other test case in
# the augmented results.
augmented_cases = [AugmentedCase(case, uuid.uuid4())
for case in _loader.iterate_suite_cases(suite)]
case_id_by_case = dict((augmented_case.case, augmented_case.id)
for augmented_case in augmented_cases)
result_out = StringIO.StringIO()
result = _result.TerminalResult(
result_out, id_map=lambda case: case_id_by_case[case])
stdout_pipe = CapturePipe(sys.stdout.fileno())
stderr_pipe = CapturePipe(sys.stderr.fileno())
kill_flag = [False]
def sigint_handler(signal_number, frame):
if signal_number == signal.SIGINT:
kill_flag[0] = True  # Python 2.7 not having 'nonlocal'... :-(
signal.signal(signal_number, signal.SIG_DFL)
def fault_handler(signal_number, frame):
stdout_pipe.write_bypass(
'Received fault signal {}\nstdout:\n{}\n\nstderr:{}\n'
.format(signal_number, stdout_pipe.output, stderr_pipe.output))
os._exit(1)
def check_kill_self():
if kill_flag[0]:
stdout_pipe.write_bypass('Stopping tests short...')
result.stopTestRun()
stdout_pipe.write_bypass(result_out.getvalue())
stdout_pipe.write_bypass(
'\ninterrupted stdout:\n{}\n'.format(stdout_pipe.output))
stderr_pipe.write_bypass(
'\ninterrupted stderr:\n{}\n'.format(stderr_pipe.output))
os._exit(1)
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGSEGV, fault_handler)
signal.signal(signal.SIGBUS, fault_handler)
signal.signal(signal.SIGABRT, fault_handler)
signal.signal(signal.SIGFPE, fault_handler)
signal.signal(signal.SIGILL, fault_handler)
# Sometimes output will lag after a test has successfully finished; we
# ignore such writes to our pipes.
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
# Run the tests
result.startTestRun()
for augmented_case in augmented_cases:
sys.stdout.write('Running {}\n'.format(augmented_case.case.id()))
sys.stdout.flush()
case_thread = threading.Thread(
target=augmented_case.case.run, args=(result,))
try:
with stdout_pipe, stderr_pipe:
case_thread.start()
while case_thread.is_alive():
check_kill_self()
time.sleep(0)
case_thread.join()
except:
# re-raise the exception after forcing the with-block to end
raise
result.set_output(
augmented_case.case, stdout_pipe.output, stderr_pipe.output)
sys.stdout.write(result_out.getvalue())
sys.stdout.flush()
result_out.truncate(0)
check_kill_self()
result.stopTestRun()
stdout_pipe.close()
stderr_pipe.close()
# Report results
sys.stdout.write(result_out.getvalue())
sys.stdout.flush()
signal.signal(signal.SIGINT, signal.SIG_DFL)
with open('report.xml', 'w') as report_xml_file:
_result.jenkins_junit_xml(result).write(report_xml_file)
return result
@@ -33,10 +33,10 @@ import unittest
from grpc.beta import implementations
from grpc_interop import _interop_test_case
from grpc_interop import methods
from grpc_interop import server
from grpc_interop import test_pb2
from tests.interop import _interop_test_case
from tests.interop import methods
from tests.interop import server
from tests.interop import test_pb2
class InsecureInteropTest(
......
@@ -29,7 +29,7 @@
"""Common code for unit tests of the interoperability test code."""
from grpc_interop import methods
from tests.interop import methods
class InteropTestCase(object):
......
@@ -33,12 +33,12 @@ import unittest
from grpc.beta import implementations
from grpc_test.beta import test_utilities
from tests.interop import _interop_test_case
from tests.interop import methods
from tests.interop import resources
from tests.interop import test_pb2
from grpc_interop import _interop_test_case
from grpc_interop import methods
from grpc_interop import resources
from grpc_interop import test_pb2
from tests.unit.beta import test_utilities
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
......
@@ -34,11 +34,10 @@ from oauth2client import client as oauth2client_client
from grpc.beta import implementations
from grpc_test.beta import test_utilities
from grpc_interop import methods
from grpc_interop import resources
from grpc_interop import test_pb2
from tests.interop import methods
from tests.interop import resources
from tests.interop import test_pb2
from tests.unit.beta import test_utilities
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
@@ -114,7 +113,7 @@ def _test_case_from_arg(test_case_arg):
raise ValueError('No test case "%s"!' % test_case_arg)
def _test_interoperability():
def test_interoperability():
args = _args()
stub = _stub(args)
test_case = _test_case_from_arg(args.test_case)
@@ -122,4 +121,4 @@ def _test_interoperability():
if __name__ == '__main__':
_test_interoperability()
test_interoperability()