from subunit.test_results import AutoTimingTestResultDecorator
from testtools import ConcurrentTestSuite, iterate_tests
+from testtools.content import TracebackContent, text_content
__all__ = [
CPU_COUNT = cpu_count()
-def fork_for_tests(concurrency_num=CPU_COUNT):
+class BufferingTestProtocolClient(TestProtocolClient):
+    """A TestProtocolClient which can buffer the test outputs
+
+    This class captures the stdout and stderr output streams of the
+    tests as it runs them, and includes the output texts in the subunit
+    stream as additional details.
+
+    Args:
+        stream: A file-like object to write a subunit stream to
+        buffer (bool): True to capture test stdout/stderr outputs and
+            include them in the test details
+    """
+    def __init__(self, stream, buffer=True):
+        super().__init__(stream)
+        self.buffer = buffer
+
+    def _addOutcome(self, outcome, test, error=None, details=None,
+                    error_permitted=True):
+        """Report a test outcome to the subunit stream
+
+        The parent class uses this function as a common implementation
+        for various methods that report successes, errors, failures, etc.
+
+        This version automatically upgrades the error tracebacks to the
+        new 'details' format by wrapping them in a Content object, so
+        that we can include the captured test output in the test result
+        details.
+
+        Args:
+            outcome: A string describing the outcome - used as the
+                event name in the subunit stream.
+            test: The test case whose outcome is to be reported
+            error: Standard unittest positional argument form - an
+                exc_info tuple.
+            details: New Testing-in-python drafted API; a dict from
+                string to subunit.Content objects.
+            error_permitted: If True then one and only one of error or
+                details must be supplied. If False then error must not
+                be supplied and details is still optional.
+        """
+        if details is None:
+            details = {}
+
+        # The parent class raises an exception if error_permitted is
+        # False but error is not None. We want that exception in that
+        # case, so don't touch error when error_permitted is explicitly
+        # False.
+        if error_permitted and error is not None:
+            # The parent class prefers error over details
+            details['traceback'] = TracebackContent(error, test)
+            error_permitted = False
+            error = None
+
+        if self.buffer:
+            stdout = sys.stdout.getvalue()
+            if stdout:
+                details['stdout'] = text_content(stdout)
+
+            stderr = sys.stderr.getvalue()
+            if stderr:
+                details['stderr'] = text_content(stderr)
+
+        return super()._addOutcome(outcome, test, error=error,
+            details=details, error_permitted=error_permitted)
+
+
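For reference, the sketch below (not part of the patch; the exc_info tuple
and the literal strings are stand-ins) shows roughly what details dict
_addOutcome ends up handing to the parent class for a failing, buffered
test:

    # Illustrative only: the shape of the reported details
    import sys
    import unittest
    from testtools.content import TracebackContent, text_content

    test = unittest.TestCase()      # stand-in test case
    try:
        raise RuntimeError('boom')  # stand-in test failure
    except RuntimeError:
        exc_info = sys.exc_info()

    details = {
        'traceback': TracebackContent(exc_info, test),    # upgraded from 'error'
        'stdout': text_content('text the test printed'),
        'stderr': text_content('text written to stderr'),
    }
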
+def fork_for_tests(concurrency_num=CPU_COUNT, buffer=False):
    """Implementation of `make_tests` used to construct `ConcurrentTestSuite`.

    :param concurrency_num: number of processes to use.
+    :param buffer: True to capture each test's stdout/stderr output and
+        include it in the subunit stream as test details.
    """
+    if buffer:
+        test_protocol_client_class = BufferingTestProtocolClient
+    else:
+        test_protocol_client_class = TestProtocolClient
+
    def do_fork(suite):
        """Take suite and start up multiple runners by forking (Unix only).
                    # child actually gets keystrokes for pdb etc).
                    sys.stdin.close()
                    subunit_result = AutoTimingTestResultDecorator(
-                        TestProtocolClient(stream)
+                        test_protocol_client_class(stream)
                    )
                    process_suite.run(subunit_result)
                except:
            else:
                os.close(c2pwrite)
                stream = os.fdopen(c2pread, 'rb')
-                test = ProtocolTestCase(stream)
+                # If we don't pass the second argument here, it defaults
+                # to sys.stdout.buffer down the line. But if we don't
+                # pass it *now*, it may be resolved after sys.stdout has
+                # been replaced with a StringIO (to capture test output),
+                # which doesn't have a buffer attribute and can end up
+                # occasionally causing a 'broken-runner' error.
+                test = ProtocolTestCase(stream, sys.stdout.buffer)
                result.append(test)
        return result
    return do_fork
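The comment above concerns unittest's output buffering. A minimal
standard-library sketch of that mechanism, showing why sys.stdout.buffer
must be resolved before the tests swap sys.stdout out:

    # Demonstrates that unittest's buffer mode replaces sys.stdout with
    # an io.StringIO while each test runs, so .buffer must be looked up
    # before any test starts.
    import io
    import sys
    import unittest

    class Demo(unittest.TestCase):
        def test_noise(self):
            print('captured, not printed')
            # While buffering, sys.stdout is a StringIO with no .buffer
            assert isinstance(sys.stdout, io.StringIO)
            assert not hasattr(sys.stdout, 'buffer')

    result = unittest.TestResult()
    result.buffer = True  # the flag TextTestRunner(buffer=True) sets
    unittest.TestLoader().loadTestsFromTestCase(Demo).run(result)
    assert result.wasSuccessful()
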
from io import StringIO
+buffer_outputs = True
use_concurrent = True
try:
    from concurrencytest.concurrencytest import ConcurrentTestSuite
            0: Print nothing
            1: Print a dot per test
            2: Print test names
+            3: Print test names, and buffered outputs for failing tests
    """
    def __init__(self, stream, descriptions, verbosity):
        self.verbosity = verbosity
        self.printErrorList('XFAIL', self.expectedFailures)
        self.printErrorList('XPASS', unexpected_successes)
+    def addError(self, test, err):
+        """Called when an error has occurred."""
+        super().addError(test, err)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addFailure(self, test, err):
+        """Called when a test has failed."""
+        super().addFailure(test, err)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addSubTest(self, test, subtest, err):
+        """Called at the end of a subtest."""
+        super().addSubTest(test, subtest, err)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addSuccess(self, test):
+        """Called when a test has completed successfully."""
+        super().addSuccess(test)
+        # Don't print stdout/stderr for successful tests
+        self._mirrorOutput = False
+
    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        # Add empty line to keep spacing consistent with other results
        if not reason.endswith('\n'):
            reason += '\n'
        super().addSkip(test, reason)
+        self._mirrorOutput &= self.verbosity >= 3
+
+    def addExpectedFailure(self, test, err):
+        """Called when an expected failure/error occurred."""
+        super().addExpectedFailure(test, err)
+        self._mirrorOutput &= self.verbosity >= 3
def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=(1 if verbosity is None else verbosity),
+        buffer=buffer_outputs,
        resultclass=FullTextTestResult,
    )
    if use_concurrent and processes != 1:
        suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count()))
+                fork_for_tests(processes or multiprocessing.cpu_count(),
+                               buffer=buffer_outputs))
    for module in class_and_module_list:
        if isinstance(module, str) and (not test_name or test_name == module):
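
Putting the pieces together, the sketch below (hypothetical wiring; Demo is
a stand-in test, and FullTextTestResult is assumed to be in scope from this
module) shows how verbosity level 3 interacts with buffering: unittest sets
the private _mirrorOutput flag whenever a buffered test fails, and
FullTextTestResult clears it again unless verbosity >= 3, so level 3 is
what actually echoes the captured stdout/stderr of failures:

    import sys
    import unittest

    class Demo(unittest.TestCase):
        def test_fails(self):
            print('this line is echoed at verbosity 3')
            self.fail('demonstrate mirrored output')

    suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=3,                     # test names + output of failures
        buffer=True,                     # capture stdout/stderr per test
        resultclass=FullTextTestResult,  # the class defined in this patch
    )
    runner.run(suite)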