mirror of https://github.com/python/cpython.git
gh-108834: regrtest reruns failed tests in subprocesses (#108839)
When using the --rerun option, regrtest now re-runs failed tests in verbose mode in fresh worker processes for more deterministic behavior, so it can write its final report even if a test kills a worker process.

Add the --fail-rerun option to regrtest: exit with a non-zero exit code if a test failed but then passed when re-run in verbose mode (in a fresh process). That is more useful now, since a test can pass when re-run in a fresh worker process even though it failed when run sequentially after other tests.

Rename the --verbose2 option (-w) to --rerun. Keep --verbose2 as a deprecated alias.

Changes:

* Fix and enhance statistics in the regrtest summary. Add "(filtered)" when the --match and/or --ignore options are used.
* Add RunTests class.
* Add TestResult.get_rerun_match_tests() method.
* Rewrite the code to serialize/deserialize worker arguments as JSON, using a new WorkerJob class.
* Fix stats when a test is run with --forever --rerun.
* If failed test names cannot be parsed, log a warning and don't filter tests.
* test_regrtest.test_rerun_success() now uses a marker file, since the test is re-run in a separate process.
* Add tests on the normalize_test_name() function.
* Add test_success() and test_skip() tests to test_regrtest.
parent c2ec174d24
commit 31c2945f14
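As an illustration (not taken from the diff below; the worker count and test names are arbitrary examples), a typical invocation exercising the renamed and new options could look like:

    # Run two tests with 2 worker processes; re-run any failures in verbose
    # mode in fresh worker processes, and exit with a non-zero code if a
    # test fails and then passes when re-run.
    ./python -m test -j2 --rerun --fail-rerun test_os test_httplib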
@@ -109,9 +109,10 @@ def parse_args():
 
 def main():
     args = parse_args()
-    if '-w' in args.test_args or '--verbose2' in args.test_args:
-        print("WARNING: -w/--verbose2 option should not be used to bisect!")
-        print()
+    for opt in ('-w', '--rerun', '--verbose2'):
+        if opt in args.test_args:
+            print(f"WARNING: {opt} option should not be used to bisect!")
+            print()
 
     if args.input:
         with open(args.input) as fp:
@@ -156,7 +156,7 @@ def __init__(self, **kwargs) -> None:
         self.coverdir = 'coverage'
         self.runleaks = False
         self.huntrleaks = False
-        self.verbose2 = False
+        self.rerun = False
         self.verbose3 = False
         self.print_slow = False
         self.random_seed = None
@@ -213,8 +213,10 @@ def _create_parser():
     group = parser.add_argument_group('Verbosity')
     group.add_argument('-v', '--verbose', action='count',
                        help='run tests in verbose mode with output to stdout')
-    group.add_argument('-w', '--verbose2', action='store_true',
+    group.add_argument('-w', '--rerun', action='store_true',
                        help='re-run failed tests in verbose mode')
+    group.add_argument('--verbose2', action='store_true', dest='rerun',
+                       help='deprecated alias to --rerun')
     group.add_argument('-W', '--verbose3', action='store_true',
                        help='display test output on failure')
     group.add_argument('-q', '--quiet', action='store_true',
@@ -309,6 +311,9 @@ def _create_parser():
     group.add_argument('--fail-env-changed', action='store_true',
                        help='if a test file alters the environment, mark '
                             'the test as failed')
+    group.add_argument('--fail-rerun', action='store_true',
+                       help='if a test failed and then passed when re-run, '
+                            'mark the tests as failed')
 
     group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
                        help='writes JUnit-style XML results to the specified '
@@ -380,7 +385,7 @@ def _parse_args(args, **kwargs):
         ns.python = shlex.split(ns.python)
     if ns.failfast and not (ns.verbose or ns.verbose3):
         parser.error("-G/--failfast needs either -v or -W")
-    if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
+    if ns.pgo and (ns.verbose or ns.rerun or ns.verbose3):
         parser.error("--pgo/-v don't go together!")
     if ns.pgo_extended:
         ns.pgo = True  # pgo_extended implies pgo
@@ -11,11 +11,11 @@
 import unittest
 from test.libregrtest.cmdline import _parse_args
 from test.libregrtest.runtest import (
-    findtests, split_test_packages, runtest, get_abs_module,
-    PROGRESS_MIN_TIME, State)
+    findtests, split_test_packages, runtest, abs_module_name,
+    PROGRESS_MIN_TIME, State, MatchTestsDict, RunTests)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.pgo import setup_pgo_tests
-from test.libregrtest.utils import (removepy, count, format_duration,
+from test.libregrtest.utils import (strip_py_suffix, count, format_duration,
                                     printlist, get_build_info)
 from test import support
 from test.support import TestStats
@@ -28,14 +28,6 @@
 # Must be smaller than buildbot "1200 seconds without output" limit.
 EXIT_TIMEOUT = 120.0
 
-# gh-90681: When rerunning tests, we might need to rerun the whole
-# class or module suite if some its life-cycle hooks fail.
-# Test level hooks are not affected.
-_TEST_LIFECYCLE_HOOKS = frozenset((
-    'setUpClass', 'tearDownClass',
-    'setUpModule', 'tearDownModule',
-))
-
 EXITCODE_BAD_TEST = 2
 EXITCODE_INTERRUPTED = 130
 EXITCODE_ENV_CHANGED = 3
@@ -72,19 +64,22 @@ def __init__(self):
         # tests
         self.tests = []
         self.selected = []
+        self.all_runtests: list[RunTests] = []
 
         # test results
-        self.good = []
-        self.bad = []
-        self.skipped = []
-        self.resource_denied = []
-        self.environment_changed = []
-        self.run_no_tests = []
-        self.need_rerun = []
-        self.rerun = []
-        self.first_result = None
+        self.good: list[str] = []
+        self.bad: list[str] = []
+        self.rerun_bad: list[str] = []
+        self.skipped: list[str] = []
+        self.resource_denied: list[str] = []
+        self.environment_changed: list[str] = []
+        self.run_no_tests: list[str] = []
+        self.rerun: list[str] = []
+
+        self.need_rerun: list[TestResult] = []
+        self.first_state: str | None = None
         self.interrupted = False
-        self.stats_dict: dict[str, TestStats] = {}
+        self.total_stats = TestStats()
 
         # used by --slow
         self.test_times = []
@@ -94,7 +89,7 @@ def __init__(self):
 
         # used to display the progress bar "[ 3/100]"
         self.start_time = time.perf_counter()
-        self.test_count = ''
+        self.test_count_text = ''
         self.test_count_width = 1
 
         # used by --single
@@ -107,7 +102,6 @@ def __init__(self):
         # misc
         self.win_load_tracker = None
         self.tmp_dir = None
-        self.worker_test_name = None
 
     def get_executed(self):
         return (set(self.good) | set(self.bad) | set(self.skipped)
@@ -115,11 +109,9 @@ def get_executed(self):
                 | set(self.run_no_tests))
 
     def accumulate_result(self, result, rerun=False):
+        fail_env_changed = self.ns.fail_env_changed
         test_name = result.test_name
 
-        if result.has_meaningful_duration() and not rerun:
-            self.test_times.append((result.duration, test_name))
-
         match result.state:
             case State.PASSED:
                 self.good.append(test_name)
@@ -128,25 +120,24 @@ def accumulate_result(self, result, rerun=False):
             case State.SKIPPED:
                 self.skipped.append(test_name)
             case State.RESOURCE_DENIED:
-                self.skipped.append(test_name)
                 self.resource_denied.append(test_name)
             case State.INTERRUPTED:
                 self.interrupted = True
             case State.DID_NOT_RUN:
                 self.run_no_tests.append(test_name)
             case _:
-                if result.is_failed(self.ns.fail_env_changed):
-                    if not rerun:
-                        self.bad.append(test_name)
-                        self.need_rerun.append(result)
+                if result.is_failed(fail_env_changed):
+                    self.bad.append(test_name)
+                    self.need_rerun.append(result)
                 else:
-                    raise ValueError(f"invalid test state: {state!r}")
+                    raise ValueError(f"invalid test state: {result.state!r}")
 
+        if result.has_meaningful_duration() and not rerun:
+            self.test_times.append((result.duration, test_name))
         if result.stats is not None:
-            self.stats_dict[result.test_name] = result.stats
-        if rerun and not(result.is_failed(False) or result.state == State.INTERRUPTED):
-            self.rerun.append(test_name)
-            self.bad.remove(test_name)
+            self.total_stats.accumulate(result.stats)
+        if rerun:
+            self.rerun.append(test_name)
 
         xml_data = result.xml_data
         if xml_data:
@ -180,13 +171,15 @@ def log(self, line=''):
|
||||||
print(line, flush=True)
|
print(line, flush=True)
|
||||||
|
|
||||||
def display_progress(self, test_index, text):
|
def display_progress(self, test_index, text):
|
||||||
if self.ns.quiet:
|
quiet = self.ns.quiet
|
||||||
|
pgo = self.ns.pgo
|
||||||
|
if quiet:
|
||||||
return
|
return
|
||||||
|
|
||||||
# "[ 51/405/1] test_tcl passed"
|
# "[ 51/405/1] test_tcl passed"
|
||||||
line = f"{test_index:{self.test_count_width}}{self.test_count}"
|
line = f"{test_index:{self.test_count_width}}{self.test_count_text}"
|
||||||
fails = len(self.bad) + len(self.environment_changed)
|
fails = len(self.bad) + len(self.environment_changed)
|
||||||
if fails and not self.ns.pgo:
|
if fails and not pgo:
|
||||||
line = f"{line}/{fails}"
|
line = f"{line}/{fails}"
|
||||||
self.log(f"[{line}] {text}")
|
self.log(f"[{line}] {text}")
|
||||||
|
|
||||||
|
@ -196,15 +189,7 @@ def parse_args(self, kwargs):
|
||||||
if ns.xmlpath:
|
if ns.xmlpath:
|
||||||
support.junit_xml_list = self.testsuite_xml = []
|
support.junit_xml_list = self.testsuite_xml = []
|
||||||
|
|
||||||
worker_args = ns.worker_args
|
strip_py_suffix(ns.args)
|
||||||
if worker_args is not None:
|
|
||||||
from test.libregrtest.runtest_mp import parse_worker_args
|
|
||||||
ns, test_name = parse_worker_args(ns.worker_args)
|
|
||||||
ns.worker_args = worker_args
|
|
||||||
self.worker_test_name = test_name
|
|
||||||
|
|
||||||
# Strip .py extensions.
|
|
||||||
removepy(ns.args)
|
|
||||||
|
|
||||||
if ns.huntrleaks:
|
if ns.huntrleaks:
|
||||||
warmup, repetitions, _ = ns.huntrleaks
|
warmup, repetitions, _ = ns.huntrleaks
|
||||||
|
@ -221,9 +206,18 @@ def parse_args(self, kwargs):
|
||||||
self.ns = ns
|
self.ns = ns
|
||||||
|
|
||||||
def find_tests(self, tests):
|
def find_tests(self, tests):
|
||||||
|
ns = self.ns
|
||||||
|
single = ns.single
|
||||||
|
fromfile = ns.fromfile
|
||||||
|
pgo = ns.pgo
|
||||||
|
exclude = ns.exclude
|
||||||
|
test_dir = ns.testdir
|
||||||
|
starting_test = ns.start
|
||||||
|
randomize = ns.randomize
|
||||||
|
|
||||||
self.tests = tests
|
self.tests = tests
|
||||||
|
|
||||||
if self.ns.single:
|
if single:
|
||||||
self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest')
|
self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest')
|
||||||
try:
|
try:
|
||||||
with open(self.next_single_filename, 'r') as fp:
|
with open(self.next_single_filename, 'r') as fp:
|
||||||
|
@ -232,12 +226,12 @@ def find_tests(self, tests):
|
||||||
except OSError:
|
except OSError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if self.ns.fromfile:
|
if fromfile:
|
||||||
self.tests = []
|
self.tests = []
|
||||||
# regex to match 'test_builtin' in line:
|
# regex to match 'test_builtin' in line:
|
||||||
# '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
|
# '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
|
||||||
regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
|
regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
|
||||||
with open(os.path.join(os_helper.SAVEDCWD, self.ns.fromfile)) as fp:
|
with open(os.path.join(os_helper.SAVEDCWD, fromfile)) as fp:
|
||||||
for line in fp:
|
for line in fp:
|
||||||
line = line.split('#', 1)[0]
|
line = line.split('#', 1)[0]
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
|
@ -245,22 +239,22 @@ def find_tests(self, tests):
|
||||||
if match is not None:
|
if match is not None:
|
||||||
self.tests.append(match.group())
|
self.tests.append(match.group())
|
||||||
|
|
||||||
removepy(self.tests)
|
strip_py_suffix(self.tests)
|
||||||
|
|
||||||
if self.ns.pgo:
|
if pgo:
|
||||||
# add default PGO tests if no tests are specified
|
# add default PGO tests if no tests are specified
|
||||||
setup_pgo_tests(self.ns)
|
setup_pgo_tests(ns)
|
||||||
|
|
||||||
exclude = set()
|
exclude_tests = set()
|
||||||
if self.ns.exclude:
|
if exclude:
|
||||||
for arg in self.ns.args:
|
for arg in ns.args:
|
||||||
exclude.add(arg)
|
exclude_tests.add(arg)
|
||||||
self.ns.args = []
|
ns.args = []
|
||||||
|
|
||||||
alltests = findtests(testdir=self.ns.testdir, exclude=exclude)
|
alltests = findtests(testdir=test_dir, exclude=exclude_tests)
|
||||||
|
|
||||||
if not self.ns.fromfile:
|
if not fromfile:
|
||||||
self.selected = self.tests or self.ns.args
|
self.selected = self.tests or ns.args
|
||||||
if self.selected:
|
if self.selected:
|
||||||
self.selected = split_test_packages(self.selected)
|
self.selected = split_test_packages(self.selected)
|
||||||
else:
|
else:
|
||||||
|
@ -268,7 +262,7 @@ def find_tests(self, tests):
|
||||||
else:
|
else:
|
||||||
self.selected = self.tests
|
self.selected = self.tests
|
||||||
|
|
||||||
if self.ns.single:
|
if single:
|
||||||
self.selected = self.selected[:1]
|
self.selected = self.selected[:1]
|
||||||
try:
|
try:
|
||||||
pos = alltests.index(self.selected[0])
|
pos = alltests.index(self.selected[0])
|
||||||
|
@ -277,17 +271,17 @@ def find_tests(self, tests):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
# Remove all the selected tests that precede start if it's set.
|
# Remove all the selected tests that precede start if it's set.
|
||||||
if self.ns.start:
|
if starting_test:
|
||||||
try:
|
try:
|
||||||
del self.selected[:self.selected.index(self.ns.start)]
|
del self.selected[:self.selected.index(starting_test)]
|
||||||
except ValueError:
|
except ValueError:
|
||||||
print("Couldn't find starting test (%s), using all tests"
|
print(f"Cannot find starting test: {starting_test}")
|
||||||
% self.ns.start, file=sys.stderr)
|
sys.exit(1)
|
||||||
|
|
||||||
if self.ns.randomize:
|
if randomize:
|
||||||
if self.ns.random_seed is None:
|
if ns.random_seed is None:
|
||||||
self.ns.random_seed = random.randrange(10000000)
|
ns.random_seed = random.randrange(10000000)
|
||||||
random.seed(self.ns.random_seed)
|
random.seed(ns.random_seed)
|
||||||
random.shuffle(self.selected)
|
random.shuffle(self.selected)
|
||||||
|
|
||||||
def list_tests(self):
|
def list_tests(self):
|
||||||
|
@ -305,25 +299,63 @@ def _list_cases(self, suite):
|
||||||
print(test.id())
|
print(test.id())
|
||||||
|
|
||||||
def list_cases(self):
|
def list_cases(self):
|
||||||
|
ns = self.ns
|
||||||
|
test_dir = ns.testdir
|
||||||
support.verbose = False
|
support.verbose = False
|
||||||
support.set_match_tests(self.ns.match_tests, self.ns.ignore_tests)
|
support.set_match_tests(ns.match_tests, ns.ignore_tests)
|
||||||
|
|
||||||
|
skipped = []
|
||||||
for test_name in self.selected:
|
for test_name in self.selected:
|
||||||
abstest = get_abs_module(self.ns, test_name)
|
module_name = abs_module_name(test_name, test_dir)
|
||||||
try:
|
try:
|
||||||
suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
|
suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
|
||||||
self._list_cases(suite)
|
self._list_cases(suite)
|
||||||
except unittest.SkipTest:
|
except unittest.SkipTest:
|
||||||
self.skipped.append(test_name)
|
skipped.append(test_name)
|
||||||
|
|
||||||
if self.skipped:
|
if skipped:
|
||||||
print(file=sys.stderr)
|
sys.stdout.flush()
|
||||||
print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr)
|
stderr = sys.stderr
|
||||||
printlist(self.skipped, file=sys.stderr)
|
print(file=stderr)
|
||||||
|
print(count(len(skipped), "test"), "skipped:", file=stderr)
|
||||||
|
printlist(skipped, file=stderr)
|
||||||
|
|
||||||
def rerun_failed_tests(self):
|
def get_rerun_match(self, rerun_list) -> MatchTestsDict:
|
||||||
self.log()
|
rerun_match_tests = {}
|
||||||
|
for result in rerun_list:
|
||||||
|
match_tests = result.get_rerun_match_tests()
|
||||||
|
# ignore empty match list
|
||||||
|
if match_tests:
|
||||||
|
rerun_match_tests[result.test_name] = match_tests
|
||||||
|
return rerun_match_tests
|
||||||
|
|
||||||
|
def _rerun_failed_tests(self, need_rerun):
|
||||||
|
# Configure the runner to re-run tests
|
||||||
|
ns = self.ns
|
||||||
|
ns.verbose = True
|
||||||
|
ns.failfast = False
|
||||||
|
ns.verbose3 = False
|
||||||
|
ns.forever = False
|
||||||
|
if ns.use_mp is None:
|
||||||
|
ns.use_mp = 1
|
||||||
|
|
||||||
|
# Get tests to re-run
|
||||||
|
tests = [result.test_name for result in need_rerun]
|
||||||
|
match_tests = self.get_rerun_match(need_rerun)
|
||||||
|
self.set_tests(tests)
|
||||||
|
|
||||||
|
# Clear previously failed tests
|
||||||
|
self.rerun_bad.extend(self.bad)
|
||||||
|
self.bad.clear()
|
||||||
|
self.need_rerun.clear()
|
||||||
|
|
||||||
|
# Re-run failed tests
|
||||||
|
self.log(f"Re-running {len(tests)} failed tests in verbose mode in subprocesses")
|
||||||
|
runtests = RunTests(tests, match_tests=match_tests, rerun=True)
|
||||||
|
self.all_runtests.append(runtests)
|
||||||
|
self._run_tests_mp(runtests)
|
||||||
|
|
||||||
|
def rerun_failed_tests(self, need_rerun):
|
||||||
if self.ns.python:
|
if self.ns.python:
|
||||||
# Temp patch for https://github.com/python/cpython/issues/94052
|
# Temp patch for https://github.com/python/cpython/issues/94052
|
||||||
self.log(
|
self.log(
|
||||||
|
@ -332,45 +364,10 @@ def rerun_failed_tests(self):
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
self.ns.verbose = True
|
self.first_state = self.get_tests_state()
|
||||||
self.ns.failfast = False
|
|
||||||
self.ns.verbose3 = False
|
|
||||||
|
|
||||||
self.first_result = self.get_tests_result()
|
print()
|
||||||
|
self._rerun_failed_tests(need_rerun)
|
||||||
self.log("Re-running failed tests in verbose mode")
|
|
||||||
rerun_list = list(self.need_rerun)
|
|
||||||
self.need_rerun.clear()
|
|
||||||
for result in rerun_list:
|
|
||||||
test_name = result.test_name
|
|
||||||
self.rerun.append(test_name)
|
|
||||||
|
|
||||||
errors = result.errors or []
|
|
||||||
failures = result.failures or []
|
|
||||||
error_names = [
|
|
||||||
self.normalize_test_name(test_full_name, is_error=True)
|
|
||||||
for (test_full_name, *_) in errors]
|
|
||||||
failure_names = [
|
|
||||||
self.normalize_test_name(test_full_name)
|
|
||||||
for (test_full_name, *_) in failures]
|
|
||||||
self.ns.verbose = True
|
|
||||||
orig_match_tests = self.ns.match_tests
|
|
||||||
if errors or failures:
|
|
||||||
if self.ns.match_tests is None:
|
|
||||||
self.ns.match_tests = []
|
|
||||||
self.ns.match_tests.extend(error_names)
|
|
||||||
self.ns.match_tests.extend(failure_names)
|
|
||||||
matching = "matching: " + ", ".join(self.ns.match_tests)
|
|
||||||
self.log(f"Re-running {test_name} in verbose mode ({matching})")
|
|
||||||
else:
|
|
||||||
self.log(f"Re-running {test_name} in verbose mode")
|
|
||||||
result = runtest(self.ns, test_name)
|
|
||||||
self.ns.match_tests = orig_match_tests
|
|
||||||
|
|
||||||
self.accumulate_result(result, rerun=True)
|
|
||||||
|
|
||||||
if result.state == State.INTERRUPTED:
|
|
||||||
break
|
|
||||||
|
|
||||||
if self.bad:
|
if self.bad:
|
||||||
print(count(len(self.bad), 'test'), "failed again:")
|
print(count(len(self.bad), 'test'), "failed again:")
|
||||||
|
@ -378,28 +375,17 @@ def rerun_failed_tests(self):
|
||||||
|
|
||||||
self.display_result()
|
self.display_result()
|
||||||
|
|
||||||
def normalize_test_name(self, test_full_name, *, is_error=False):
|
|
||||||
short_name = test_full_name.split(" ")[0]
|
|
||||||
if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
|
|
||||||
# This means that we have a failure in a life-cycle hook,
|
|
||||||
# we need to rerun the whole module or class suite.
|
|
||||||
# Basically the error looks like this:
|
|
||||||
# ERROR: setUpClass (test.test_reg_ex.RegTest)
|
|
||||||
# or
|
|
||||||
# ERROR: setUpModule (test.test_reg_ex)
|
|
||||||
# So, we need to parse the class / module name.
|
|
||||||
lpar = test_full_name.index('(')
|
|
||||||
rpar = test_full_name.index(')')
|
|
||||||
return test_full_name[lpar + 1: rpar].split('.')[-1]
|
|
||||||
return short_name
|
|
||||||
|
|
||||||
def display_result(self):
|
def display_result(self):
|
||||||
|
pgo = self.ns.pgo
|
||||||
|
quiet = self.ns.quiet
|
||||||
|
print_slow = self.ns.print_slow
|
||||||
|
|
||||||
# If running the test suite for PGO then no one cares about results.
|
# If running the test suite for PGO then no one cares about results.
|
||||||
if self.ns.pgo:
|
if pgo:
|
||||||
return
|
return
|
||||||
|
|
||||||
print()
|
print()
|
||||||
print("== Tests result: %s ==" % self.get_tests_result())
|
print("== Tests result: %s ==" % self.get_tests_state())
|
||||||
|
|
||||||
if self.interrupted:
|
if self.interrupted:
|
||||||
print("Test suite interrupted by signal SIGINT.")
|
print("Test suite interrupted by signal SIGINT.")
|
||||||
|
@ -410,7 +396,7 @@ def display_result(self):
|
||||||
print(count(len(omitted), "test"), "omitted:")
|
print(count(len(omitted), "test"), "omitted:")
|
||||||
printlist(omitted)
|
printlist(omitted)
|
||||||
|
|
||||||
if self.good and not self.ns.quiet:
|
if self.good and not quiet:
|
||||||
print()
|
print()
|
||||||
if (not self.bad
|
if (not self.bad
|
||||||
and not self.skipped
|
and not self.skipped
|
||||||
|
@ -419,7 +405,7 @@ def display_result(self):
|
||||||
print("All", end=' ')
|
print("All", end=' ')
|
||||||
print(count(len(self.good), "test"), "OK.")
|
print(count(len(self.good), "test"), "OK.")
|
||||||
|
|
||||||
if self.ns.print_slow:
|
if print_slow:
|
||||||
self.test_times.sort(reverse=True)
|
self.test_times.sort(reverse=True)
|
||||||
print()
|
print()
|
||||||
print("10 slowest tests:")
|
print("10 slowest tests:")
|
||||||
|
@ -437,11 +423,16 @@ def display_result(self):
|
||||||
count(len(self.environment_changed), "test")))
|
count(len(self.environment_changed), "test")))
|
||||||
printlist(self.environment_changed)
|
printlist(self.environment_changed)
|
||||||
|
|
||||||
if self.skipped and not self.ns.quiet:
|
if self.skipped and not quiet:
|
||||||
print()
|
print()
|
||||||
print(count(len(self.skipped), "test"), "skipped:")
|
print(count(len(self.skipped), "test"), "skipped:")
|
||||||
printlist(self.skipped)
|
printlist(self.skipped)
|
||||||
|
|
||||||
|
if self.resource_denied and not quiet:
|
||||||
|
print()
|
||||||
|
print(count(len(self.resource_denied), "test"), "skipped (resource denied):")
|
||||||
|
printlist(self.resource_denied)
|
||||||
|
|
||||||
if self.rerun:
|
if self.rerun:
|
||||||
print()
|
print()
|
||||||
print("%s:" % count(len(self.rerun), "re-run test"))
|
print("%s:" % count(len(self.rerun), "re-run test"))
|
||||||
|
@ -452,40 +443,58 @@ def display_result(self):
|
||||||
print(count(len(self.run_no_tests), "test"), "run no tests:")
|
print(count(len(self.run_no_tests), "test"), "run no tests:")
|
||||||
printlist(self.run_no_tests)
|
printlist(self.run_no_tests)
|
||||||
|
|
||||||
def run_tests_sequential(self):
|
def run_test(self, test_index, test_name, previous_test, save_modules):
|
||||||
if self.ns.trace:
|
text = test_name
|
||||||
|
if previous_test:
|
||||||
|
text = '%s -- %s' % (text, previous_test)
|
||||||
|
self.display_progress(test_index, text)
|
||||||
|
|
||||||
|
if self.tracer:
|
||||||
|
# If we're tracing code coverage, then we don't exit with status
|
||||||
|
# if on a false return value from main.
|
||||||
|
cmd = ('result = runtest(self.ns, test_name); '
|
||||||
|
'self.accumulate_result(result)')
|
||||||
|
ns = dict(locals())
|
||||||
|
self.tracer.runctx(cmd, globals=globals(), locals=ns)
|
||||||
|
result = ns['result']
|
||||||
|
else:
|
||||||
|
result = runtest(self.ns, test_name)
|
||||||
|
self.accumulate_result(result)
|
||||||
|
|
||||||
|
# Unload the newly imported modules (best effort finalization)
|
||||||
|
for module in sys.modules.keys():
|
||||||
|
if module not in save_modules and module.startswith("test."):
|
||||||
|
support.unload(module)
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def run_tests_sequentially(self, runtests):
|
||||||
|
ns = self.ns
|
||||||
|
coverage = ns.trace
|
||||||
|
fail_fast = ns.failfast
|
||||||
|
fail_env_changed = ns.fail_env_changed
|
||||||
|
timeout = ns.timeout
|
||||||
|
|
||||||
|
if coverage:
|
||||||
import trace
|
import trace
|
||||||
self.tracer = trace.Trace(trace=False, count=True)
|
self.tracer = trace.Trace(trace=False, count=True)
|
||||||
|
|
||||||
save_modules = sys.modules.keys()
|
save_modules = sys.modules.keys()
|
||||||
|
|
||||||
msg = "Run tests sequentially"
|
msg = "Run tests sequentially"
|
||||||
if self.ns.timeout:
|
if timeout:
|
||||||
msg += " (timeout: %s)" % format_duration(self.ns.timeout)
|
msg += " (timeout: %s)" % format_duration(timeout)
|
||||||
self.log(msg)
|
self.log(msg)
|
||||||
|
|
||||||
previous_test = None
|
previous_test = None
|
||||||
for test_index, test_name in enumerate(self.tests, 1):
|
tests_iter = runtests.iter_tests()
|
||||||
|
for test_index, test_name in enumerate(tests_iter, 1):
|
||||||
start_time = time.perf_counter()
|
start_time = time.perf_counter()
|
||||||
|
|
||||||
text = test_name
|
result = self.run_test(test_index, test_name,
|
||||||
if previous_test:
|
previous_test, save_modules)
|
||||||
text = '%s -- %s' % (text, previous_test)
|
|
||||||
self.display_progress(test_index, text)
|
|
||||||
|
|
||||||
if self.tracer:
|
if result.must_stop(fail_fast, fail_env_changed):
|
||||||
# If we're tracing code coverage, then we don't exit with status
|
|
||||||
# if on a false return value from main.
|
|
||||||
cmd = ('result = runtest(self.ns, test_name); '
|
|
||||||
'self.accumulate_result(result)')
|
|
||||||
ns = dict(locals())
|
|
||||||
self.tracer.runctx(cmd, globals=globals(), locals=ns)
|
|
||||||
result = ns['result']
|
|
||||||
else:
|
|
||||||
result = runtest(self.ns, test_name)
|
|
||||||
self.accumulate_result(result)
|
|
||||||
|
|
||||||
if result.state == State.INTERRUPTED:
|
|
||||||
break
|
break
|
||||||
|
|
||||||
previous_test = str(result)
|
previous_test = str(result)
|
||||||
|
@ -496,26 +505,9 @@ def run_tests_sequential(self):
|
||||||
# be quiet: say nothing if the test passed shortly
|
# be quiet: say nothing if the test passed shortly
|
||||||
previous_test = None
|
previous_test = None
|
||||||
|
|
||||||
# Unload the newly imported modules (best effort finalization)
|
|
||||||
for module in sys.modules.keys():
|
|
||||||
if module not in save_modules and module.startswith("test."):
|
|
||||||
support.unload(module)
|
|
||||||
|
|
||||||
if self.ns.failfast and result.is_failed(self.ns.fail_env_changed):
|
|
||||||
break
|
|
||||||
|
|
||||||
if previous_test:
|
if previous_test:
|
||||||
print(previous_test)
|
print(previous_test)
|
||||||
|
|
||||||
def _test_forever(self, tests):
|
|
||||||
while True:
|
|
||||||
for test_name in tests:
|
|
||||||
yield test_name
|
|
||||||
if self.bad:
|
|
||||||
return
|
|
||||||
if self.ns.fail_env_changed and self.environment_changed:
|
|
||||||
return
|
|
||||||
|
|
||||||
def display_header(self):
|
def display_header(self):
|
||||||
# Print basic platform information
|
# Print basic platform information
|
||||||
print("==", platform.python_implementation(), *sys.version.split())
|
print("==", platform.python_implementation(), *sys.version.split())
|
||||||
|
@ -560,11 +552,13 @@ def no_tests_run(self):
|
||||||
return not any((self.good, self.bad, self.skipped, self.interrupted,
|
return not any((self.good, self.bad, self.skipped, self.interrupted,
|
||||||
self.environment_changed))
|
self.environment_changed))
|
||||||
|
|
||||||
def get_tests_result(self):
|
def get_tests_state(self):
|
||||||
|
fail_env_changed = self.ns.fail_env_changed
|
||||||
|
|
||||||
result = []
|
result = []
|
||||||
if self.bad:
|
if self.bad:
|
||||||
result.append("FAILURE")
|
result.append("FAILURE")
|
||||||
elif self.ns.fail_env_changed and self.environment_changed:
|
elif fail_env_changed and self.environment_changed:
|
||||||
result.append("ENV CHANGED")
|
result.append("ENV CHANGED")
|
||||||
elif self.no_tests_run():
|
elif self.no_tests_run():
|
||||||
result.append("NO TESTS RAN")
|
result.append("NO TESTS RAN")
|
||||||
|
@ -576,10 +570,40 @@ def get_tests_result(self):
|
||||||
result.append("SUCCESS")
|
result.append("SUCCESS")
|
||||||
|
|
||||||
result = ', '.join(result)
|
result = ', '.join(result)
|
||||||
if self.first_result:
|
if self.first_state:
|
||||||
result = '%s then %s' % (self.first_result, result)
|
result = '%s then %s' % (self.first_state, result)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
def _run_tests_mp(self, runtests: RunTests) -> None:
|
||||||
|
from test.libregrtest.runtest_mp import run_tests_multiprocess
|
||||||
|
# If we're on windows and this is the parent runner (not a worker),
|
||||||
|
# track the load average.
|
||||||
|
if sys.platform == 'win32':
|
||||||
|
from test.libregrtest.win_utils import WindowsLoadTracker
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.win_load_tracker = WindowsLoadTracker()
|
||||||
|
except PermissionError as error:
|
||||||
|
# Standard accounts may not have access to the performance
|
||||||
|
# counters.
|
||||||
|
print(f'Failed to create WindowsLoadTracker: {error}')
|
||||||
|
|
||||||
|
try:
|
||||||
|
run_tests_multiprocess(self, runtests)
|
||||||
|
finally:
|
||||||
|
if self.win_load_tracker is not None:
|
||||||
|
self.win_load_tracker.close()
|
||||||
|
self.win_load_tracker = None
|
||||||
|
|
||||||
|
def set_tests(self, tests):
|
||||||
|
self.tests = tests
|
||||||
|
if self.ns.forever:
|
||||||
|
self.test_count_text = ''
|
||||||
|
self.test_count_width = 3
|
||||||
|
else:
|
||||||
|
self.test_count_text = '/{}'.format(len(self.tests))
|
||||||
|
self.test_count_width = len(self.test_count_text) - 1
|
||||||
|
|
||||||
def run_tests(self):
|
def run_tests(self):
|
||||||
# For a partial run, we do not need to clutter the output.
|
# For a partial run, we do not need to clutter the output.
|
||||||
if (self.ns.header
|
if (self.ns.header
|
||||||
|
@ -597,37 +621,14 @@ def run_tests(self):
|
||||||
if self.ns.randomize:
|
if self.ns.randomize:
|
||||||
print("Using random seed", self.ns.random_seed)
|
print("Using random seed", self.ns.random_seed)
|
||||||
|
|
||||||
if self.ns.forever:
|
tests = self.selected
|
||||||
self.tests = self._test_forever(list(self.selected))
|
self.set_tests(tests)
|
||||||
self.test_count = ''
|
runtests = RunTests(tests, forever=self.ns.forever)
|
||||||
self.test_count_width = 3
|
self.all_runtests.append(runtests)
|
||||||
else:
|
|
||||||
self.tests = iter(self.selected)
|
|
||||||
self.test_count = '/{}'.format(len(self.selected))
|
|
||||||
self.test_count_width = len(self.test_count) - 1
|
|
||||||
|
|
||||||
if self.ns.use_mp:
|
if self.ns.use_mp:
|
||||||
from test.libregrtest.runtest_mp import run_tests_multiprocess
|
self._run_tests_mp(runtests)
|
||||||
# If we're on windows and this is the parent runner (not a worker),
|
|
||||||
# track the load average.
|
|
||||||
if sys.platform == 'win32' and self.worker_test_name is None:
|
|
||||||
from test.libregrtest.win_utils import WindowsLoadTracker
|
|
||||||
|
|
||||||
try:
|
|
||||||
self.win_load_tracker = WindowsLoadTracker()
|
|
||||||
except PermissionError as error:
|
|
||||||
# Standard accounts may not have access to the performance
|
|
||||||
# counters.
|
|
||||||
print(f'Failed to create WindowsLoadTracker: {error}')
|
|
||||||
|
|
||||||
try:
|
|
||||||
run_tests_multiprocess(self)
|
|
||||||
finally:
|
|
||||||
if self.win_load_tracker is not None:
|
|
||||||
self.win_load_tracker.close()
|
|
||||||
self.win_load_tracker = None
|
|
||||||
else:
|
else:
|
||||||
self.run_tests_sequential()
|
self.run_tests_sequentially(runtests)
|
||||||
|
|
||||||
def finalize(self):
|
def finalize(self):
|
||||||
if self.next_single_filename:
|
if self.next_single_filename:
|
||||||
|
@ -642,23 +643,29 @@ def finalize(self):
|
||||||
r.write_results(show_missing=True, summary=True,
|
r.write_results(show_missing=True, summary=True,
|
||||||
coverdir=self.ns.coverdir)
|
coverdir=self.ns.coverdir)
|
||||||
|
|
||||||
print()
|
|
||||||
self.display_summary()
|
|
||||||
|
|
||||||
if self.ns.runleaks:
|
if self.ns.runleaks:
|
||||||
os.system("leaks %d" % os.getpid())
|
os.system("leaks %d" % os.getpid())
|
||||||
|
|
||||||
|
self.save_xml_result()
|
||||||
|
|
||||||
def display_summary(self):
|
def display_summary(self):
|
||||||
duration = time.perf_counter() - self.start_time
|
duration = time.perf_counter() - self.start_time
|
||||||
|
first_runtests = self.all_runtests[0]
|
||||||
|
# the second runtests (re-run failed tests) disables forever,
|
||||||
|
# use the first runtests
|
||||||
|
forever = first_runtests.forever
|
||||||
|
filtered = bool(self.ns.match_tests) or bool(self.ns.ignore_tests)
|
||||||
|
|
||||||
# Total duration
|
# Total duration
|
||||||
|
print()
|
||||||
print("Total duration: %s" % format_duration(duration))
|
print("Total duration: %s" % format_duration(duration))
|
||||||
|
|
||||||
# Total tests
|
# Total tests
|
||||||
total = TestStats()
|
total = self.total_stats
|
||||||
for stats in self.stats_dict.values():
|
text = f'run={total.tests_run:,}'
|
||||||
total.accumulate(stats)
|
if filtered:
|
||||||
stats = [f'run={total.tests_run:,}']
|
text = f"{text} (filtered)"
|
||||||
|
stats = [text]
|
||||||
if total.failures:
|
if total.failures:
|
||||||
stats.append(f'failures={total.failures:,}')
|
stats.append(f'failures={total.failures:,}')
|
||||||
if total.skipped:
|
if total.skipped:
|
||||||
|
@ -666,23 +673,31 @@ def display_summary(self):
|
||||||
print(f"Total tests: {' '.join(stats)}")
|
print(f"Total tests: {' '.join(stats)}")
|
||||||
|
|
||||||
# Total test files
|
# Total test files
|
||||||
report = [f'success={len(self.good)}']
|
all_tests = [self.good, self.bad, self.rerun,
|
||||||
if self.bad:
|
self.skipped,
|
||||||
report.append(f'failed={len(self.bad)}')
|
self.environment_changed, self.run_no_tests]
|
||||||
if self.environment_changed:
|
run = sum(map(len, all_tests))
|
||||||
report.append(f'env_changed={len(self.environment_changed)}')
|
text = f'run={run}'
|
||||||
if self.skipped:
|
if not forever:
|
||||||
report.append(f'skipped={len(self.skipped)}')
|
ntest = len(first_runtests.tests)
|
||||||
if self.resource_denied:
|
text = f"{text}/{ntest}"
|
||||||
report.append(f'resource_denied={len(self.resource_denied)}')
|
if filtered:
|
||||||
if self.rerun:
|
text = f"{text} (filtered)"
|
||||||
report.append(f'rerun={len(self.rerun)}')
|
report = [text]
|
||||||
if self.run_no_tests:
|
for name, tests in (
|
||||||
report.append(f'run_no_tests={len(self.run_no_tests)}')
|
('failed', self.bad),
|
||||||
|
('env_changed', self.environment_changed),
|
||||||
|
('skipped', self.skipped),
|
||||||
|
('resource_denied', self.resource_denied),
|
||||||
|
('rerun', self.rerun),
|
||||||
|
('run_no_tests', self.run_no_tests),
|
||||||
|
):
|
||||||
|
if tests:
|
||||||
|
report.append(f'{name}={len(tests)}')
|
||||||
print(f"Total test files: {' '.join(report)}")
|
print(f"Total test files: {' '.join(report)}")
|
||||||
|
|
||||||
# Result
|
# Result
|
||||||
result = self.get_tests_result()
|
result = self.get_tests_state()
|
||||||
print(f"Result: {result}")
|
print(f"Result: {result}")
|
||||||
|
|
||||||
def save_xml_result(self):
|
def save_xml_result(self):
|
||||||
|
@ -742,6 +757,9 @@ def set_temp_dir(self):
|
||||||
|
|
||||||
self.tmp_dir = os.path.abspath(self.tmp_dir)
|
self.tmp_dir = os.path.abspath(self.tmp_dir)
|
||||||
|
|
||||||
|
def is_worker(self):
|
||||||
|
return (self.ns.worker_args is not None)
|
||||||
|
|
||||||
def create_temp_dir(self):
|
def create_temp_dir(self):
|
||||||
os.makedirs(self.tmp_dir, exist_ok=True)
|
os.makedirs(self.tmp_dir, exist_ok=True)
|
||||||
|
|
||||||
|
@ -754,7 +772,8 @@ def create_temp_dir(self):
|
||||||
nounce = random.randint(0, 1_000_000)
|
nounce = random.randint(0, 1_000_000)
|
||||||
else:
|
else:
|
||||||
nounce = os.getpid()
|
nounce = os.getpid()
|
||||||
if self.worker_test_name is not None:
|
|
||||||
|
if self.is_worker():
|
||||||
test_cwd = 'test_python_worker_{}'.format(nounce)
|
test_cwd = 'test_python_worker_{}'.format(nounce)
|
||||||
else:
|
else:
|
||||||
test_cwd = 'test_python_{}'.format(nounce)
|
test_cwd = 'test_python_{}'.format(nounce)
|
||||||
|
@ -817,48 +836,53 @@ def getloadavg(self):
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
def get_exitcode(self):
|
||||||
|
exitcode = 0
|
||||||
|
if self.bad:
|
||||||
|
exitcode = EXITCODE_BAD_TEST
|
||||||
|
elif self.interrupted:
|
||||||
|
exitcode = EXITCODE_INTERRUPTED
|
||||||
|
elif self.ns.fail_env_changed and self.environment_changed:
|
||||||
|
exitcode = EXITCODE_ENV_CHANGED
|
||||||
|
elif self.no_tests_run():
|
||||||
|
exitcode = EXITCODE_NO_TESTS_RAN
|
||||||
|
elif self.rerun and self.ns.fail_rerun:
|
||||||
|
exitcode = EXITCODE_BAD_TEST
|
||||||
|
return exitcode
|
||||||
|
|
||||||
|
def action_run_tests(self):
|
||||||
|
self.run_tests()
|
||||||
|
self.display_result()
|
||||||
|
|
||||||
|
need_rerun = self.need_rerun
|
||||||
|
if self.ns.rerun and need_rerun:
|
||||||
|
self.rerun_failed_tests(need_rerun)
|
||||||
|
|
||||||
|
self.display_summary()
|
||||||
|
self.finalize()
|
||||||
|
|
||||||
def _main(self, tests, kwargs):
|
def _main(self, tests, kwargs):
|
||||||
if self.worker_test_name is not None:
|
if self.is_worker():
|
||||||
from test.libregrtest.runtest_mp import run_tests_worker
|
from test.libregrtest.runtest_mp import run_tests_worker
|
||||||
run_tests_worker(self.ns, self.worker_test_name)
|
run_tests_worker(self.ns.worker_args)
|
||||||
|
return
|
||||||
|
|
||||||
if self.ns.wait:
|
if self.ns.wait:
|
||||||
input("Press any key to continue...")
|
input("Press any key to continue...")
|
||||||
|
|
||||||
support.PGO = self.ns.pgo
|
|
||||||
support.PGO_EXTENDED = self.ns.pgo_extended
|
|
||||||
|
|
||||||
setup_tests(self.ns)
|
setup_tests(self.ns)
|
||||||
|
|
||||||
self.find_tests(tests)
|
self.find_tests(tests)
|
||||||
|
|
||||||
|
exitcode = 0
|
||||||
if self.ns.list_tests:
|
if self.ns.list_tests:
|
||||||
self.list_tests()
|
self.list_tests()
|
||||||
sys.exit(0)
|
elif self.ns.list_cases:
|
||||||
|
|
||||||
if self.ns.list_cases:
|
|
||||||
self.list_cases()
|
self.list_cases()
|
||||||
sys.exit(0)
|
else:
|
||||||
|
self.action_run_tests()
|
||||||
|
exitcode = self.get_exitcode()
|
||||||
|
|
||||||
self.run_tests()
|
sys.exit(exitcode)
|
||||||
self.display_result()
|
|
||||||
|
|
||||||
if self.ns.verbose2 and self.bad:
|
|
||||||
self.rerun_failed_tests()
|
|
||||||
|
|
||||||
self.finalize()
|
|
||||||
|
|
||||||
self.save_xml_result()
|
|
||||||
|
|
||||||
if self.bad:
|
|
||||||
sys.exit(EXITCODE_BAD_TEST)
|
|
||||||
if self.interrupted:
|
|
||||||
sys.exit(EXITCODE_INTERRUPTED)
|
|
||||||
if self.ns.fail_env_changed and self.environment_changed:
|
|
||||||
sys.exit(EXITCODE_ENV_CHANGED)
|
|
||||||
if self.no_tests_run():
|
|
||||||
sys.exit(EXITCODE_NO_TESTS_RAN)
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
|
|
||||||
def main(tests=None, **kwargs):
|
def main(tests=None, **kwargs):
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
import dataclasses
|
import dataclasses
|
||||||
import doctest
|
import doctest
|
||||||
import faulthandler
|
import faulthandler
|
||||||
import functools
|
|
||||||
import gc
|
import gc
|
||||||
import importlib
|
import importlib
|
||||||
import io
|
import io
|
||||||
|
@ -20,6 +19,10 @@
|
||||||
from test.libregrtest.utils import clear_caches, format_duration, print_warning
|
from test.libregrtest.utils import clear_caches, format_duration, print_warning
|
||||||
|
|
||||||
|
|
||||||
|
MatchTests = list[str]
|
||||||
|
MatchTestsDict = dict[str, MatchTests]
|
||||||
|
|
||||||
|
|
||||||
# Avoid enum.Enum to reduce the number of imports when tests are run
|
# Avoid enum.Enum to reduce the number of imports when tests are run
|
||||||
class State:
|
class State:
|
||||||
PASSED = "PASSED"
|
PASSED = "PASSED"
|
||||||
|
@ -56,6 +59,41 @@ def has_meaningful_duration(state):
|
||||||
State.MULTIPROCESSING_ERROR,
|
State.MULTIPROCESSING_ERROR,
|
||||||
State.DID_NOT_RUN}
|
State.DID_NOT_RUN}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def must_stop(state):
|
||||||
|
return state in {
|
||||||
|
State.INTERRUPTED,
|
||||||
|
State.MULTIPROCESSING_ERROR}
|
||||||
|
|
||||||
|
|
||||||
|
# gh-90681: When rerunning tests, we might need to rerun the whole
|
||||||
|
# class or module suite if some its life-cycle hooks fail.
|
||||||
|
# Test level hooks are not affected.
|
||||||
|
_TEST_LIFECYCLE_HOOKS = frozenset((
|
||||||
|
'setUpClass', 'tearDownClass',
|
||||||
|
'setUpModule', 'tearDownModule',
|
||||||
|
))
|
||||||
|
|
||||||
|
def normalize_test_name(test_full_name, *, is_error=False):
|
||||||
|
short_name = test_full_name.split(" ")[0]
|
||||||
|
if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
|
||||||
|
if test_full_name.startswith(('setUpModule (', 'tearDownModule (')):
|
||||||
|
# if setUpModule() or tearDownModule() failed, don't filter
|
||||||
|
# tests with the test file name, don't use use filters.
|
||||||
|
return None
|
||||||
|
|
||||||
|
# This means that we have a failure in a life-cycle hook,
|
||||||
|
# we need to rerun the whole module or class suite.
|
||||||
|
# Basically the error looks like this:
|
||||||
|
# ERROR: setUpClass (test.test_reg_ex.RegTest)
|
||||||
|
# or
|
||||||
|
# ERROR: setUpModule (test.test_reg_ex)
|
||||||
|
# So, we need to parse the class / module name.
|
||||||
|
lpar = test_full_name.index('(')
|
||||||
|
rpar = test_full_name.index(')')
|
||||||
|
return test_full_name[lpar + 1: rpar].split('.')[-1]
|
||||||
|
return short_name
|
||||||
|
|
||||||
|
|
||||||
@dataclasses.dataclass(slots=True)
|
@dataclasses.dataclass(slots=True)
|
||||||
class TestResult:
|
class TestResult:
|
||||||
|
@ -129,6 +167,58 @@ def set_env_changed(self):
|
||||||
if self.state is None or self.state == State.PASSED:
|
if self.state is None or self.state == State.PASSED:
|
||||||
self.state = State.ENV_CHANGED
|
self.state = State.ENV_CHANGED
|
||||||
|
|
||||||
|
def must_stop(self, fail_fast: bool, fail_env_changed: bool) -> bool:
|
||||||
|
if State.must_stop(self.state):
|
||||||
|
return True
|
||||||
|
if fail_fast and self.is_failed(fail_env_changed):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_rerun_match_tests(self):
|
||||||
|
match_tests = []
|
||||||
|
|
||||||
|
errors = self.errors or []
|
||||||
|
failures = self.failures or []
|
||||||
|
for error_list, is_error in (
|
||||||
|
(errors, True),
|
||||||
|
(failures, False),
|
||||||
|
):
|
||||||
|
for full_name, *_ in error_list:
|
||||||
|
match_name = normalize_test_name(full_name, is_error=is_error)
|
||||||
|
if match_name is None:
|
||||||
|
# 'setUpModule (test.test_sys)': don't filter tests
|
||||||
|
return None
|
||||||
|
if not match_name:
|
||||||
|
error_type = "ERROR" if is_error else "FAIL"
|
||||||
|
print_warning(f"rerun failed to parse {error_type} test name: "
|
||||||
|
f"{full_name!r}: don't filter tests")
|
||||||
|
return None
|
||||||
|
match_tests.append(match_name)
|
||||||
|
|
||||||
|
return match_tests
|
||||||
|
|
||||||
|
|
||||||
|
@dataclasses.dataclass(slots=True, frozen=True)
|
||||||
|
class RunTests:
|
||||||
|
tests: list[str]
|
||||||
|
match_tests: MatchTestsDict | None = None
|
||||||
|
rerun: bool = False
|
||||||
|
forever: bool = False
|
||||||
|
|
||||||
|
def get_match_tests(self, test_name) -> MatchTests | None:
|
||||||
|
if self.match_tests is not None:
|
||||||
|
return self.match_tests.get(test_name, None)
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def iter_tests(self):
|
||||||
|
tests = tuple(self.tests)
|
||||||
|
if self.forever:
|
||||||
|
while True:
|
||||||
|
yield from tests
|
||||||
|
else:
|
||||||
|
yield from tests
|
||||||
|
|
||||||
|
|
||||||
# Minimum duration of a test to display its duration or to mention that
|
# Minimum duration of a test to display its duration or to mention that
|
||||||
# the test is running in background
|
# the test is running in background
|
||||||
|
@ -147,9 +237,6 @@ def set_env_changed(self):
|
||||||
"test_multiprocessing_spawn",
|
"test_multiprocessing_spawn",
|
||||||
}
|
}
|
||||||
|
|
||||||
# Storage of uncollectable objects
|
|
||||||
FOUND_GARBAGE = []
|
|
||||||
|
|
||||||
|
|
||||||
def findtestdir(path=None):
|
def findtestdir(path=None):
|
||||||
return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
|
return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
|
||||||
|
@ -189,31 +276,41 @@ def split_test_packages(tests, *, testdir=None, exclude=(),
|
||||||
return splitted
|
return splitted
|
||||||
|
|
||||||
|
|
||||||
def get_abs_module(ns: Namespace, test_name: str) -> str:
|
def abs_module_name(test_name: str, test_dir: str | None) -> str:
|
||||||
if test_name.startswith('test.') or ns.testdir:
|
if test_name.startswith('test.') or test_dir:
|
||||||
return test_name
|
return test_name
|
||||||
else:
|
else:
|
||||||
# Import it from the test package
|
# Import it from the test package
|
||||||
return 'test.' + test_name
|
return 'test.' + test_name
|
||||||
|
|
||||||
|
|
||||||
def _runtest_capture_output_timeout_junit(result: TestResult, ns: Namespace) -> None:
|
def setup_support(ns: Namespace):
|
||||||
|
support.PGO = ns.pgo
|
||||||
|
support.PGO_EXTENDED = ns.pgo_extended
|
||||||
|
support.set_match_tests(ns.match_tests, ns.ignore_tests)
|
||||||
|
support.failfast = ns.failfast
|
||||||
|
support.verbose = ns.verbose
|
||||||
|
if ns.xmlpath:
|
||||||
|
support.junit_xml_list = []
|
||||||
|
else:
|
||||||
|
support.junit_xml_list = None
|
||||||
|
|
||||||
|
|
||||||
|
def _runtest(result: TestResult, ns: Namespace) -> None:
|
||||||
# Capture stdout and stderr, set faulthandler timeout,
|
# Capture stdout and stderr, set faulthandler timeout,
|
||||||
# and create JUnit XML report.
|
# and create JUnit XML report.
|
||||||
|
verbose = ns.verbose
|
||||||
output_on_failure = ns.verbose3
|
output_on_failure = ns.verbose3
|
||||||
|
timeout = ns.timeout
|
||||||
|
|
||||||
use_timeout = (
|
use_timeout = (
|
||||||
ns.timeout is not None and threading_helper.can_start_thread
|
timeout is not None and threading_helper.can_start_thread
|
||||||
)
|
)
|
||||||
if use_timeout:
|
if use_timeout:
|
||||||
faulthandler.dump_traceback_later(ns.timeout, exit=True)
|
faulthandler.dump_traceback_later(timeout, exit=True)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
support.set_match_tests(ns.match_tests, ns.ignore_tests)
|
setup_support(ns)
|
||||||
support.junit_xml_list = xml_list = [] if ns.xmlpath else None
|
|
||||||
if ns.failfast:
|
|
||||||
support.failfast = True
|
|
||||||
|
|
||||||
if output_on_failure:
|
if output_on_failure:
|
||||||
support.verbose = True
|
support.verbose = True
|
||||||
|
@ -247,11 +344,10 @@ def _runtest_capture_output_timeout_junit(result: TestResult, ns: Namespace) ->
|
||||||
sys.stderr.flush()
|
sys.stderr.flush()
|
||||||
else:
|
else:
|
||||||
# Tell tests to be moderately quiet
|
# Tell tests to be moderately quiet
|
||||||
support.verbose = ns.verbose
|
support.verbose = verbose
|
||||||
|
_runtest_env_changed_exc(result, ns, display_failure=not verbose)
|
||||||
_runtest_env_changed_exc(result, ns,
|
|
||||||
display_failure=not ns.verbose)
|
|
||||||
|
|
||||||
|
xml_list = support.junit_xml_list
|
||||||
if xml_list:
|
if xml_list:
|
||||||
import xml.etree.ElementTree as ET
|
import xml.etree.ElementTree as ET
|
||||||
result.xml_data = [ET.tostring(x).decode('us-ascii')
|
result.xml_data = [ET.tostring(x).decode('us-ascii')
|
||||||
|
@ -276,7 +372,7 @@ def runtest(ns: Namespace, test_name: str) -> TestResult:
|
||||||
start_time = time.perf_counter()
|
start_time = time.perf_counter()
|
||||||
result = TestResult(test_name)
|
result = TestResult(test_name)
|
||||||
try:
|
try:
|
||||||
_runtest_capture_output_timeout_junit(result, ns)
|
_runtest(result, ns)
|
||||||
except:
|
except:
|
||||||
if not ns.pgo:
|
if not ns.pgo:
|
||||||
msg = traceback.format_exc()
|
msg = traceback.format_exc()
|
||||||
|
@ -287,9 +383,9 @@ def runtest(ns: Namespace, test_name: str) -> TestResult:
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def _test_module(the_module):
|
def run_unittest(test_mod):
|
||||||
loader = unittest.TestLoader()
|
loader = unittest.TestLoader()
|
||||||
tests = loader.loadTestsFromModule(the_module)
|
tests = loader.loadTestsFromModule(test_mod)
|
||||||
for error in loader.errors:
|
for error in loader.errors:
|
||||||
print(error, file=sys.stderr)
|
print(error, file=sys.stderr)
|
||||||
if loader.errors:
|
if loader.errors:
|
||||||
|
@ -304,7 +400,6 @@ def save_env(ns: Namespace, test_name: str):
|
||||||
def regrtest_runner(result, test_func, ns) -> None:
|
def regrtest_runner(result, test_func, ns) -> None:
|
||||||
# Run test_func(), collect statistics, and detect reference and memory
|
# Run test_func(), collect statistics, and detect reference and memory
|
||||||
# leaks.
|
# leaks.
|
||||||
|
|
||||||
if ns.huntrleaks:
|
if ns.huntrleaks:
|
||||||
from test.libregrtest.refleak import dash_R
|
from test.libregrtest.refleak import dash_R
|
||||||
refleak, test_result = dash_R(ns, result.test_name, test_func)
|
refleak, test_result = dash_R(ns, result.test_name, test_func)
|
||||||
|
@ -332,24 +427,27 @@ def regrtest_runner(result, test_func, ns) -> None:
|
||||||
result.stats = stats
|
result.stats = stats
|
||||||
|
|
||||||
|
|
||||||
|
# Storage of uncollectable objects
|
||||||
|
FOUND_GARBAGE = []
|
||||||
|
|
||||||
|
|
||||||
def _load_run_test(result: TestResult, ns: Namespace) -> None:
|
def _load_run_test(result: TestResult, ns: Namespace) -> None:
|
||||||
# Load the test function, run the test function.
|
# Load the test function, run the test function.
|
||||||
|
module_name = abs_module_name(result.test_name, ns.testdir)
|
||||||
|
|
||||||
abstest = get_abs_module(ns, result.test_name)
|
# Remove the module from sys.module to reload it if it was already imported
|
||||||
|
sys.modules.pop(module_name, None)
|
||||||
|
|
||||||
# remove the module from sys.module to reload it if it was already imported
|
test_mod = importlib.import_module(module_name)
|
||||||
try:
|
|
||||||
del sys.modules[abstest]
|
|
||||||
except KeyError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
the_module = importlib.import_module(abstest)
|
|
||||||
|
|
||||||
# If the test has a test_main, that will run the appropriate
|
# If the test has a test_main, that will run the appropriate
|
||||||
# tests. If not, use normal unittest test loading.
|
# tests. If not, use normal unittest test runner.
|
||||||
test_func = getattr(the_module, "test_main", None)
|
test_main = getattr(test_mod, "test_main", None)
|
||||||
if test_func is None:
|
if test_main is not None:
|
||||||
test_func = functools.partial(_test_module, the_module)
|
test_func = test_main
|
||||||
|
else:
|
||||||
|
def test_func():
|
||||||
|
return run_unittest(test_mod)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
with save_env(ns, result.test_name):
|
with save_env(ns, result.test_name):
|
||||||
|
@ -361,12 +459,12 @@ def _load_run_test(result: TestResult, ns: Namespace) -> None:
|
||||||
# failures.
|
# failures.
|
||||||
support.gc_collect()
|
support.gc_collect()
|
||||||
|
|
||||||
cleanup_test_droppings(result.test_name, ns.verbose)
|
remove_testfn(result.test_name, ns.verbose)
|
||||||
|
|
||||||
if gc.garbage:
|
if gc.garbage:
|
||||||
support.environment_altered = True
|
support.environment_altered = True
|
||||||
print_warning(f"{result.test_name} created {len(gc.garbage)} "
|
print_warning(f"{result.test_name} created {len(gc.garbage)} "
|
||||||
f"uncollectable object(s).")
|
f"uncollectable object(s)")
|
||||||
|
|
||||||
# move the uncollectable objects somewhere,
|
# move the uncollectable objects somewhere,
|
||||||
# so we don't see them again
|
# so we don't see them again
|
||||||
|
@@ -444,35 +542,37 @@ def _runtest_env_changed_exc(result: TestResult, ns: Namespace,
     result.state = State.PASSED
 
 
-def cleanup_test_droppings(test_name: str, verbose: int) -> None:
-    # Try to clean up junk commonly left behind. While tests shouldn't leave
-    # any files or directories behind, when a test fails that can be tedious
-    # for it to arrange. The consequences can be especially nasty on Windows,
-    # since if a test leaves a file open, it cannot be deleted by name (while
-    # there's nothing we can do about that here either, we can display the
-    # name of the offending test, which is a real help).
-    for name in (os_helper.TESTFN,):
-        if not os.path.exists(name):
-            continue
+def remove_testfn(test_name: str, verbose: int) -> None:
+    # Try to clean up os_helper.TESTFN if left behind.
+    #
+    # While tests shouldn't leave any files or directories behind, when a test
+    # fails that can be tedious for it to arrange. The consequences can be
+    # especially nasty on Windows, since if a test leaves a file open, it
+    # cannot be deleted by name (while there's nothing we can do about that
+    # here either, we can display the name of the offending test, which is a
+    # real help).
+    name = os_helper.TESTFN
+    if not os.path.exists(name):
+        return
 
     if os.path.isdir(name):
         import shutil
         kind, nuker = "directory", shutil.rmtree
     elif os.path.isfile(name):
         kind, nuker = "file", os.unlink
     else:
         raise RuntimeError(f"os.path says {name!r} exists but is neither "
                            f"directory nor file")
 
     if verbose:
         print_warning(f"{test_name} left behind {kind} {name!r}")
         support.environment_altered = True
 
     try:
         import stat
         # fix possible permissions problems that might prevent cleanup
         os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
         nuker(name)
     except Exception as exc:
         print_warning(f"{test_name} left behind {kind} {name!r} "
                       f"and it couldn't be removed: {exc}")
@@ -19,8 +19,8 @@
 from test.libregrtest.cmdline import Namespace
 from test.libregrtest.main import Regrtest
 from test.libregrtest.runtest import (
-    runtest, TestResult, State,
-    PROGRESS_MIN_TIME)
+    runtest, TestResult, State, PROGRESS_MIN_TIME,
+    MatchTests, RunTests)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.utils import format_duration, print_warning
 
@@ -44,26 +44,54 @@
 USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg"))
 
 
-def must_stop(result: TestResult, ns: Namespace) -> bool:
-    if result.state == State.INTERRUPTED:
-        return True
-    if ns.failfast and result.is_failed(ns.fail_env_changed):
-        return True
-    return False
-
-
-def parse_worker_args(worker_args) -> tuple[Namespace, str]:
-    ns_dict, test_name = json.loads(worker_args)
-    ns = Namespace(**ns_dict)
-    return (ns, test_name)
-
-
-def run_test_in_subprocess(testname: str, ns: Namespace, tmp_dir: str, stdout_fh: TextIO) -> subprocess.Popen:
-    ns_dict = vars(ns)
-    worker_args = (ns_dict, testname)
-    worker_args = json.dumps(worker_args)
-    if ns.python is not None:
-        executable = ns.python
+@dataclasses.dataclass(slots=True)
+class WorkerJob:
+    test_name: str
+    namespace: Namespace
+    rerun: bool = False
+    match_tests: MatchTests | None = None
+
+
+class _EncodeWorkerJob(json.JSONEncoder):
+    def default(self, o: Any) -> dict[str, Any]:
+        match o:
+            case WorkerJob():
+                result = dataclasses.asdict(o)
+                result["__worker_job__"] = True
+                return result
+            case Namespace():
+                result = vars(o)
+                result["__namespace__"] = True
+                return result
+            case _:
+                return super().default(o)
+
+
+def _decode_worker_job(d: dict[str, Any]) -> WorkerJob | dict[str, Any]:
+    if "__worker_job__" in d:
+        d.pop('__worker_job__')
+        return WorkerJob(**d)
+    if "__namespace__" in d:
+        d.pop('__namespace__')
+        return Namespace(**d)
+    else:
+        return d
+
+
+def _parse_worker_args(worker_json: str) -> tuple[Namespace, str]:
+    return json.loads(worker_json,
+                      object_hook=_decode_worker_job)
+
+
+def run_test_in_subprocess(worker_job: WorkerJob,
+                           output_file: TextIO,
+                           tmp_dir: str | None = None) -> subprocess.Popen:
+    ns = worker_job.namespace
+    python = ns.python
+    worker_args = json.dumps(worker_job, cls=_EncodeWorkerJob)
+
+    if python is not None:
+        executable = python
     else:
         executable = [sys.executable]
     cmd = [*executable, *support.args_from_interpreter_flags(),
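The encoder/decoder pair above is the standard json pattern of a custom
JSONEncoder.default() plus an object_hook. A minimal, self-contained sketch of
the same round trip (Job and its field names are illustrative stand-ins, not
the real WorkerJob):

import dataclasses
import json
from argparse import Namespace


@dataclasses.dataclass
class Job:                      # stand-in for WorkerJob
    test_name: str
    namespace: Namespace
    rerun: bool = False


class _Encode(json.JSONEncoder):
    def default(self, o):
        # Tag each non-JSON type with a marker key so the decoder can
        # rebuild the right object on the other side.
        if isinstance(o, Job):
            return {**dataclasses.asdict(o), "__job__": True}
        if isinstance(o, Namespace):
            return {**vars(o), "__namespace__": True}
        return super().default(o)


def _decode(d):
    if d.pop("__job__", False):
        return Job(**d)
    if d.pop("__namespace__", False):
        return Namespace(**d)
    return d


job = Job("test_os", Namespace(verbose=0), rerun=True)
text = json.dumps(job, cls=_Encode)                   # parent: serialize the job
assert json.loads(text, object_hook=_decode) == job   # worker: rebuild the job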
@@ -82,9 +110,9 @@ def run_test_in_subprocess(testname: str, ns: Namespace, tmp_dir: str, stdout_fh
     # sysconfig.is_python_build() is true. See issue 15300.
     kw = dict(
         env=env,
-        stdout=stdout_fh,
+        stdout=output_file,
         # bpo-45410: Write stderr into stdout to keep messages order
-        stderr=stdout_fh,
+        stderr=output_file,
         text=True,
         close_fds=(os.name != 'nt'),
         cwd=os_helper.SAVEDCWD,
@@ -94,11 +122,27 @@ def run_test_in_subprocess(testname: str, ns: Namespace, tmp_dir: str, stdout_fh
     return subprocess.Popen(cmd, **kw)
 
 
-def run_tests_worker(ns: Namespace, test_name: str) -> NoReturn:
+def run_tests_worker(worker_json: str) -> NoReturn:
+    worker_job = _parse_worker_args(worker_json)
+    ns = worker_job.namespace
+    test_name = worker_job.test_name
+    rerun = worker_job.rerun
+    match_tests = worker_job.match_tests
+
     setup_tests(ns)
 
-    result = runtest(ns, test_name)
+    if rerun:
+        if match_tests:
+            matching = "matching: " + ", ".join(match_tests)
+            print(f"Re-running {test_name} in verbose mode ({matching})", flush=True)
+        else:
+            print(f"Re-running {test_name} in verbose mode", flush=True)
+        ns.verbose = True
+
+        if match_tests is not None:
+            ns.match_tests = match_tests
+
+    result = runtest(ns, test_name)
 
     print()   # Force a newline (just in case)
 
     # Serialize TestResult as dict in JSON
@@ -148,11 +192,13 @@ class TestWorkerProcess(threading.Thread):
     def __init__(self, worker_id: int, runner: "MultiprocessTestRunner") -> None:
         super().__init__()
         self.worker_id = worker_id
+        self.runtests = runner.runtests
         self.pending = runner.pending
         self.output = runner.output
         self.ns = runner.ns
         self.timeout = runner.worker_timeout
         self.regrtest = runner.regrtest
+        self.rerun = runner.rerun
         self.current_test_name = None
         self.start_time = None
         self._popen = None
@@ -216,10 +262,11 @@ def mp_result_error(
     ) -> MultiprocessResult:
         return MultiprocessResult(test_result, stdout, err_msg)
 
-    def _run_process(self, test_name: str, tmp_dir: str, stdout_fh: TextIO) -> int:
-        self.current_test_name = test_name
+    def _run_process(self, worker_job, output_file: TextIO,
+                     tmp_dir: str | None = None) -> int:
+        self.current_test_name = worker_job.test_name
         try:
-            popen = run_test_in_subprocess(test_name, self.ns, tmp_dir, stdout_fh)
+            popen = run_test_in_subprocess(worker_job, output_file, tmp_dir)
 
             self._killed = False
             self._popen = popen
@@ -277,9 +324,15 @@ def _runtest(self, test_name: str) -> MultiprocessResult:
         else:
             encoding = sys.stdout.encoding
 
+        match_tests = self.runtests.get_match_tests(test_name)
+
         # gh-94026: Write stdout+stderr to a tempfile as workaround for
         # non-blocking pipes on Emscripten with NodeJS.
-        with tempfile.TemporaryFile('w+', encoding=encoding) as stdout_fh:
+        with tempfile.TemporaryFile('w+', encoding=encoding) as stdout_file:
+            worker_job = WorkerJob(test_name,
+                                   namespace=self.ns,
+                                   rerun=self.rerun,
+                                   match_tests=match_tests)
             # gh-93353: Check for leaked temporary files in the parent process,
             # since the deletion of temporary files can happen late during
             # Python finalization: too late for libregrtest.
@@ -290,17 +343,17 @@ def _runtest(self, test_name: str) -> MultiprocessResult:
                 tmp_dir = tempfile.mkdtemp(prefix="test_python_")
                 tmp_dir = os.path.abspath(tmp_dir)
                 try:
-                    retcode = self._run_process(test_name, tmp_dir, stdout_fh)
+                    retcode = self._run_process(worker_job, stdout_file, tmp_dir)
                 finally:
                     tmp_files = os.listdir(tmp_dir)
                     os_helper.rmtree(tmp_dir)
             else:
-                retcode = self._run_process(test_name, None, stdout_fh)
+                retcode = self._run_process(worker_job, stdout_file)
                 tmp_files = ()
-            stdout_fh.seek(0)
+            stdout_file.seek(0)
 
             try:
-                stdout = stdout_fh.read().strip()
+                stdout = stdout_file.read().strip()
             except Exception as exc:
                 # gh-101634: Catch UnicodeDecodeError if stdout cannot be
                 # decoded from encoding
@@ -342,6 +395,8 @@ def _runtest(self, test_name: str) -> MultiprocessResult:
         return MultiprocessResult(result, stdout)
 
     def run(self) -> None:
+        fail_fast = self.ns.failfast
+        fail_env_changed = self.ns.fail_env_changed
         while not self._stopped:
             try:
                 try:
@@ -354,7 +409,7 @@ def run(self) -> None:
                 mp_result.result.duration = time.monotonic() - self.start_time
                 self.output.put((False, mp_result))
 
-                if must_stop(mp_result.result, self.ns):
+                if mp_result.result.must_stop(fail_fast, fail_env_changed):
                     break
             except ExitThread:
                 break
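must_stop() is no longer a module-level helper taking the whole Namespace (see
the function removed earlier in this file); it is now a TestResult method
driven by the two flags read at the top of run(). A plausible equivalent of
that method, written here as a free function for illustration only:

from test.libregrtest.runtest import State, TestResult


def must_stop(result: TestResult, fail_fast: bool, fail_env_changed: bool) -> bool:
    # Same decision as the removed must_stop(result, ns) helper, but the
    # fail-fast flags are passed explicitly instead of being read from ns.
    if result.state == State.INTERRUPTED:
        return True
    if fail_fast and result.is_failed(fail_env_changed):
        return True
    return False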
@@ -410,29 +465,36 @@ def get_running(workers: list[TestWorkerProcess]) -> list[TestWorkerProcess]:
 
 
 class MultiprocessTestRunner:
-    def __init__(self, regrtest: Regrtest) -> None:
+    def __init__(self, regrtest: Regrtest, runtests: RunTests) -> None:
+        ns = regrtest.ns
+        timeout = ns.timeout
+
         self.regrtest = regrtest
+        self.runtests = runtests
+        self.rerun = runtests.rerun
         self.log = self.regrtest.log
-        self.ns = regrtest.ns
+        self.ns = ns
         self.output: queue.Queue[QueueOutput] = queue.Queue()
-        self.pending = MultiprocessIterator(self.regrtest.tests)
-        if self.ns.timeout is not None:
+        tests_iter = runtests.iter_tests()
+        self.pending = MultiprocessIterator(tests_iter)
+        if timeout is not None:
             # Rely on faulthandler to kill a worker process. This timeout is
             # when faulthandler fails to kill a worker process. Give a maximum
             # of 5 minutes to faulthandler to kill the worker.
-            self.worker_timeout = min(self.ns.timeout * 1.5,
-                                      self.ns.timeout + 5 * 60)
+            self.worker_timeout = min(timeout * 1.5, timeout + 5 * 60)
         else:
             self.worker_timeout = None
         self.workers = None
 
     def start_workers(self) -> None:
+        use_mp = self.ns.use_mp
+        timeout = self.ns.timeout
         self.workers = [TestWorkerProcess(index, self)
-                        for index in range(1, self.ns.use_mp + 1)]
+                        for index in range(1, use_mp + 1)]
         msg = f"Run tests in parallel using {len(self.workers)} child processes"
-        if self.ns.timeout:
+        if timeout:
             msg += (" (timeout: %s, worker timeout: %s)"
-                    % (format_duration(self.ns.timeout),
+                    % (format_duration(timeout),
                        format_duration(self.worker_timeout)))
         self.log(msg)
         for worker in self.workers:
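The worker timeout computed in __init__() above is the faulthandler timeout
padded by at most five minutes: for short timeouts the 1.5x factor wins, for
long ones the +300 second cap wins. A tiny check of that arithmetic:

def worker_timeout(timeout: float) -> float:
    # Same expression as in MultiprocessTestRunner.__init__() above.
    return min(timeout * 1.5, timeout + 5 * 60)


assert worker_timeout(60) == 90       # 1.5x wins for short timeouts
assert worker_timeout(1200) == 1500   # the +5 minute cap wins for long ones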
@@ -446,6 +508,7 @@ def stop_workers(self) -> None:
             worker.wait_stopped(start_time)
 
     def _get_result(self) -> QueueOutput | None:
+        pgo = self.ns.pgo
         use_faulthandler = (self.ns.timeout is not None)
         timeout = PROGRESS_UPDATE
 
@@ -464,7 +527,7 @@ def _get_result(self) -> QueueOutput | None:
 
             # display progress
             running = get_running(self.workers)
-            if running and not self.ns.pgo:
+            if running and not pgo:
                 self.log('running: %s' % ', '.join(running))
 
         # all worker threads are done: consume pending results
@@ -475,42 +538,46 @@ def _get_result(self) -> QueueOutput | None:
 
     def display_result(self, mp_result: MultiprocessResult) -> None:
         result = mp_result.result
+        pgo = self.ns.pgo
+
         text = str(result)
         if mp_result.err_msg:
             # MULTIPROCESSING_ERROR
             text += ' (%s)' % mp_result.err_msg
-        elif (result.duration >= PROGRESS_MIN_TIME and not self.ns.pgo):
+        elif (result.duration >= PROGRESS_MIN_TIME and not pgo):
             text += ' (%s)' % format_duration(result.duration)
         running = get_running(self.workers)
-        if running and not self.ns.pgo:
+        if running and not pgo:
             text += ' -- running: %s' % ', '.join(running)
         self.regrtest.display_progress(self.test_index, text)
 
     def _process_result(self, item: QueueOutput) -> bool:
         """Returns True if test runner must stop."""
+        rerun = self.runtests.rerun
         if item[0]:
             # Thread got an exception
             format_exc = item[1]
             print_warning(f"regrtest worker thread failed: {format_exc}")
             result = TestResult("<regrtest worker>", state=State.MULTIPROCESSING_ERROR)
-            self.regrtest.accumulate_result(result)
-            return True
+            self.regrtest.accumulate_result(result, rerun=rerun)
+            return result
 
         self.test_index += 1
         mp_result = item[1]
-        self.regrtest.accumulate_result(mp_result.result)
+        result = mp_result.result
+        self.regrtest.accumulate_result(result, rerun=rerun)
         self.display_result(mp_result)
 
         if mp_result.worker_stdout:
             print(mp_result.worker_stdout, flush=True)
 
-        if must_stop(mp_result.result, self.ns):
-            return True
-
-        return False
+        return result
 
     def run_tests(self) -> None:
+        fail_fast = self.ns.failfast
+        fail_env_changed = self.ns.fail_env_changed
+        timeout = self.ns.timeout
+
         self.start_workers()
 
         self.test_index = 0
@@ -520,14 +587,14 @@ def run_tests(self) -> None:
                 if item is None:
                     break
 
-                stop = self._process_result(item)
-                if stop:
+                result = self._process_result(item)
+                if result.must_stop(fail_fast, fail_env_changed):
                     break
         except KeyboardInterrupt:
             print()
             self.regrtest.interrupted = True
         finally:
-            if self.ns.timeout is not None:
+            if timeout is not None:
                 faulthandler.cancel_dump_traceback_later()
 
             # Always ensure that all worker processes are no longer
@ -536,8 +603,8 @@ def run_tests(self) -> None:
|
||||||
self.stop_workers()
|
self.stop_workers()
|
||||||
|
|
||||||
|
|
||||||
def run_tests_multiprocess(regrtest: Regrtest) -> None:
|
def run_tests_multiprocess(regrtest: Regrtest, runtests: RunTests) -> None:
|
||||||
MultiprocessTestRunner(regrtest).run_tests()
|
MultiprocessTestRunner(regrtest, runtests).run_tests()
|
||||||
|
|
||||||
|
|
||||||
class EncodeTestResult(json.JSONEncoder):
|
class EncodeTestResult(json.JSONEncoder):
|
||||||
|
@@ -552,7 +619,7 @@ def default(self, o: Any) -> dict[str, Any]:
         return super().default(o)
 
 
-def decode_test_result(d: dict[str, Any]) -> TestResult | TestStats | dict[str, Any]:
+def decode_test_result(d: dict[str, Any]) -> TestResult | dict[str, Any]:
     """Decode a TestResult (sub)class object from a JSON dict."""
 
     if "__test_result__" not in d:
@@ -31,7 +31,7 @@ def format_duration(seconds):
     return ' '.join(parts)
 
 
-def removepy(names):
+def strip_py_suffix(names: list[str]):
     if not names:
         return
     for idx, name in enumerate(names):
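Only the head of the renamed helper is visible in this hunk; a sketch of the
rest, assuming the body still just strips a trailing ".py" in place (the loop
body below is an assumption, not part of the diff):

def strip_py_suffix(names: list[str]) -> None:
    if not names:
        return
    for idx, name in enumerate(names):
        if name.endswith('.py'):          # assumed body, matching the old removepy() behaviour
            names[idx] = name[:-3]


names = ['test_os.py', 'test_sys']
strip_py_suffix(names)
assert names == ['test_os', 'test_sys']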
@@ -1189,7 +1189,6 @@ def _is_full_match_test(pattern):
 def set_match_tests(accept_patterns=None, ignore_patterns=None):
     global _match_test_func, _accept_test_patterns, _ignore_test_patterns
 
-
     if accept_patterns is None:
         accept_patterns = ()
     if ignore_patterns is None:
@@ -8,6 +8,7 @@
 import time
 import traceback
 import unittest
+from test import support
 
 class RegressionTestResult(unittest.TextTestResult):
     USE_XML = False
|
||||||
def addFailure(self, test, err):
|
def addFailure(self, test, err):
|
||||||
self._add_result(test, True, failure=self.__makeErrorDict(*err))
|
self._add_result(test, True, failure=self.__makeErrorDict(*err))
|
||||||
super().addFailure(test, err)
|
super().addFailure(test, err)
|
||||||
|
if support.failfast:
|
||||||
|
self.stop()
|
||||||
|
|
||||||
def addSkip(self, test, reason):
|
def addSkip(self, test, reason):
|
||||||
self._add_result(test, skipped=reason)
|
self._add_result(test, skipped=reason)
|
||||||
|
|
|
@@ -5,6 +5,7 @@
 """
 
 import contextlib
+import dataclasses
 import glob
 import io
 import locale
|
||||||
from test import support
|
from test import support
|
||||||
from test.support import os_helper, TestStats
|
from test.support import os_helper, TestStats
|
||||||
from test.libregrtest import utils, setup
|
from test.libregrtest import utils, setup
|
||||||
|
from test.libregrtest.runtest import normalize_test_name
|
||||||
|
|
||||||
if not support.has_subprocess_support:
|
if not support.has_subprocess_support:
|
||||||
raise unittest.SkipTest("test module requires subprocess")
|
raise unittest.SkipTest("test module requires subprocess")
|
||||||
|
@@ -96,11 +98,11 @@ def test_verbose(self):
         ns = libregrtest._parse_args([])
         self.assertEqual(ns.verbose, 0)
 
-    def test_verbose2(self):
-        for opt in '-w', '--verbose2':
+    def test_rerun(self):
+        for opt in '-w', '--rerun', '--verbose2':
             with self.subTest(opt=opt):
                 ns = libregrtest._parse_args([opt])
-                self.assertTrue(ns.verbose2)
+                self.assertTrue(ns.rerun)
 
     def test_verbose3(self):
         for opt in '-W', '--verbose3':
@@ -362,6 +364,13 @@ def test_unknown_option(self):
                          'unrecognized arguments: --unknown-option')
 
 
+@dataclasses.dataclass(slots=True)
+class Rerun:
+    name: str
+    match: str | None
+    success: bool
+
+
 class BaseTestCase(unittest.TestCase):
     TEST_UNIQUE_ID = 1
     TESTNAME_PREFIX = 'test_regrtest_'
|
||||||
|
|
||||||
def check_executed_tests(self, output, tests, skipped=(), failed=(),
|
def check_executed_tests(self, output, tests, skipped=(), failed=(),
|
||||||
env_changed=(), omitted=(),
|
env_changed=(), omitted=(),
|
||||||
rerun={}, run_no_tests=(),
|
rerun=None, run_no_tests=(),
|
||||||
resource_denied=(),
|
resource_denied=(),
|
||||||
randomize=False, interrupted=False,
|
randomize=False, interrupted=False,
|
||||||
fail_env_changed=False,
|
fail_env_changed=False,
|
||||||
*, stats):
|
*, stats, forever=False, filtered=False):
|
||||||
if isinstance(tests, str):
|
if isinstance(tests, str):
|
||||||
tests = [tests]
|
tests = [tests]
|
||||||
if isinstance(skipped, str):
|
if isinstance(skipped, str):
|
||||||
|
@ -445,11 +454,20 @@ def check_executed_tests(self, output, tests, skipped=(), failed=(),
|
||||||
if isinstance(stats, int):
|
if isinstance(stats, int):
|
||||||
stats = TestStats(stats)
|
stats = TestStats(stats)
|
||||||
|
|
||||||
|
rerun_failed = []
|
||||||
|
if rerun is not None:
|
||||||
|
failed = [rerun.name]
|
||||||
|
if not rerun.success:
|
||||||
|
rerun_failed.append(rerun.name)
|
||||||
|
|
||||||
executed = self.parse_executed_tests(output)
|
executed = self.parse_executed_tests(output)
|
||||||
|
total_tests = list(tests)
|
||||||
|
if rerun is not None:
|
||||||
|
total_tests.append(rerun.name)
|
||||||
if randomize:
|
if randomize:
|
||||||
self.assertEqual(set(executed), set(tests), output)
|
self.assertEqual(set(executed), set(total_tests), output)
|
||||||
else:
|
else:
|
||||||
self.assertEqual(executed, tests, output)
|
self.assertEqual(executed, total_tests, output)
|
||||||
|
|
||||||
def plural(count):
|
def plural(count):
|
||||||
return 's' if count != 1 else ''
|
return 's' if count != 1 else ''
|
||||||
|
@@ -465,6 +483,10 @@ def list_regex(line_format, tests):
             regex = list_regex('%s test%s skipped', skipped)
             self.check_line(output, regex)
 
+        if resource_denied:
+            regex = list_regex(r'%s test%s skipped \(resource denied\)', resource_denied)
+            self.check_line(output, regex)
+
         if failed:
             regex = list_regex('%s test%s failed', failed)
             self.check_line(output, regex)
@@ -478,32 +500,36 @@ def list_regex(line_format, tests):
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)
 
-        if rerun:
-            regex = list_regex('%s re-run test%s', rerun.keys())
+        if rerun is not None:
+            regex = list_regex('%s re-run test%s', [rerun.name])
             self.check_line(output, regex)
-            regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
+            regex = LOG_PREFIX + fr"Re-running 1 failed tests in verbose mode"
+            self.check_line(output, regex)
+            regex = fr"Re-running {rerun.name} in verbose mode"
+            if rerun.match:
+                regex = fr"{regex} \(matching: {rerun.match}\)"
             self.check_line(output, regex)
-            for name, match in rerun.items():
-                regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
-                self.check_line(output, regex)
 
         if run_no_tests:
             regex = list_regex('%s test%s run no tests', run_no_tests)
             self.check_line(output, regex)
 
-        good = (len(tests) - len(skipped) - len(failed)
+        good = (len(tests) - len(skipped) - len(resource_denied) - len(failed)
                 - len(omitted) - len(env_changed) - len(run_no_tests))
         if good:
-            regex = r'%s test%s OK\.$' % (good, plural(good))
-            if not skipped and not failed and good > 1:
+            regex = r'%s test%s OK\.' % (good, plural(good))
+            if not skipped and not failed and (rerun is None or rerun.success) and good > 1:
                 regex = 'All %s' % regex
-            self.check_line(output, regex)
+            self.check_line(output, regex, full=True)
 
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')
 
         # Total tests
-        parts = [f'run={stats.tests_run:,}']
+        text = f'run={stats.tests_run:,}'
+        if filtered:
+            text = fr'{text} \(filtered\)'
+        parts = [text]
         if stats.failures:
             parts.append(f'failures={stats.failures:,}')
         if stats.skipped:
@@ -512,39 +538,52 @@ def list_regex(line_format, tests):
         self.check_line(output, line, full=True)
 
         # Total test files
-        report = [f'success={good}']
-        if failed:
-            report.append(f'failed={len(failed)}')
-        if env_changed:
-            report.append(f'env_changed={len(env_changed)}')
-        if skipped:
-            report.append(f'skipped={len(skipped)}')
-        if resource_denied:
-            report.append(f'resource_denied={len(resource_denied)}')
-        if rerun:
-            report.append(f'rerun={len(rerun)}')
-        if run_no_tests:
-            report.append(f'run_no_tests={len(run_no_tests)}')
+        run = len(total_tests) - len(resource_denied)
+        if rerun is not None:
+            total_failed = len(rerun_failed)
+            total_rerun = 1
+        else:
+            total_failed = len(failed)
+            total_rerun = 0
+        if interrupted:
+            run = 0
+        text = f'run={run}'
+        if not forever:
+            text = f'{text}/{len(tests)}'
+        if filtered:
+            text = fr'{text} \(filtered\)'
+        report = [text]
+        for name, ntest in (
+            ('failed', total_failed),
+            ('env_changed', len(env_changed)),
+            ('skipped', len(skipped)),
+            ('resource_denied', len(resource_denied)),
+            ('rerun', total_rerun),
+            ('run_no_tests', len(run_no_tests)),
+        ):
+            if ntest:
+                report.append(f'{name}={ntest}')
         line = fr'Total test files: {" ".join(report)}'
         self.check_line(output, line, full=True)
 
         # Result
-        result = []
+        state = []
         if failed:
-            result.append('FAILURE')
+            state.append('FAILURE')
         elif fail_env_changed and env_changed:
-            result.append('ENV CHANGED')
+            state.append('ENV CHANGED')
         if interrupted:
-            result.append('INTERRUPTED')
-        if not any((good, result, failed, interrupted, skipped,
+            state.append('INTERRUPTED')
+        if not any((good, failed, interrupted, skipped,
                     env_changed, fail_env_changed)):
-            result.append("NO TESTS RAN")
-        elif not result:
-            result.append('SUCCESS')
-        result = ', '.join(result)
-        if rerun:
-            result = 'FAILURE then %s' % result
-        self.check_line(output, f'Result: {result}', full=True)
+            state.append("NO TESTS RAN")
+        elif not state:
+            state.append('SUCCESS')
+        state = ', '.join(state)
+        if rerun is not None:
+            new_state = 'SUCCESS' if rerun.success else 'FAILURE'
+            state = 'FAILURE then ' + new_state
+        self.check_line(output, f'Result: {state}', full=True)
 
     def parse_random_seed(self, output):
         match = self.regex_search(r'Using random seed ([0-9]+)', output)
@@ -563,13 +602,13 @@ def run_command(self, args, input=None, exitcode=0, **kw):
                               stdout=subprocess.PIPE,
                               **kw)
         if proc.returncode != exitcode:
-            msg = ("Command %s failed with exit code %s\n"
+            msg = ("Command %s failed with exit code %s, but exit code %s expected!\n"
                    "\n"
                    "stdout:\n"
                    "---\n"
                    "%s\n"
                    "---\n"
-                   % (str(args), proc.returncode, proc.stdout))
+                   % (str(args), proc.returncode, exitcode, proc.stdout))
             if proc.stderr:
                 msg += ("\n"
                         "stderr:\n"
@@ -738,6 +777,40 @@ def run_tests(self, *testargs, **kw):
         cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
         return self.run_python(cmdargs, **kw)
 
+    def test_success(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class PassingTests(unittest.TestCase):
+                def test_test1(self):
+                    pass
+
+                def test_test2(self):
+                    pass
+
+                def test_test3(self):
+                    pass
+        """)
+        tests = [self.create_test(f'ok{i}', code=code) for i in range(1, 6)]
+
+        output = self.run_tests(*tests)
+        self.check_executed_tests(output, tests,
+                                  stats=3 * len(tests))
+
+    def test_skip(self):
+        code = textwrap.dedent("""
+            import unittest
+            raise unittest.SkipTest("nope")
+        """)
+        test_ok = self.create_test('ok')
+        test_skip = self.create_test('skip', code=code)
+        tests = [test_ok, test_skip]
+
+        output = self.run_tests(*tests)
+        self.check_executed_tests(output, tests,
+                                  skipped=[test_skip],
+                                  stats=1)
+
     def test_failing_test(self):
         # test a failing test
         code = textwrap.dedent("""
@@ -777,14 +850,12 @@ def test_pass(self):
         # -u audio: 1 resource enabled
         output = self.run_tests('-uaudio', *test_names)
         self.check_executed_tests(output, test_names,
-                                  skipped=tests['network'],
                                   resource_denied=tests['network'],
                                   stats=1)
 
         # no option: 0 resources enabled
-        output = self.run_tests(*test_names)
+        output = self.run_tests(*test_names, exitcode=EXITCODE_NO_TESTS_RAN)
         self.check_executed_tests(output, test_names,
-                                  skipped=test_names,
                                   resource_denied=test_names,
                                   stats=0)
 
@@ -930,9 +1001,21 @@ def test_run(self):
                     builtins.__dict__['RUN'] = 1
         """)
         test = self.create_test('forever', code=code)
 
+        # --forever
         output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [test]*3, failed=test,
-                                  stats=TestStats(1, 1))
+                                  stats=TestStats(3, 1),
+                                  forever=True)
+
+        # --forever --rerun
+        output = self.run_tests('--forever', '--rerun', test, exitcode=0)
+        self.check_executed_tests(output, [test]*3,
+                                  rerun=Rerun(test,
+                                              match='test_run',
+                                              success=True),
+                                  stats=TestStats(4, 1),
+                                  forever=True)
 
     def check_leak(self, code, what):
         test = self.create_test('huntrleaks', code=code)
@@ -1143,33 +1226,55 @@ def test_fail_always(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [testname],
-                                  failed=testname,
-                                  rerun={testname: "test_fail_always"},
-                                  stats=TestStats(1, 1))
+                                  rerun=Rerun(testname,
+                                              "test_fail_always",
+                                              success=False),
+                                  stats=TestStats(3, 2))
 
     def test_rerun_success(self):
         # FAILURE then SUCCESS
-        code = textwrap.dedent("""
-            import builtins
+        marker_filename = os.path.abspath("regrtest_marker_filename")
+        self.addCleanup(os_helper.unlink, marker_filename)
+        self.assertFalse(os.path.exists(marker_filename))
+
+        code = textwrap.dedent(f"""
+            import os.path
             import unittest
 
+            marker_filename = {marker_filename!r}
+
             class Tests(unittest.TestCase):
                 def test_succeed(self):
                     return
 
                 def test_fail_once(self):
-                    if not hasattr(builtins, '_test_failed'):
-                        builtins._test_failed = True
+                    if not os.path.exists(marker_filename):
+                        open(marker_filename, "w").close()
                         self.fail("bug")
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=0)
+        # FAILURE then SUCCESS => exit code 0
+        output = self.run_tests("--rerun", testname, exitcode=0)
         self.check_executed_tests(output, [testname],
-                                  rerun={testname: "test_fail_once"},
-                                  stats=1)
+                                  rerun=Rerun(testname,
+                                              match="test_fail_once",
+                                              success=True),
+                                  stats=TestStats(3, 1))
+        os_helper.unlink(marker_filename)
+
+        # with --fail-rerun, exit code EXITCODE_BAD_TEST
+        # on "FAILURE then SUCCESS" state.
+        output = self.run_tests("--rerun", "--fail-rerun", testname,
+                                exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, [testname],
+                                  rerun=Rerun(testname,
+                                              match="test_fail_once",
+                                              success=True),
+                                  stats=TestStats(3, 1))
+        os_helper.unlink(marker_filename)
 
     def test_rerun_setup_class_hook_failure(self):
         # FAILURE then FAILURE
@@ -1186,10 +1291,12 @@ def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, testname,
                                   failed=[testname],
-                                  rerun={testname: "ExampleTests"},
+                                  rerun=Rerun(testname,
+                                              match="ExampleTests",
+                                              success=False),
                                   stats=0)
 
     def test_rerun_teardown_class_hook_failure(self):
@@ -1207,11 +1314,13 @@ def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, testname,
                                   failed=[testname],
-                                  rerun={testname: "ExampleTests"},
-                                  stats=1)
+                                  rerun=Rerun(testname,
+                                              match="ExampleTests",
+                                              success=False),
+                                  stats=2)
 
     def test_rerun_setup_module_hook_failure(self):
         # FAILURE then FAILURE
@@ -1227,10 +1336,12 @@ def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, testname,
                                   failed=[testname],
-                                  rerun={testname: testname},
+                                  rerun=Rerun(testname,
+                                              match=None,
+                                              success=False),
                                   stats=0)
 
     def test_rerun_teardown_module_hook_failure(self):
@@ -1247,11 +1358,13 @@ def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
-        self.check_executed_tests(output, testname,
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, [testname],
                                   failed=[testname],
-                                  rerun={testname: testname},
-                                  stats=1)
+                                  rerun=Rerun(testname,
+                                              match=None,
+                                              success=False),
+                                  stats=2)
 
     def test_rerun_setup_hook_failure(self):
         # FAILURE then FAILURE
@@ -1267,11 +1380,13 @@ def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                   failed=[testname],
-                                  rerun={testname: "test_success"},
-                                  stats=1)
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
 
     def test_rerun_teardown_hook_failure(self):
         # FAILURE then FAILURE
@@ -1287,11 +1402,13 @@ def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, testname,
                                   failed=[testname],
-                                  rerun={testname: "test_success"},
-                                  stats=1)
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
 
     def test_rerun_async_setup_hook_failure(self):
         # FAILURE then FAILURE
@@ -1307,11 +1424,12 @@ async def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, testname,
-                                  failed=[testname],
-                                  rerun={testname: "test_success"},
-                                  stats=1)
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
 
     def test_rerun_async_teardown_hook_failure(self):
         # FAILURE then FAILURE
@@ -1327,11 +1445,13 @@ async def test_success(self):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, testname,
                                   failed=[testname],
-                                  rerun={testname: "test_success"},
-                                  stats=1)
+                                  rerun=Rerun(testname,
+                                              match="test_success",
+                                              success=False),
+                                  stats=2)
 
     def test_no_tests_ran(self):
         code = textwrap.dedent("""
@@ -1347,7 +1467,7 @@ def test_bug(self):
                                exitcode=EXITCODE_NO_TESTS_RAN)
         self.check_executed_tests(output, [testname],
                                   run_no_tests=testname,
-                                  stats=0)
+                                  stats=0, filtered=True)
 
     def test_no_tests_ran_skip(self):
         code = textwrap.dedent("""
@@ -1378,7 +1498,7 @@ def test_bug(self):
                                exitcode=EXITCODE_NO_TESTS_RAN)
         self.check_executed_tests(output, [testname, testname2],
                                   run_no_tests=[testname, testname2],
-                                  stats=0)
+                                  stats=0, filtered=True)
 
     def test_no_test_ran_some_test_exist_some_not(self):
         code = textwrap.dedent("""
@@ -1402,7 +1522,7 @@ def test_other_bug(self):
                                 "-m", "test_other_bug", exitcode=0)
         self.check_executed_tests(output, [testname, testname2],
                                   run_no_tests=[testname],
-                                  stats=1)
+                                  stats=1, filtered=True)
 
     @support.cpython_only
     def test_uncollectable(self):
@@ -1719,6 +1839,17 @@ def test_format_duration(self):
         self.assertEqual(utils.format_duration(3 * 3600 + 1),
                          '3 hour 1 sec')
 
+    def test_normalize_test_name(self):
+        normalize = normalize_test_name
+        self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'),
+                         'test_access')
+        self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True),
+                         'ChownFileTests')
+        self.assertEqual(normalize('test_success (test.test_bug.ExampleTests.test_success)', is_error=True),
+                         'test_success')
+        self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True))
+        self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True))
+
 
 if __name__ == '__main__':
     unittest.main()
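The new test pins down normalize_test_name(): it extracts the method name from
a unittest test id, maps class-level hook errors (setUpClass/tearDownClass) to
the test class name, and returns None for module-level hooks. A sketch
consistent with the assertions above (the real helper lives in runtest.py and
may differ in detail):

def normalize_test_name(test_full_name: str, *, is_error: bool = False) -> str | None:
    short_name = test_full_name.split(" ")[0]
    if is_error and short_name in ("setUpModule", "tearDownModule"):
        # No single test method to re-run for module-level hooks.
        return None
    if is_error and short_name in ("setUpClass", "tearDownClass"):
        # "setUpClass (test.test_os.ChownFileTests)" -> "ChownFileTests"
        dotted = test_full_name.split("(", 1)[1].rstrip(")")
        return dotted.rsplit(".", 1)[-1]
    # "test_access (test.test_os.FileTests.test_access)" -> "test_access"
    return short_name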
@@ -0,0 +1,6 @@
+When regrtest reruns failed tests in verbose mode (``./python -m test
+--rerun``), tests are now rerun in fresh worker processes rather than being
+executed in the main process. If a test crashes or is killed by a timeout,
+the main process can detect and handle the killed worker process. Tests are
+rerun in parallel if the ``-jN`` option is used to run tests in parallel.
+Patch by Victor Stinner.
@@ -0,0 +1,2 @@
+Rename regrtest ``--verbose2`` option (``-w``) to ``--rerun``. Keep
+``--verbose2`` as a deprecated alias. Patch by Victor Stinner.
@@ -0,0 +1,3 @@
+Add ``--fail-rerun`` option to regrtest: if a test failed and then passed
+when rerun in verbose mode, exit the process with exit code 2 (error),
+instead of exit code 0 (success). Patch by Victor Stinner.
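An illustrative way to exercise the options described in these entries (the
flags come from this change: ``--rerun``, ``--fail-rerun`` and ``-jN`` for
parallel workers; the chosen test name and worker count are just example
values):

import subprocess
import sys

# Re-run failed tests in fresh worker processes; with --fail-rerun the exit
# code stays non-zero even if the failed test passes on the second attempt.
cmd = [sys.executable, "-m", "test", "--rerun", "--fail-rerun", "-j4", "test_os"]
proc = subprocess.run(cmd)
print("regrtest exit code:", proc.returncode)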