gh-110756: Sync regrtest with main branch (#110758)
gh-110756: Sync regrtest with main branch

Copy files from main to this branch:

* Lib/test/libregrtest/*.py
* Lib/test/__init__.py
* Lib/test/__main__.py
* Lib/test/autotest.py
* Lib/test/pythoninfo.py
* Lib/test/regrtest.py
* Lib/test/test_regrtest.py

Also copy changes from:

* Lib/test/support/__init__.py
* Lib/test/support/os_helper.py
* Lib/test/support/testresult.py
* Lib/test/support/threading_helper.py
* Lib/test/test_support.py

Scripts that run tests, such as Makefile.pre.in,
.github/workflows/build.yml and Tools/scripts/run_tests.py, are not
modified: --fast-ci and --slow-ci are not used in this change.

Changes:

* SPLITTESTDIRS: don't include test_inspect.
* Add utils.process_cpu_count() using len(os.sched_getaffinity(0)).
* test_regrtest doesn't use @support.without_optimizer, which doesn't
  exist in Python 3.11.
* Add support.set_sanitizer_env_var().
* Update test_faulthandler to use support.set_sanitizer_env_var().
* Add support.Py_DEBUG.
* regrtest.refleak: 3.11 doesn't have sys.getunicodeinternedsize.
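
For context, the utils.process_cpu_count() helper mentioned above amounts
to only a few lines; here is a rough sketch of the idea, not a verbatim
copy of the backported code:

import os

def process_cpu_count():
    # Prefer the number of CPUs usable by *this process* (the affinity
    # mask, available on Linux); fall back to the machine-wide count.
    if hasattr(os, 'sched_getaffinity'):
        return len(os.sched_getaffinity(0))
    return os.cpu_count()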
vstinner committed Oct 12, 2023
1 parent e16922f commit f452cdf
Showing 29 changed files with 3,715 additions and 2,176 deletions.
4 changes: 2 additions & 2 deletions Lib/test/__main__.py
@@ -1,2 +1,2 @@
-from test.libregrtest import main
-main()
+from test.libregrtest.main import main
+main(_add_python_opts=True)
2 changes: 1 addition & 1 deletion Lib/test/autotest.py
@@ -1,5 +1,5 @@
 # This should be equivalent to running regrtest.py from the cmdline.
 # It can be especially handy if you're in an interactive shell, e.g.,
 # from test import autotest.
-from test.libregrtest import main
+from test.libregrtest.main import main
 main()
2 changes: 0 additions & 2 deletions Lib/test/libregrtest/__init__.py
@@ -1,2 +0,0 @@
-from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
-from test.libregrtest.main import main
125 changes: 94 additions & 31 deletions Lib/test/libregrtest/cmdline.py
@@ -1,8 +1,9 @@
 import argparse
-import os
+import os.path
 import shlex
 import sys
 from test.support import os_helper
+from .utils import ALL_RESOURCES, RESOURCE_NAMES
 
 
 USAGE = """\
@@ -27,8 +28,10 @@
 Additional option details:
 
 -r randomizes test execution order. You can use --randseed=int to provide an
-int seed value for the randomizer; this is useful for reproducing troublesome
-test orders.
+int seed value for the randomizer. The randseed value will be used
+to set seeds for all random usages in tests
+(including randomizing the tests order if -r is set).
+By default we always set random seed, but do not randomize test order.
 
 -s On the first invocation of regrtest using -s, the first test file found
 or the first test file given on the command line is run, and the name of
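
The default-seeding behavior described in that usage text boils down to
something like the following; this is a paraphrase of the idea, with a
hypothetical helper name, not the exact backported code:

import random

def select_random_seed(ns):
    # Always pick a seed so a run is reproducible even when the user
    # did not pass --randseed; -r only controls order shuffling.
    if ns.random_seed is None:
        ns.random_seed = random.getrandbits(32)
    random.seed(ns.random_seed)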
@@ -130,25 +133,17 @@
 """
 
 
-ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
-                 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime')
-
-# Other resources excluded from --use=all:
-#
-# - extralagefile (ex: test_zipfile64): really too slow to be enabled
-#   "by default"
-# - tzdata: while needed to validate fully test_datetime, it makes
-#   test_datetime too slow (15-20 min on some buildbots) and so is disabled by
-#   default (see bpo-30822).
-RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')
-
-
 class Namespace(argparse.Namespace):
     def __init__(self, **kwargs) -> None:
+        self.ci = False
         self.testdir = None
         self.verbose = 0
         self.quiet = False
         self.exclude = False
+        self.cleanup = False
+        self.wait = False
+        self.list_cases = False
+        self.list_tests = False
         self.single = False
         self.randomize = False
         self.fromfile = None
@@ -157,8 +152,8 @@ def __init__(self, **kwargs) -> None:
         self.trace = False
         self.coverdir = 'coverage'
         self.runleaks = False
-        self.huntrleaks = False
-        self.verbose2 = False
+        self.huntrleaks: tuple[int, int, str] | None = None
+        self.rerun = False
         self.verbose3 = False
         self.print_slow = False
         self.random_seed = None
@@ -170,6 +165,14 @@ def __init__(self, **kwargs) -> None:
         self.ignore_tests = None
         self.pgo = False
         self.pgo_extended = False
+        self.worker_json = None
+        self.start = None
+        self.timeout = None
+        self.memlimit = None
+        self.threshold = None
+        self.fail_rerun = False
+        self.tempdir = None
+        self._add_python_opts = True
 
         super().__init__(**kwargs)

@@ -198,25 +201,35 @@ def _create_parser():
     # We add help explicitly to control what argument group it renders under.
     group.add_argument('-h', '--help', action='help',
                        help='show this help message and exit')
-    group.add_argument('--timeout', metavar='TIMEOUT', type=float,
+    group.add_argument('--fast-ci', action='store_true',
+                       help='Fast Continuous Integration (CI) mode used by '
+                            'GitHub Actions')
+    group.add_argument('--slow-ci', action='store_true',
+                       help='Slow Continuous Integration (CI) mode used by '
+                            'buildbot workers')
+    group.add_argument('--timeout', metavar='TIMEOUT',
                        help='dump the traceback and exit if a test takes '
                             'more than TIMEOUT seconds; disabled if TIMEOUT '
                             'is negative or equals to zero')
     group.add_argument('--wait', action='store_true',
                        help='wait for user input, e.g., allow a debugger '
                             'to be attached')
-    group.add_argument('--worker-args', metavar='ARGS')
     group.add_argument('-S', '--start', metavar='START',
                        help='the name of the test at which to start.' +
                        more_details)
     group.add_argument('-p', '--python', metavar='PYTHON',
                        help='Command to run Python test subprocesses with.')
+    group.add_argument('--randseed', metavar='SEED',
+                       dest='random_seed', type=int,
+                       help='pass a global random seed')
 
     group = parser.add_argument_group('Verbosity')
     group.add_argument('-v', '--verbose', action='count',
                        help='run tests in verbose mode with output to stdout')
-    group.add_argument('-w', '--verbose2', action='store_true',
+    group.add_argument('-w', '--rerun', action='store_true',
                        help='re-run failed tests in verbose mode')
+    group.add_argument('--verbose2', action='store_true', dest='rerun',
+                       help='deprecated alias to --rerun')
     group.add_argument('-W', '--verbose3', action='store_true',
                        help='display test output on failure')
     group.add_argument('-q', '--quiet', action='store_true',
@@ -229,10 +242,6 @@ def _create_parser():
     group = parser.add_argument_group('Selecting tests')
     group.add_argument('-r', '--randomize', action='store_true',
                        help='randomize test execution order.' + more_details)
-    group.add_argument('--randseed', metavar='SEED',
-                       dest='random_seed', type=int,
-                       help='pass a random seed to reproduce a previous '
-                            'random run')
     group.add_argument('-f', '--fromfile', metavar='FILE',
                        help='read names of tests to run from a file.' +
                        more_details)
@@ -311,6 +320,9 @@ def _create_parser():
     group.add_argument('--fail-env-changed', action='store_true',
                        help='if a test file alters the environment, mark '
                             'the test as failed')
+    group.add_argument('--fail-rerun', action='store_true',
+                       help='if a test failed and then passed when re-run, '
+                            'mark the tests as failed')
 
     group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
                        help='writes JUnit-style XML results to the specified '
@@ -319,6 +331,9 @@ def _create_parser():
                        help='override the working directory for the test run')
     group.add_argument('--cleanup', action='store_true',
                        help='remove old test_python_* directories')
+    group.add_argument('--dont-add-python-opts', dest='_add_python_opts',
+                       action='store_false',
+                       help="internal option, don't use it")
     return parser


@@ -369,7 +384,50 @@ def _parse_args(args, **kwargs):
     for arg in ns.args:
         if arg.startswith('-'):
             parser.error("unrecognized arguments: %s" % arg)
-            sys.exit(1)
 
+    if ns.timeout is not None:
+        # Support "--timeout=" (no value) so Makefile.pre.pre TESTTIMEOUT
+        # can be used by "make buildbottest" and "make test".
+        if ns.timeout != "":
+            try:
+                ns.timeout = float(ns.timeout)
+            except ValueError:
+                parser.error(f"invalid timeout value: {ns.timeout!r}")
+        else:
+            ns.timeout = None
+
+    # Continuous Integration (CI): common options for fast/slow CI modes
+    if ns.slow_ci or ns.fast_ci:
+        # Similar to options:
+        #
+        # -j0 --randomize --fail-env-changed --fail-rerun --rerun
+        # --slowest --verbose3
+        if ns.use_mp is None:
+            ns.use_mp = 0
+        ns.randomize = True
+        ns.fail_env_changed = True
+        ns.fail_rerun = True
+        if ns.python is None:
+            ns.rerun = True
+        ns.print_slow = True
+        ns.verbose3 = True
+    else:
+        ns._add_python_opts = False
+
+    # When both --slow-ci and --fast-ci options are present,
+    # --slow-ci has the priority
+    if ns.slow_ci:
+        # Similar to: -u "all" --timeout=1200
+        if not ns.use:
+            ns.use = [['all']]
+        if ns.timeout is None:
+            ns.timeout = 1200   # 20 minutes
+    elif ns.fast_ci:
+        # Similar to: -u "all,-cpu" --timeout=600
+        if not ns.use:
+            ns.use = [['all', '-cpu']]
+        if ns.timeout is None:
+            ns.timeout = 600   # 10 minutes
+
     if ns.single and ns.fromfile:
         parser.error("-s and -f don't go together!")
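
As a quick illustration of the CI presets above, parsing --fast-ci with
this module should yield the documented defaults. A sketch, assuming it
is run against the synced 3.11 tree; the exact values follow from the
logic in the hunk above:

from test.libregrtest.cmdline import _parse_args

ns = _parse_args(['--fast-ci'])
# Expect roughly: use_mp=0 (like -j0), timeout=600 (10 minutes),
# resources "all" minus "cpu", and rerun/fail-env-changed enabled.
print(ns.use_mp, ns.timeout, ns.use, ns.rerun)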
@@ -382,7 +440,7 @@ def _parse_args(args, **kwargs):
         ns.python = shlex.split(ns.python)
     if ns.failfast and not (ns.verbose or ns.verbose3):
         parser.error("-G/--failfast needs either -v or -W")
-    if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
+    if ns.pgo and (ns.verbose or ns.rerun or ns.verbose3):
         parser.error("--pgo/-v don't go together!")
     if ns.pgo_extended:
         ns.pgo = True  # pgo_extended implies pgo
@@ -396,10 +454,6 @@ def _parse_args(args, **kwargs):
     if ns.timeout is not None:
         if ns.timeout <= 0:
             ns.timeout = None
-    if ns.use_mp is not None:
-        if ns.use_mp <= 0:
-            # Use all cores + extras for tests that like to sleep
-            ns.use_mp = 2 + (os.cpu_count() or 1)
     if ns.use:
         for a in ns.use:
             for r in a:
@@ -443,4 +497,13 @@ def _parse_args(args, **kwargs):
     # --forever implies --failfast
     ns.failfast = True
 
+    if ns.huntrleaks:
+        warmup, repetitions, _ = ns.huntrleaks
+        if warmup < 1 or repetitions < 1:
+            msg = ("Invalid values for the --huntrleaks/-R parameters. The "
+                   "number of warmups and repetitions must be at least 1 "
+                   "each (1:1).")
+            print(msg, file=sys.stderr, flush=True)
+            sys.exit(2)
+
     return ns
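
With this validation in place, a well-formed -R value parses into the
(warmup, repetitions, filename) tuple unpacked above, while a zero count
now exits with status 2. A sketch of the happy path (the default reflog
filename is an assumption here):

from test.libregrtest.cmdline import _parse_args

# -R 3:2 means 3 warmup runs followed by 2 counted repetitions.
ns = _parse_args(['-R', '3:2', 'test_os'])
warmup, repetitions, fname = ns.huntrleaks
print(warmup, repetitions)   # 3 2; fname defaults to something like 'reflog.txt'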
105 changes: 105 additions & 0 deletions Lib/test/libregrtest/findtests.py
@@ -0,0 +1,105 @@ (new file, all lines added)
import os
import sys
import unittest

from test import support

from .utils import (
    StrPath, TestName, TestTuple, TestList, FilterTuple,
    abs_module_name, count, printlist)


# If these test directories are encountered recurse into them and treat each
# "test_*.py" file or each sub-directory as a separate test module. This can
# increase parallelism.
#
# Beware this can't generally be done for any directory with sub-tests as the
# __init__.py may do things which alter what tests are to be run.
SPLITTESTDIRS: set[TestName] = {
    "test_asyncio",
    "test_concurrent_futures",
    "test_future_stmt",
    "test_gdb",
    "test_multiprocessing_fork",
    "test_multiprocessing_forkserver",
    "test_multiprocessing_spawn",
}


def findtestdir(path: StrPath | None = None) -> StrPath:
    return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir


def findtests(*, testdir: StrPath | None = None, exclude=(),
              split_test_dirs: set[TestName] = SPLITTESTDIRS,
              base_mod: str = "") -> TestList:
    """Return a list of all applicable test modules."""
    testdir = findtestdir(testdir)
    tests = []
    for name in os.listdir(testdir):
        mod, ext = os.path.splitext(name)
        if (not mod.startswith("test_")) or (mod in exclude):
            continue
        if base_mod:
            fullname = f"{base_mod}.{mod}"
        else:
            fullname = mod
        if fullname in split_test_dirs:
            subdir = os.path.join(testdir, mod)
            if not base_mod:
                fullname = f"test.{mod}"
            tests.extend(findtests(testdir=subdir, exclude=exclude,
                                   split_test_dirs=split_test_dirs,
                                   base_mod=fullname))
        elif ext in (".py", ""):
            tests.append(fullname)
    return sorted(tests)


def split_test_packages(tests, *, testdir: StrPath | None = None, exclude=(),
                        split_test_dirs=SPLITTESTDIRS):
    testdir = findtestdir(testdir)
    splitted = []
    for name in tests:
        if name in split_test_dirs:
            subdir = os.path.join(testdir, name)
            splitted.extend(findtests(testdir=subdir, exclude=exclude,
                                      split_test_dirs=split_test_dirs,
                                      base_mod=name))
        else:
            splitted.append(name)
    return splitted


def _list_cases(suite):
    for test in suite:
        if isinstance(test, unittest.loader._FailedTest):
            continue
        if isinstance(test, unittest.TestSuite):
            _list_cases(test)
        elif isinstance(test, unittest.TestCase):
            if support.match_test(test):
                print(test.id())

def list_cases(tests: TestTuple, *,
               match_tests: FilterTuple | None = None,
               ignore_tests: FilterTuple | None = None,
               test_dir: StrPath | None = None):
    support.verbose = False
    support.set_match_tests(match_tests, ignore_tests)

    skipped = []
    for test_name in tests:
        module_name = abs_module_name(test_name, test_dir)
        try:
            suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
            _list_cases(suite)
        except unittest.SkipTest:
            skipped.append(test_name)

    if skipped:
        sys.stdout.flush()
        stderr = sys.stderr
        print(file=stderr)
        print(count(len(skipped), "test"), "skipped:", file=stderr)
        printlist(skipped, file=stderr)
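
A rough usage sketch of the new module, run from a CPython checkout (the
exact output depends on the tree):

from test.libregrtest.findtests import findtests, split_test_packages

# Discover every test_*.py module under Lib/test, recursing into the
# SPLITTESTDIRS packages so their submodules become separate tests.
tests = findtests()
print(len(tests), tests[:3])

# Expand only the listed packages; other names pass through unchanged.
print(split_test_packages(['test_os', 'test_asyncio'])[:5])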