python-3.6.zip added from Github
README.cosmo contains the necessary links.
parent 75fc601ff5, commit 0c4c56ff39
4219 changed files with 1968626 additions and 0 deletions
5 third_party/python/Lib/test/libregrtest/__init__.py vendored Normal file
@@ -0,0 +1,5 @@
# We import importlib *ASAP* in order to test #15386
import importlib

from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
from test.libregrtest.main import main
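For orientation, this package is the engine behind `python -m test`. A minimal sketch (hypothetical, assuming a CPython build where the `test` package is importable) of driving the same entry point programmatically:

# Hypothetical sketch: drive regrtest from code instead of the CLI.
# Note that main() runs the suite and then calls sys.exit() itself,
# and that the caller's own sys.argv is also consulted during parsing.
from test.libregrtest import main

# Roughly equivalent to `python -m test test_os test_sys -v`.
main(tests=['test_os', 'test_sys'], verbose=1)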
391 third_party/python/Lib/test/libregrtest/cmdline.py vendored Normal file
@@ -0,0 +1,391 @@
import argparse
import os
import sys
from test import support


USAGE = """\
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
"""

DESCRIPTION = """\
Run Python regression tests.

If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).

For more rigorous testing, it is useful to use the following
command line:

python -E -Wd -m test [options] [test_name1 ...]
"""

EPILOG = """\
Additional option details:

-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.

-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest.  If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir().  On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted.  In this way it
is possible to single step through the test files.  This is useful when
doing memory analysis on the Python interpreter, which process tends to
consume too many resources to run the full regression test non-stop.

-S is used to continue running tests after an aborted run.  It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.

-f reads the names of tests from the file given as f's argument, one
or more test names per line.  Whitespace is ignored.  Blank lines and
lines beginning with '#' are ignored.  This is especially useful for
whittling down failures involving interactions among tests.

-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.

-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references.  The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to.  These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.

-M runs tests that require an exorbitant amount of memory.  These tests
typically try to ascertain that containers keep working when containing more
than 2 billion objects, which only works on 64-bit systems.  There are also
some tests that try to exhaust the address space of the process, which only
makes sense on 32-bit systems with at least 2Gb of memory.  The passed-in
memlimit, which is a string in the form of '2.5Gb', determines how much
memory the tests will limit themselves to (but they may go slightly over.)
The number shouldn't be more memory than the machine has (including swap
memory).  You should also keep in mind that swap memory is generally much,
much slower than RAM, and setting memlimit to all available RAM or higher
will heavily tax the machine.  On the other hand, it is no use running these
tests with a limit of less than 2.5Gb, and many require more than 20Gb.
Tests that expect to use more than memlimit memory will be skipped.  The
big-memory tests generally run very, very long.

-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test.  Currently only the following are defined:

    all -           Enable all special resources.

    none -          Disable all special resources (this is the default).

    audio -         Tests that use the audio device.  (There are known
                    cases of broken audio drivers that can crash Python or
                    even the Linux kernel.)

    curses -        Tests that use curses and will modify the terminal's
                    state and output modes.

    largefile -     It is okay to run some tests that may create huge
                    files.  These tests can take a long time and may
                    consume >2 GiB of disk space temporarily.

    network -       It is okay to run tests that use external network
                    resources, e.g. testing SSL support for sockets.

    decimal -       Test the decimal module against a large suite that
                    verifies compliance with standards.

    cpu -           Used for certain CPU-heavy tests.

    subprocess -    Run all tests for the subprocess module.

    urlfetch -      It is okay to download files required for testing.

    gui -           Run tests that require a running GUI.

    tzdata -        Run tests that require timezone data.

To enable all resources except one, use '-uall,-<resource>'.  For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.

--matchfile filters tests using a text file, one pattern per line.
Pattern examples:

- test method: test_stat_attributes
- test class: FileTests
- test identifier: test_os.FileTests.test_stat_attributes
"""


ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
                 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')

# Other resources excluded from --use=all:
#
# - extralargefile (ex: test_zipfile64): really too slow to be enabled
#   "by default"
# - tzdata: while needed to fully validate test_datetime, it makes
#   test_datetime too slow (15-20 min on some buildbots) and so is disabled by
#   default (see bpo-30822).
RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile', 'tzdata')


class _ArgParser(argparse.ArgumentParser):

    def error(self, message):
        super().error(message + "\nPass -h or --help for complete help.")


def _create_parser():
    # Set prog to prevent the uninformative "__main__.py" from displaying in
    # error messages when using "python -m test ...".
    parser = _ArgParser(prog='regrtest.py',
                        usage=USAGE,
                        description=DESCRIPTION,
                        epilog=EPILOG,
                        add_help=False,
                        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Arguments with this clause added to its help are described further in
    # the epilog's "Additional option details" section.
    more_details = '  See the section at bottom for more details.'

    group = parser.add_argument_group('General options')
    # We add help explicitly to control what argument group it renders under.
    group.add_argument('-h', '--help', action='help',
                       help='show this help message and exit')
    group.add_argument('--timeout', metavar='TIMEOUT', type=float,
                       help='dump the traceback and exit if a test takes '
                            'more than TIMEOUT seconds; disabled if TIMEOUT '
                            'is negative or equals to zero')
    group.add_argument('--wait', action='store_true',
                       help='wait for user input, e.g., allow a debugger '
                            'to be attached')
    group.add_argument('--worker-args', metavar='ARGS')
    group.add_argument('-S', '--start', metavar='START',
                       help='the name of the test at which to start.' +
                            more_details)

    group = parser.add_argument_group('Verbosity')
    group.add_argument('-v', '--verbose', action='count',
                       help='run tests in verbose mode with output to stdout')
    group.add_argument('-w', '--verbose2', action='store_true',
                       help='re-run failed tests in verbose mode')
    group.add_argument('-W', '--verbose3', action='store_true',
                       help='display test output on failure')
    group.add_argument('-q', '--quiet', action='store_true',
                       help='no output unless one or more tests fail')
    group.add_argument('-o', '--slowest', action='store_true', dest='print_slow',
                       help='print the slowest 10 tests')
    group.add_argument('--header', action='store_true',
                       help='print header with interpreter info')

    group = parser.add_argument_group('Selecting tests')
    group.add_argument('-r', '--randomize', action='store_true',
                       help='randomize test execution order.' + more_details)
    group.add_argument('--randseed', metavar='SEED',
                       dest='random_seed', type=int,
                       help='pass a random seed to reproduce a previous '
                            'random run')
    group.add_argument('-f', '--fromfile', metavar='FILE',
                       help='read names of tests to run from a file.' +
                            more_details)
    group.add_argument('-x', '--exclude', action='store_true',
                       help='arguments are tests to *exclude*')
    group.add_argument('-s', '--single', action='store_true',
                       help='single step through a set of tests.' +
                            more_details)
    group.add_argument('-m', '--match', metavar='PAT',
                       dest='match_tests', action='append',
                       help='match test cases and methods with glob pattern PAT')
    group.add_argument('--matchfile', metavar='FILENAME',
                       dest='match_filename',
                       help='similar to --match but get patterns from a '
                            'text file, one pattern per line')
    group.add_argument('-G', '--failfast', action='store_true',
                       help='fail as soon as a test fails (only with -v or -W)')
    group.add_argument('-u', '--use', metavar='RES1,RES2,...',
                       action='append', type=resources_list,
                       help='specify which special resource intensive tests '
                            'to run.' + more_details)
    group.add_argument('-M', '--memlimit', metavar='LIMIT',
                       help='run very large memory-consuming tests.' +
                            more_details)
    group.add_argument('--testdir', metavar='DIR',
                       type=relative_filename,
                       help='execute test files in the specified directory '
                            '(instead of the Python stdlib test suite)')

    group = parser.add_argument_group('Special runs')
    group.add_argument('-l', '--findleaks', action='store_true',
                       help='if GC is available detect tests that leak memory')
    group.add_argument('-L', '--runleaks', action='store_true',
                       help='run the leaks(1) command just before exit.' +
                            more_details)
    group.add_argument('-R', '--huntrleaks', metavar='RUNCOUNTS',
                       type=huntrleaks,
                       help='search for reference leaks (needs debug build, '
                            'very slow).' + more_details)
    group.add_argument('-j', '--multiprocess', metavar='PROCESSES',
                       dest='use_mp', type=int,
                       help='run PROCESSES processes at once')
    group.add_argument('-T', '--coverage', action='store_true',
                       dest='trace',
                       help='turn on code coverage tracing using the trace '
                            'module')
    group.add_argument('-D', '--coverdir', metavar='DIR',
                       type=relative_filename,
                       help='directory where coverage files are put')
    group.add_argument('-N', '--nocoverdir',
                       action='store_const', const=None, dest='coverdir',
                       help='put coverage files alongside modules')
    group.add_argument('-t', '--threshold', metavar='THRESHOLD',
                       type=int,
                       help='call gc.set_threshold(THRESHOLD)')
    group.add_argument('-n', '--nowindows', action='store_true',
                       help='suppress error message boxes on Windows')
    group.add_argument('-F', '--forever', action='store_true',
                       help='run the specified tests in a loop, until an '
                            'error happens')
    group.add_argument('--list-tests', action='store_true',
                       help="only write the name of tests that will be run, "
                            "don't execute them")
    group.add_argument('--list-cases', action='store_true',
                       help='only write the name of test cases that will be run'
                            ', don\'t execute them')
    group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
                       help='enable Profile Guided Optimization training')
    group.add_argument('--fail-env-changed', action='store_true',
                       help='if a test file alters the environment, mark '
                            'the test as failed')

    group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
                       help='writes JUnit-style XML results to the specified '
                            'file')

    return parser


def relative_filename(string):
    # CWD is replaced with a temporary dir before calling main(), so we
    # join it with the saved CWD so it ends up where the user expects.
    return os.path.join(support.SAVEDCWD, string)


def huntrleaks(string):
    args = string.split(':')
    if len(args) not in (2, 3):
        raise argparse.ArgumentTypeError(
            'needs 2 or 3 colon-separated arguments')
    nwarmup = int(args[0]) if args[0] else 5
    ntracked = int(args[1]) if args[1] else 4
    fname = args[2] if len(args) > 2 and args[2] else 'reflog.txt'
    return nwarmup, ntracked, fname


def resources_list(string):
    u = [x.lower() for x in string.split(',')]
    for r in u:
        if r == 'all' or r == 'none':
            continue
        if r[0] == '-':
            r = r[1:]
        if r not in RESOURCE_NAMES:
            raise argparse.ArgumentTypeError('invalid resource: ' + r)
    return u


def _parse_args(args, **kwargs):
    # Defaults
    ns = argparse.Namespace(testdir=None, verbose=0, quiet=False,
         exclude=False, single=False, randomize=False, fromfile=None,
         findleaks=False, use_resources=None, trace=False, coverdir='coverage',
         runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
         random_seed=None, use_mp=None, verbose3=False, forever=False,
         header=False, failfast=False, match_tests=None, pgo=False)
    for k, v in kwargs.items():
        if not hasattr(ns, k):
            raise TypeError('%r is an invalid keyword argument '
                            'for this function' % k)
        setattr(ns, k, v)
    if ns.use_resources is None:
        ns.use_resources = []

    parser = _create_parser()
    # Issue #14191: argparse doesn't support "intermixed" positional and
    # optional arguments. Use parse_known_args() as workaround.
    ns.args = parser.parse_known_args(args=args, namespace=ns)[1]
    for arg in ns.args:
        if arg.startswith('-'):
            parser.error("unrecognized arguments: %s" % arg)
            sys.exit(1)

    if ns.single and ns.fromfile:
        parser.error("-s and -f don't go together!")
    if ns.use_mp is not None and ns.trace:
        parser.error("-T and -j don't go together!")
    if ns.use_mp is not None and ns.findleaks:
        parser.error("-l and -j don't go together!")
    if ns.failfast and not (ns.verbose or ns.verbose3):
        parser.error("-G/--failfast needs either -v or -W")
    if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
        parser.error("--pgo/-v don't go together!")

    if ns.nowindows:
        print("Warning: the --nowindows (-n) option is deprecated. "
              "Use -vv to display assertions in stderr.", file=sys.stderr)

    if ns.quiet:
        ns.verbose = 0
    if ns.timeout is not None:
        if ns.timeout <= 0:
            ns.timeout = None
    if ns.use_mp is not None:
        if ns.use_mp <= 0:
            # Use all cores + extras for tests that like to sleep
            ns.use_mp = 2 + (os.cpu_count() or 1)
    if ns.use:
        for a in ns.use:
            for r in a:
                if r == 'all':
                    ns.use_resources[:] = ALL_RESOURCES
                    continue
                if r == 'none':
                    del ns.use_resources[:]
                    continue
                remove = False
                if r[0] == '-':
                    remove = True
                    r = r[1:]
                if remove:
                    if r in ns.use_resources:
                        ns.use_resources.remove(r)
                elif r not in ns.use_resources:
                    ns.use_resources.append(r)
    if ns.random_seed is not None:
        ns.randomize = True
    if ns.verbose:
        ns.header = True
    if ns.huntrleaks and ns.verbose3:
        ns.verbose3 = False
        print("WARNING: Disable --verbose3 because it's incompatible with "
              "--huntrleaks: see http://bugs.python.org/issue27103",
              file=sys.stderr)
    if ns.match_filename:
        if ns.match_tests is None:
            ns.match_tests = []
        filename = os.path.join(support.SAVEDCWD, ns.match_filename)
        with open(filename) as fp:
            for line in fp:
                ns.match_tests.append(line.strip())

    return ns
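As a quick illustration of the parsing above (a sketch, assuming `test.libregrtest` is importable from a CPython checkout), the `-R stab:run:fname` and `-u` arguments come back as a tuple and a resource list respectively:

# Sketch: what the huntrleaks() and resources_list() converters above produce.
from test.libregrtest.cmdline import _parse_args

ns = _parse_args(['-R', '6:3:leaks.log', '-u', 'all,-gui'])
print(ns.huntrleaks)     # (6, 3, 'leaks.log'): warmups, tracked runs, report file
print(ns.use_resources)  # every resource in ALL_RESOURCES except 'gui'
print(_parse_args(['-R', ':']).huntrleaks)  # defaults: (5, 4, 'reflog.txt')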
638 third_party/python/Lib/test/libregrtest/main.py vendored Normal file
@@ -0,0 +1,638 @@
import datetime
import faulthandler
import locale
import os
import platform
import random
import re
import sys
import sysconfig
import tempfile
import time
import unittest
from test.libregrtest.cmdline import _parse_args
from test.libregrtest.runtest import (
    findtests, runtest, get_abs_module,
    STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
    INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN,
    PROGRESS_MIN_TIME, format_test_result)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import removepy, count, format_duration, printlist
from test import support
try:
    import gc
except ImportError:
    gc = None


# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder.  This eases the cleanup of leftover
# files using the "make distclean" command.
if sysconfig.is_python_build():
    TEMPDIR = sysconfig.get_config_var('abs_builddir')
    if TEMPDIR is None:
        # bpo-30284: On Windows, only srcdir is available. Using abs_builddir
        # mostly matters on UNIX when building Python out of the source tree,
        # especially when the source tree is read only.
        TEMPDIR = sysconfig.get_config_var('srcdir')
    TEMPDIR = os.path.join(TEMPDIR, 'build')
else:
    TEMPDIR = tempfile.gettempdir()
TEMPDIR = os.path.abspath(TEMPDIR)


class Regrtest:
    """Execute a test suite.

    This also parses command-line options and modifies its behavior
    accordingly.

    tests -- a list of strings containing test names (optional)
    testdir -- the directory in which to look for tests (optional)

    Users other than the Python test suite will certainly want to
    specify testdir; if it's omitted, the directory containing the
    Python test suite is searched for.

    If the tests argument is omitted, the tests listed on the
    command-line will be used.  If that's empty, too, then all *.py
    files beginning with test_ will be used.

    The other default arguments (verbose, quiet, exclude,
    single, randomize, findleaks, use_resources, trace, coverdir,
    print_slow, and random_seed) allow programmers calling main()
    directly to set the values that would normally be set by flags
    on the command line.
    """
    def __init__(self):
        # Namespace of command line options
        self.ns = None

        # tests
        self.tests = []
        self.selected = []

        # test results
        self.good = []
        self.bad = []
        self.skipped = []
        self.resource_denieds = []
        self.environment_changed = []
        self.rerun = []
        self.run_no_tests = []
        self.first_result = None
        self.interrupted = False

        # used by --slow
        self.test_times = []

        # used by --coverage, trace.Trace instance
        self.tracer = None

        # used by --findleaks, store for gc.garbage
        self.found_garbage = []

        # used to display the progress bar "[ 3/100]"
        self.start_time = time.monotonic()
        self.test_count = ''
        self.test_count_width = 1

        # used by --single
        self.next_single_test = None
        self.next_single_filename = None

        # used by --junit-xml
        self.testsuite_xml = None

    def accumulate_result(self, test, result):
        ok, test_time, xml_data = result
        if ok not in (CHILD_ERROR, INTERRUPTED):
            self.test_times.append((test_time, test))
        if ok == PASSED:
            self.good.append(test)
        elif ok in (FAILED, CHILD_ERROR):
            self.bad.append(test)
        elif ok == ENV_CHANGED:
            self.environment_changed.append(test)
        elif ok == SKIPPED:
            self.skipped.append(test)
        elif ok == RESOURCE_DENIED:
            self.skipped.append(test)
            self.resource_denieds.append(test)
        elif ok == TEST_DID_NOT_RUN:
            self.run_no_tests.append(test)
        elif ok != INTERRUPTED:
            raise ValueError("invalid test result: %r" % ok)

        if xml_data:
            import xml.etree.ElementTree as ET
            for e in xml_data:
                try:
                    self.testsuite_xml.append(ET.fromstring(e))
                except ET.ParseError:
                    print(xml_data, file=sys.__stderr__)
                    raise

    def display_progress(self, test_index, test):
        if self.ns.quiet:
            return

        # "[ 51/405/1] test_tcl passed"
        line = f"{test_index:{self.test_count_width}}{self.test_count}"
        fails = len(self.bad) + len(self.environment_changed)
        if fails and not self.ns.pgo:
            line = f"{line}/{fails}"
        line = f"[{line}] {test}"

        # add the system load prefix: "load avg: 1.80 "
        if hasattr(os, 'getloadavg'):
            load_avg_1min = os.getloadavg()[0]
            line = f"load avg: {load_avg_1min:.2f} {line}"

        # add the timestamp prefix: "0:01:05 "
        test_time = time.monotonic() - self.start_time
        test_time = datetime.timedelta(seconds=int(test_time))
        line = f"{test_time} {line}"
        print(line, flush=True)

    def parse_args(self, kwargs):
        ns = _parse_args(sys.argv[1:], **kwargs)

        if ns.timeout and not hasattr(faulthandler, 'dump_traceback_later'):
            print("Warning: The timeout option requires "
                  "faulthandler.dump_traceback_later", file=sys.stderr)
            ns.timeout = None

        if ns.threshold is not None and gc is None:
            print('No GC available, ignore --threshold.', file=sys.stderr)
            ns.threshold = None

        if ns.findleaks:
            if gc is not None:
                # Uncomment the line below to report garbage that is not
                # freeable by reference counting alone.  By default only
                # garbage that is not collectable by the GC is reported.
                pass
                #gc.set_debug(gc.DEBUG_SAVEALL)
            else:
                print('No GC available, disabling --findleaks',
                      file=sys.stderr)
                ns.findleaks = False

        if ns.xmlpath:
            support.junit_xml_list = self.testsuite_xml = []

        # Strip .py extensions.
        removepy(ns.args)

        return ns

    def find_tests(self, tests):
        self.tests = tests

        if self.ns.single:
            self.next_single_filename = os.path.join(TEMPDIR, 'pynexttest')
            try:
                with open(self.next_single_filename, 'r') as fp:
                    next_test = fp.read().strip()
                    self.tests = [next_test]
            except OSError:
                pass

        if self.ns.fromfile:
            self.tests = []
            # regex to match 'test_builtin' in line:
            # '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
            regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
            with open(os.path.join(support.SAVEDCWD, self.ns.fromfile)) as fp:
                for line in fp:
                    line = line.split('#', 1)[0]
                    line = line.strip()
                    match = regex.search(line)
                    if match is not None:
                        self.tests.append(match.group())

        removepy(self.tests)

        stdtests = STDTESTS[:]
        nottests = NOTTESTS.copy()
        if self.ns.exclude:
            for arg in self.ns.args:
                if arg in stdtests:
                    stdtests.remove(arg)
                nottests.add(arg)
            self.ns.args = []

        # if testdir is set, then we are not running the python tests suite, so
        # don't add default tests to be executed or skipped (pass empty values)
        if self.ns.testdir:
            alltests = findtests(self.ns.testdir, list(), set())
        else:
            alltests = findtests(self.ns.testdir, stdtests, nottests)

        if not self.ns.fromfile:
            self.selected = self.tests or self.ns.args or alltests
        else:
            self.selected = self.tests
        if self.ns.single:
            self.selected = self.selected[:1]
            try:
                pos = alltests.index(self.selected[0])
                self.next_single_test = alltests[pos + 1]
            except IndexError:
                pass

        # Remove all the selected tests that precede start if it's set.
        if self.ns.start:
            try:
                del self.selected[:self.selected.index(self.ns.start)]
            except ValueError:
                print("Couldn't find starting test (%s), using all tests"
                      % self.ns.start, file=sys.stderr)

        if self.ns.randomize:
            if self.ns.random_seed is None:
                self.ns.random_seed = random.randrange(10000000)
            random.seed(self.ns.random_seed)
            random.shuffle(self.selected)

    def list_tests(self):
        for name in self.selected:
            print(name)

    def _list_cases(self, suite):
        for test in suite:
            if isinstance(test, unittest.loader._FailedTest):
                continue
            if isinstance(test, unittest.TestSuite):
                self._list_cases(test)
            elif isinstance(test, unittest.TestCase):
                if support.match_test(test):
                    print(test.id())

    def list_cases(self):
        support.verbose = False
        support.set_match_tests(self.ns.match_tests)

        for test in self.selected:
            abstest = get_abs_module(self.ns, test)
            try:
                suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
                self._list_cases(suite)
            except unittest.SkipTest:
                self.skipped.append(test)

        if self.skipped:
            print(file=sys.stderr)
            print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr)
            printlist(self.skipped, file=sys.stderr)

    def rerun_failed_tests(self):
        self.ns.verbose = True
        self.ns.failfast = False
        self.ns.verbose3 = False

        self.first_result = self.get_tests_result()

        print()
        print("Re-running failed tests in verbose mode")
        self.rerun = self.bad[:]
        for test in self.rerun:
            print("Re-running test %r in verbose mode" % test, flush=True)
            try:
                self.ns.verbose = True
                ok = runtest(self.ns, test)
            except KeyboardInterrupt:
                self.interrupted = True
                # print a newline separate from the ^C
                print()
                break
            else:
                if ok[0] in {PASSED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED}:
                    self.bad.remove(test)
        else:
            if self.bad:
                print(count(len(self.bad), 'test'), "failed again:")
                printlist(self.bad)

        self.display_result()

    def display_result(self):
        # If running the test suite for PGO then no one cares about results.
        if self.ns.pgo:
            return

        print()
        print("== Tests result: %s ==" % self.get_tests_result())

        if self.interrupted:
            print()
            # print a newline after ^C
            print("Test suite interrupted by signal SIGINT.")
            executed = set(self.good) | set(self.bad) | set(self.skipped)
            omitted = set(self.selected) - executed
            print(count(len(omitted), "test"), "omitted:")
            printlist(omitted)

        if self.good and not self.ns.quiet:
            print()
            if (not self.bad
                and not self.skipped
                and not self.interrupted
                and len(self.good) > 1):
                print("All", end=' ')
            print(count(len(self.good), "test"), "OK.")

        if self.ns.print_slow:
            self.test_times.sort(reverse=True)
            print()
            print("10 slowest tests:")
            for time, test in self.test_times[:10]:
                print("- %s: %s" % (test, format_duration(time)))

        if self.bad:
            print()
            print(count(len(self.bad), "test"), "failed:")
            printlist(self.bad)

        if self.environment_changed:
            print()
            print("{} altered the execution environment:".format(
                     count(len(self.environment_changed), "test")))
            printlist(self.environment_changed)

        if self.skipped and not self.ns.quiet:
            print()
            print(count(len(self.skipped), "test"), "skipped:")
            printlist(self.skipped)

        if self.rerun:
            print()
            print("%s:" % count(len(self.rerun), "re-run test"))
            printlist(self.rerun)

        if self.run_no_tests:
            print()
            print(count(len(self.run_no_tests), "test"), "run no tests:")
            printlist(self.run_no_tests)

    def run_tests_sequential(self):
        if self.ns.trace:
            import trace
            self.tracer = trace.Trace(trace=False, count=True)

        save_modules = sys.modules.keys()

        print("Run tests sequentially")

        previous_test = None
        for test_index, test in enumerate(self.tests, 1):
            start_time = time.monotonic()

            text = test
            if previous_test:
                text = '%s -- %s' % (text, previous_test)
            self.display_progress(test_index, text)

            if self.tracer:
                # If we're tracing code coverage, then we don't exit with status
                # on a false return value from main.
                cmd = ('result = runtest(self.ns, test); '
                       'self.accumulate_result(test, result)')
                ns = dict(locals())
                self.tracer.runctx(cmd, globals=globals(), locals=ns)
                result = ns['result']
            else:
                try:
                    result = runtest(self.ns, test)
                except KeyboardInterrupt:
                    self.interrupted = True
                    self.accumulate_result(test, (INTERRUPTED, None, None))
                    break
                else:
                    self.accumulate_result(test, result)

            previous_test = format_test_result(test, result[0])
            test_time = time.monotonic() - start_time
            if test_time >= PROGRESS_MIN_TIME:
                previous_test = "%s in %s" % (previous_test, format_duration(test_time))
            elif result[0] == PASSED:
                # be quiet: say nothing if the test passed shortly
                previous_test = None

            if self.ns.findleaks:
                gc.collect()
                if gc.garbage:
                    print("Warning: test created", len(gc.garbage), end=' ')
                    print("uncollectable object(s).")
                    # move the uncollectable objects somewhere so we don't see
                    # them again
                    self.found_garbage.extend(gc.garbage)
                    del gc.garbage[:]

            # Unload the newly imported modules (best effort finalization)
            for module in sys.modules.keys():
                if module not in save_modules and module.startswith("test."):
                    support.unload(module)

        if previous_test:
            print(previous_test)

    def _test_forever(self, tests):
        while True:
            for test in tests:
                yield test
                if self.bad:
                    return
                if self.ns.fail_env_changed and self.environment_changed:
                    return

    def display_header(self):
        # Print basic platform information
        print("==", platform.python_implementation(), *sys.version.split())
        print("==", platform.platform(aliased=True),
              "%s-endian" % sys.byteorder)
        print("== cwd:", os.getcwd())
        cpu_count = os.cpu_count()
        if cpu_count:
            print("== CPU count:", cpu_count)
        print("== encodings: locale=%s, FS=%s"
              % (locale.getpreferredencoding(False),
                 sys.getfilesystemencoding()))

    def get_tests_result(self):
        result = []
        if self.bad:
            result.append("FAILURE")
        elif self.ns.fail_env_changed and self.environment_changed:
            result.append("ENV CHANGED")
        elif not any((self.good, self.bad, self.skipped, self.interrupted,
                      self.environment_changed)):
            result.append("NO TEST RUN")

        if self.interrupted:
            result.append("INTERRUPTED")

        if not result:
            result.append("SUCCESS")

        result = ', '.join(result)
        if self.first_result:
            result = '%s then %s' % (self.first_result, result)
        return result

    def run_tests(self):
        # For a partial run, we do not need to clutter the output.
        if (self.ns.header
            or not(self.ns.pgo or self.ns.quiet or self.ns.single
                   or self.tests or self.ns.args)):
            self.display_header()

        if self.ns.huntrleaks:
            warmup, repetitions, _ = self.ns.huntrleaks
            if warmup < 3:
                msg = ("WARNING: Running tests with --huntrleaks/-R and less than "
                       "3 warmup repetitions can give false positives!")
                print(msg, file=sys.stdout, flush=True)

        if self.ns.randomize:
            print("Using random seed", self.ns.random_seed)

        if self.ns.forever:
            self.tests = self._test_forever(list(self.selected))
            self.test_count = ''
            self.test_count_width = 3
        else:
            self.tests = iter(self.selected)
            self.test_count = '/{}'.format(len(self.selected))
            self.test_count_width = len(self.test_count) - 1

        if self.ns.use_mp:
            from test.libregrtest.runtest_mp import run_tests_multiprocess
            run_tests_multiprocess(self)
        else:
            self.run_tests_sequential()

    def finalize(self):
        if self.next_single_filename:
            if self.next_single_test:
                with open(self.next_single_filename, 'w') as fp:
                    fp.write(self.next_single_test + '\n')
            else:
                os.unlink(self.next_single_filename)

        if self.tracer:
            r = self.tracer.results()
            r.write_results(show_missing=True, summary=True,
                            coverdir=self.ns.coverdir)

        print()
        duration = time.monotonic() - self.start_time
        print("Total duration: %s" % format_duration(duration))
        print("Tests result: %s" % self.get_tests_result())

        if self.ns.runleaks:
            os.system("leaks %d" % os.getpid())

    def save_xml_result(self):
        if not self.ns.xmlpath and not self.testsuite_xml:
            return

        import xml.etree.ElementTree as ET
        root = ET.Element("testsuites")

        # Manually count the totals for the overall summary
        totals = {'tests': 0, 'errors': 0, 'failures': 0}
        for suite in self.testsuite_xml:
            root.append(suite)
            for k in totals:
                try:
                    totals[k] += int(suite.get(k, 0))
                except ValueError:
                    pass

        for k, v in totals.items():
            root.set(k, str(v))

        xmlpath = os.path.join(support.SAVEDCWD, self.ns.xmlpath)
        with open(xmlpath, 'wb') as f:
            for s in ET.tostringlist(root):
                f.write(s)

    def main(self, tests=None, **kwargs):
        global TEMPDIR

        if sysconfig.is_python_build():
            try:
                os.mkdir(TEMPDIR)
            except FileExistsError:
                pass

        # Define a writable temp dir that will be used as cwd while running
        # the tests. The name of the dir includes the pid to allow parallel
        # testing (see the -j option).
        test_cwd = 'test_python_{}'.format(os.getpid())
        test_cwd = os.path.join(TEMPDIR, test_cwd)

        # Run the tests in a context manager that temporarily changes the CWD
        # to a temporary and writable directory.  If it's not possible to
        # create or change the CWD, the original CWD will be used.  The
        # original CWD is available from support.SAVEDCWD.
        with support.temp_cwd(test_cwd, quiet=True):
            self._main(tests, kwargs)

    def _main(self, tests, kwargs):
        self.ns = self.parse_args(kwargs)

        if self.ns.huntrleaks:
            warmup, repetitions, _ = self.ns.huntrleaks
            if warmup < 1 or repetitions < 1:
                msg = ("Invalid values for the --huntrleaks/-R parameters. The "
                       "number of warmups and repetitions must be at least 1 "
                       "each (1:1).")
                print(msg, file=sys.stderr, flush=True)
                sys.exit(2)

        if self.ns.worker_args is not None:
            from test.libregrtest.runtest_mp import run_tests_worker
            run_tests_worker(self.ns.worker_args)

        if self.ns.wait:
            input("Press any key to continue...")

        support.PGO = self.ns.pgo

        setup_tests(self.ns)

        self.find_tests(tests)

        if self.ns.list_tests:
            self.list_tests()
            sys.exit(0)

        if self.ns.list_cases:
            self.list_cases()
            sys.exit(0)

        self.run_tests()
        self.display_result()

        if self.ns.verbose2 and self.bad:
            self.rerun_failed_tests()

        self.finalize()

        self.save_xml_result()

        if self.bad:
            sys.exit(2)
        if self.interrupted:
            sys.exit(130)
        if self.ns.fail_env_changed and self.environment_changed:
            sys.exit(3)
        sys.exit(0)


def main(tests=None, **kwargs):
    """Run the Python suite."""
    Regrtest().main(tests=tests, **kwargs)
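The progress prefix built by `display_progress()` above can be reproduced in isolation; a standalone sketch (sample values are made up):

# Standalone sketch of the "0:01:05 load avg: 1.80 [ 51/405/1] test_tcl"
# progress line assembled by Regrtest.display_progress() above.
import datetime

test_index, test_count, test_count_width, fails = 51, '/405', 3, 1
line = f"{test_index:{test_count_width}}{test_count}"
if fails:                                    # failures so far; skipped under --pgo
    line = f"{line}/{fails}"
line = f"[{line}] test_tcl"
line = f"load avg: {1.80:.2f} {line}"        # only added where os.getloadavg() exists
line = f"{datetime.timedelta(seconds=65)} {line}"
print(line)  # 0:01:05 load avg: 1.80 [ 51/405/1] test_tcl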
264 third_party/python/Lib/test/libregrtest/refleak.py vendored Normal file
@@ -0,0 +1,264 @@
import errno
import os
import re
import sys
import warnings
from inspect import isabstract
from test import support


def dash_R(the_module, test, indirect_test, huntrleaks):
    """Run a test multiple times, looking for reference leaks.

    Returns:
        False if the test didn't leak references; True if we detected refleaks.
    """
    # This code is hackish and inelegant, but it seems to do the job.
    import copyreg
    import collections.abc

    if not hasattr(sys, 'gettotalrefcount'):
        raise Exception("Tracking reference leaks requires a debug build "
                        "of Python")

    # Save current values for dash_R_cleanup() to restore.
    fs = warnings.filters[:]
    ps = copyreg.dispatch_table.copy()
    pic = sys.path_importer_cache.copy()
    try:
        import zipimport
    except ImportError:
        zdc = None  # Run unmodified on platforms without zipimport support
    else:
        zdc = zipimport._zip_directory_cache.copy()
    abcs = {}
    for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
        if not isabstract(abc):
            continue
        for obj in abc.__subclasses__() + [abc]:
            abcs[obj] = obj._abc_registry.copy()

    # bpo-31217: Integer pool to get a single integer object for the same
    # value. The pool is used to prevent false alarm when checking for memory
    # block leaks. Fill the pool with values in -1000..1000 which are the most
    # common (reference, memory block, file descriptor) differences.
    int_pool = {value: value for value in range(-1000, 1000)}
    def get_pooled_int(value):
        return int_pool.setdefault(value, value)

    nwarmup, ntracked, fname = huntrleaks
    fname = os.path.join(support.SAVEDCWD, fname)
    repcount = nwarmup + ntracked
    rc_deltas = [0] * repcount
    alloc_deltas = [0] * repcount
    fd_deltas = [0] * repcount

    print("beginning", repcount, "repetitions", file=sys.stderr)
    print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
          flush=True)
    # initialize variables to make pyflakes quiet
    rc_before = alloc_before = fd_before = 0
    for i in range(repcount):
        indirect_test()
        alloc_after, rc_after, fd_after = dash_R_cleanup(fs, ps, pic, zdc,
                                                         abcs)
        print('.', end='', file=sys.stderr, flush=True)
        if i >= nwarmup:
            rc_deltas[i] = get_pooled_int(rc_after - rc_before)
            alloc_deltas[i] = get_pooled_int(alloc_after - alloc_before)
            fd_deltas[i] = get_pooled_int(fd_after - fd_before)
        alloc_before = alloc_after
        rc_before = rc_after
        fd_before = fd_after
    print(file=sys.stderr)

    # These checkers return False on success, True on failure
    def check_rc_deltas(deltas):
        # Checker for reference counters and memory blocks.
        #
        # bpo-30776: Try to ignore false positives:
        #
        #   [3, 0, 0]
        #   [0, 1, 0]
        #   [8, -8, 1]
        #
        # Expected leaks:
        #
        #   [5, 5, 6]
        #   [10, 1, 1]
        return all(delta >= 1 for delta in deltas)

    def check_fd_deltas(deltas):
        return any(deltas)

    failed = False
    for deltas, item_name, checker in [
        (rc_deltas, 'references', check_rc_deltas),
        (alloc_deltas, 'memory blocks', check_rc_deltas),
        (fd_deltas, 'file descriptors', check_fd_deltas)
    ]:
        # ignore warmup runs
        deltas = deltas[nwarmup:]
        if checker(deltas):
            msg = '%s leaked %s %s, sum=%s' % (
                test, deltas, item_name, sum(deltas))
            print(msg, file=sys.stderr, flush=True)
            with open(fname, "a") as refrep:
                print(msg, file=refrep)
                refrep.flush()
            failed = True
    return failed


def dash_R_cleanup(fs, ps, pic, zdc, abcs):
    import gc, copyreg
    import collections.abc
    from weakref import WeakSet

    # Restore some original values.
    warnings.filters[:] = fs
    copyreg.dispatch_table.clear()
    copyreg.dispatch_table.update(ps)
    sys.path_importer_cache.clear()
    sys.path_importer_cache.update(pic)
    try:
        import zipimport
    except ImportError:
        pass  # Run unmodified on platforms without zipimport support
    else:
        zipimport._zip_directory_cache.clear()
        zipimport._zip_directory_cache.update(zdc)

    # clear type cache
    sys._clear_type_cache()

    # Clear ABC registries, restoring previously saved ABC registries.
    abs_classes = [getattr(collections.abc, a) for a in collections.abc.__all__]
    abs_classes = filter(isabstract, abs_classes)
    if 'typing' in sys.modules:
        t = sys.modules['typing']
        # These classes require special treatment because they do not appear
        # in direct subclasses of collections.abc classes
        abs_classes = list(abs_classes) + [t.ChainMap, t.Counter, t.DefaultDict]
    for abc in abs_classes:
        for obj in abc.__subclasses__() + [abc]:
            obj._abc_registry = abcs.get(obj, WeakSet()).copy()
            obj._abc_cache.clear()
            obj._abc_negative_cache.clear()

    clear_caches()

    # Collect cyclic trash and read memory statistics immediately after.
    func1 = sys.getallocatedblocks
    func2 = sys.gettotalrefcount
    gc.collect()
    return func1(), func2(), support.fd_count()


def clear_caches():
    import gc

    # Clear the warnings registry, so they can be displayed again
    for mod in sys.modules.values():
        if hasattr(mod, '__warningregistry__'):
            del mod.__warningregistry__

    # Flush standard output, so that buffered data is sent to the OS and
    # associated Python objects are reclaimed.
    for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
        if stream is not None:
            stream.flush()

    # Clear assorted module caches.
    # Don't worry about resetting the cache if the module is not loaded
    try:
        distutils_dir_util = sys.modules['distutils.dir_util']
    except KeyError:
        pass
    else:
        distutils_dir_util._path_created.clear()
    re.purge()

    try:
        _strptime = sys.modules['_strptime']
    except KeyError:
        pass
    else:
        _strptime._regex_cache.clear()

    try:
        urllib_parse = sys.modules['urllib.parse']
    except KeyError:
        pass
    else:
        urllib_parse.clear_cache()

    try:
        urllib_request = sys.modules['urllib.request']
    except KeyError:
        pass
    else:
        urllib_request.urlcleanup()

    try:
        linecache = sys.modules['linecache']
    except KeyError:
        pass
    else:
        linecache.clearcache()

    try:
        mimetypes = sys.modules['mimetypes']
    except KeyError:
        pass
    else:
        mimetypes._default_mime_types()

    try:
        filecmp = sys.modules['filecmp']
    except KeyError:
        pass
    else:
        filecmp._cache.clear()

    try:
        struct = sys.modules['struct']
    except KeyError:
        pass
    else:
        struct._clearcache()

    try:
        doctest = sys.modules['doctest']
    except KeyError:
        pass
    else:
        doctest.master = None

    try:
        ctypes = sys.modules['ctypes']
    except KeyError:
        pass
    else:
        ctypes._reset_cache()

    try:
        typing = sys.modules['typing']
    except KeyError:
        pass
    else:
        for f in typing._cleanups:
            f()

    gc.collect()


def warm_caches():
    # char cache
    s = bytes(range(256))
    for i in range(256):
        s[i:i+1]
    # unicode cache
    [chr(i) for i in range(256)]
    # int cache
    list(range(-5, 257))
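The heart of the detector is `check_rc_deltas()`: a leak is reported only when every tracked repetition shows a positive delta, which filters out the bpo-30776 noise patterns listed in its comments. A standalone restatement:

# Standalone restatement of the rule used by check_rc_deltas() above.
def leaked(deltas):
    return all(delta >= 1 for delta in deltas)

print(leaked([3, 0, 0]))   # False: one-off noise, ignored
print(leaked([8, -8, 1]))  # False: cache churn, ignored
print(leaked([5, 5, 6]))   # True: leaks on every tracked run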
263 third_party/python/Lib/test/libregrtest/runtest.py vendored Normal file
@@ -0,0 +1,263 @@
import faulthandler
import importlib
import io
import os
import sys
import time
import traceback
import unittest
from test import support
from test.libregrtest.refleak import dash_R, clear_caches
from test.libregrtest.save_env import saved_test_environment


# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5        # error in a child process
TEST_DID_NOT_RUN = -6   # the test did not run any subtests

_FORMAT_TEST_RESULT = {
    PASSED: '%s passed',
    FAILED: '%s failed',
    ENV_CHANGED: '%s failed (env changed)',
    SKIPPED: '%s skipped',
    RESOURCE_DENIED: '%s skipped (resource denied)',
    INTERRUPTED: '%s interrupted',
    CHILD_ERROR: '%s crashed',
    TEST_DID_NOT_RUN: '%s run no tests',
}

# Minimum duration of a test to display its duration or to mention that
# the test is running in background
PROGRESS_MIN_TIME = 30.0   # seconds

# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
    'test_support'
]

# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()


def format_test_result(test_name, result):
    fmt = _FORMAT_TEST_RESULT.get(result, "%s")
    return fmt % test_name


def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
    """Return a list of all applicable test modules."""
    testdir = findtestdir(testdir)
    names = os.listdir(testdir)
    tests = []
    others = set(stdtests) | nottests
    for name in names:
        mod, ext = os.path.splitext(name)
        if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
            tests.append(mod)
    return stdtests + sorted(tests)


def get_abs_module(ns, test):
    if test.startswith('test.') or ns.testdir:
        return test
    else:
        # Always import it from the test package
        return 'test.' + test


def runtest(ns, test):
    """Run a single test.

    ns -- regrtest namespace of options
    test -- the name of the test

    Returns the tuple (result, test_time, xml_data), where result is one
    of the constants:

        INTERRUPTED       KeyboardInterrupt when run under -j
        RESOURCE_DENIED   test skipped because resource denied
        SKIPPED           test skipped for some other reason
        ENV_CHANGED       test failed because it changed the execution environment
        FAILED            test failed
        PASSED            test passed
        TEST_DID_NOT_RUN  test ran no subtests

    If ns.xmlpath is not None, xml_data is a list containing each
    generated testsuite element.
    """

    output_on_failure = ns.verbose3

    use_timeout = (ns.timeout is not None)
    if use_timeout:
        faulthandler.dump_traceback_later(ns.timeout, exit=True)
    try:
        support.set_match_tests(ns.match_tests)
        # reset the environment_altered flag to detect if a test altered
        # the environment
        support.environment_altered = False
        support.junit_xml_list = xml_list = [] if ns.xmlpath else None
        if ns.failfast:
            support.failfast = True
        if output_on_failure:
            support.verbose = True

            stream = io.StringIO()
            orig_stdout = sys.stdout
            orig_stderr = sys.stderr
            try:
                sys.stdout = stream
                sys.stderr = stream
                result = runtest_inner(ns, test, display_failure=False)
                if result[0] != PASSED:
                    output = stream.getvalue()
                    orig_stderr.write(output)
                    orig_stderr.flush()
            finally:
                sys.stdout = orig_stdout
                sys.stderr = orig_stderr
        else:
            support.verbose = ns.verbose  # Tell tests to be moderately quiet
            result = runtest_inner(ns, test, display_failure=not ns.verbose)

        if xml_list:
            import xml.etree.ElementTree as ET
            xml_data = [ET.tostring(x).decode('us-ascii') for x in xml_list]
        else:
            xml_data = None
        return result + (xml_data,)
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()
        cleanup_test_droppings(test, ns.verbose)
        support.junit_xml_list = None


def post_test_cleanup():
    support.reap_children()


def runtest_inner(ns, test, display_failure=True):
    support.unload(test)

    test_time = 0.0
    refleak = False  # True if the test leaked references.
    try:
        abstest = get_abs_module(ns, test)
        clear_caches()
        with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
            start_time = time.time()
            the_module = importlib.import_module(abstest)
            # If the test has a test_main, that will run the appropriate
            # tests.  If not, use normal unittest test loading.
            test_runner = getattr(the_module, "test_main", None)
            if test_runner is None:
                def test_runner():
                    loader = unittest.TestLoader()
                    tests = loader.loadTestsFromModule(the_module)
                    for error in loader.errors:
                        print(error, file=sys.stderr)
                    if loader.errors:
                        raise Exception("errors while loading tests")
                    support.run_unittest(tests)
            if ns.huntrleaks:
                refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
            else:
                test_runner()
            test_time = time.time() - start_time
        post_test_cleanup()
    except support.ResourceDenied as msg:
        if not ns.quiet and not ns.pgo:
            print(test, "skipped --", msg, flush=True)
        return RESOURCE_DENIED, test_time
    except unittest.SkipTest as msg:
        if not ns.quiet and not ns.pgo:
            print(test, "skipped --", msg, flush=True)
        return SKIPPED, test_time
    except KeyboardInterrupt:
        raise
    except support.TestFailed as msg:
        if not ns.pgo:
            if display_failure:
                print("test", test, "failed --", msg, file=sys.stderr,
                      flush=True)
            else:
                print("test", test, "failed", file=sys.stderr, flush=True)
        return FAILED, test_time
    except support.TestDidNotRun:
        return TEST_DID_NOT_RUN, test_time
    except:
        msg = traceback.format_exc()
        if not ns.pgo:
            print("test", test, "crashed --", msg, file=sys.stderr,
                  flush=True)
        return FAILED, test_time
    else:
        if refleak:
            return FAILED, test_time
        if environment.changed:
            return ENV_CHANGED, test_time
        return PASSED, test_time


def cleanup_test_droppings(testname, verbose):
    import shutil
    import stat
    import gc

    # First kill any dangling references to open files etc.
    # This can also issue some ResourceWarnings which would otherwise get
    # triggered during the following test run, and possibly produce failures.
    gc.collect()

    # Try to clean up junk commonly left behind.  While tests shouldn't leave
    # any files or directories behind, when a test fails that can be tedious
    # for it to arrange.  The consequences can be especially nasty on Windows,
    # since if a test leaves a file open, it cannot be deleted by name (while
    # there's nothing we can do about that here either, we can display the
    # name of the offending test, which is a real help).
    for name in (support.TESTFN,
                 "db_home",
                ):
        if not os.path.exists(name):
            continue

        if os.path.isdir(name):
            kind, nuker = "directory", shutil.rmtree
        elif os.path.isfile(name):
            kind, nuker = "file", os.unlink
        else:
            raise SystemError("os.path says %r exists but is neither "
                              "directory nor file" % name)

        if verbose:
            print("%r left behind %s %r" % (testname, kind, name))
        try:
            # if we have chmod, fix possible permissions problems
            # that might prevent cleanup
            if (hasattr(os, 'chmod')):
                os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            nuker(name)
        except Exception as msg:
            print(("%r left behind %s %r and it couldn't be "
                   "removed: %s" % (testname, kind, name, msg)), file=sys.stderr)


def findtestdir(path=None):
    return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
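A small sketch of the constants-to-text mapping above (assuming the module is importable from a CPython checkout):

# Sketch: the result constants above rendered by format_test_result().
from test.libregrtest.runtest import format_test_result, PASSED, ENV_CHANGED

print(format_test_result('test_os', PASSED))       # test_os passed
print(format_test_result('test_os', ENV_CHANGED))  # test_os failed (env changed)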
247
third_party/python/Lib/test/libregrtest/runtest_mp.py
vendored
Normal file
@ -0,0 +1,247 @@
import faulthandler
import json
import os
import queue
import sys
import time
import traceback
import types
from test import support
try:
    import threading
except ImportError:
    print("Multiprocess option requires thread support")
    sys.exit(2)

from test.libregrtest.runtest import (
    runtest, INTERRUPTED, CHILD_ERROR, PROGRESS_MIN_TIME,
    format_test_result)
from test.libregrtest.setup import setup_tests
from test.libregrtest.utils import format_duration


# Display the running tests if nothing happened last N seconds
PROGRESS_UPDATE = 30.0   # seconds

# If interrupted, display the wait progress every N seconds
WAIT_PROGRESS = 2.0   # seconds


def run_test_in_subprocess(testname, ns):
    """Run the given test in a subprocess with --worker-args.

    ns is the option Namespace parsed from command-line arguments. regrtest
    is invoked in a subprocess with the --worker-args argument; when the
    subprocess exits, its return code, stdout and stderr are returned as a
    3-tuple.
    """
    from subprocess import Popen, PIPE

    ns_dict = vars(ns)
    worker_args = (ns_dict, testname)
    worker_args = json.dumps(worker_args)

    cmd = [sys.executable, *support.args_from_interpreter_flags(),
           '-u',    # Unbuffered stdout and stderr
           '-m', 'test.regrtest',
           '--worker-args', worker_args]
    if ns.pgo:
        cmd += ['--pgo']

    # Running the child from the same working directory as regrtest's original
    # invocation ensures that TEMPDIR for the child is the same when
    # sysconfig.is_python_build() is true. See issue 15300.
    popen = Popen(cmd,
                  stdout=PIPE, stderr=PIPE,
                  universal_newlines=True,
                  close_fds=(os.name != 'nt'),
                  cwd=support.SAVEDCWD)
    with popen:
        stdout, stderr = popen.communicate()
        retcode = popen.wait()
    return retcode, stdout, stderr


def run_tests_worker(worker_args):
    ns_dict, testname = json.loads(worker_args)
    ns = types.SimpleNamespace(**ns_dict)

    setup_tests(ns)

    try:
        result = runtest(ns, testname)
    except KeyboardInterrupt:
        result = INTERRUPTED, '', None
    except BaseException as e:
        traceback.print_exc()
        # Keep the result a 3-tuple (status, message, xml_data) so the
        # parent's "assert len(result) == 3" also holds on this path.
        result = CHILD_ERROR, str(e), None

    print()   # Force a newline (just in case)
    print(json.dumps(result), flush=True)
    sys.exit(0)


# We do not use a generator so multiple threads can call next().
class MultiprocessIterator:

    """A thread-safe iterator over tests for multiprocess mode."""

    def __init__(self, tests):
        self.interrupted = False
        self.lock = threading.Lock()
        self.tests = tests

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            if self.interrupted:
                raise StopIteration('tests interrupted')
            return next(self.tests)


class MultiprocessThread(threading.Thread):
    def __init__(self, pending, output, ns):
        super().__init__()
        self.pending = pending
        self.output = output
        self.ns = ns
        self.current_test = None
        self.start_time = None

    def _runtest(self):
        try:
            test = next(self.pending)
        except StopIteration:
            self.output.put((None, None, None, None))
            return True

        try:
            self.start_time = time.monotonic()
            self.current_test = test

            retcode, stdout, stderr = run_test_in_subprocess(test, self.ns)
        finally:
            self.current_test = None

        if retcode != 0:
            result = (CHILD_ERROR, "Exit code %s" % retcode, None)
            self.output.put((test, stdout.rstrip(), stderr.rstrip(),
                             result))
            return False

        stdout, _, result = stdout.strip().rpartition("\n")
        if not result:
            self.output.put((None, None, None, None))
            return True

        result = json.loads(result)
        assert len(result) == 3, f"Invalid result tuple: {result!r}"
        self.output.put((test, stdout.rstrip(), stderr.rstrip(),
                         result))
        return False

    def run(self):
        try:
            stop = False
            while not stop:
                stop = self._runtest()
        except BaseException:
            self.output.put((None, None, None, None))
            raise


def run_tests_multiprocess(regrtest):
    output = queue.Queue()
    pending = MultiprocessIterator(regrtest.tests)
    test_timeout = regrtest.ns.timeout
    use_timeout = (test_timeout is not None)

    workers = [MultiprocessThread(pending, output, regrtest.ns)
               for i in range(regrtest.ns.use_mp)]
    print("Run tests in parallel using %s child processes"
          % len(workers))
    for worker in workers:
        worker.start()

    def get_running(workers):
        running = []
        for worker in workers:
            current_test = worker.current_test
            if not current_test:
                continue
            dt = time.monotonic() - worker.start_time
            if dt >= PROGRESS_MIN_TIME:
                text = '%s (%s)' % (current_test, format_duration(dt))
                running.append(text)
        return running

    finished = 0
    test_index = 1
    get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
    try:
        while finished < regrtest.ns.use_mp:
            if use_timeout:
                faulthandler.dump_traceback_later(test_timeout, exit=True)

            try:
                item = output.get(timeout=get_timeout)
            except queue.Empty:
                running = get_running(workers)
                if running and not regrtest.ns.pgo:
                    print('running: %s' % ', '.join(running), flush=True)
                continue

            test, stdout, stderr, result = item
            if test is None:
                finished += 1
                continue
            regrtest.accumulate_result(test, result)

            # Display progress
            ok, test_time, xml_data = result
            text = format_test_result(test, ok)
            if (ok not in (CHILD_ERROR, INTERRUPTED)
                and test_time >= PROGRESS_MIN_TIME
                and not regrtest.ns.pgo):
                text += ' (%s)' % format_duration(test_time)
            elif ok == CHILD_ERROR:
                text = '%s (%s)' % (text, test_time)
            running = get_running(workers)
            if running and not regrtest.ns.pgo:
                text += ' -- running: %s' % ', '.join(running)
            regrtest.display_progress(test_index, text)

            # Copy stdout and stderr from the child process
            if stdout:
                print(stdout, flush=True)
            if stderr and not regrtest.ns.pgo:
                print(stderr, file=sys.stderr, flush=True)

            if result[0] == INTERRUPTED:
                raise KeyboardInterrupt
            test_index += 1
    except KeyboardInterrupt:
        regrtest.interrupted = True
        pending.interrupted = True
        print()
    finally:
        if use_timeout:
            faulthandler.cancel_dump_traceback_later()

    # If tests are interrupted, wait until tests complete
    wait_start = time.monotonic()
    while True:
        running = [worker.current_test for worker in workers]
        running = list(filter(bool, running))
        if not running:
            break

        dt = time.monotonic() - wait_start
        line = "Waiting for %s (%s tests)" % (', '.join(running), len(running))
        if dt >= WAIT_PROGRESS:
            line = "%s since %.0f sec" % (line, dt)
        print(line, flush=True)
        for worker in workers:
            worker.join(WAIT_PROGRESS)
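The parent and worker above speak plain JSON: (vars(ns), testname) rides in on --worker-args, and the worker prints a JSON result tuple as the last line of its stdout, which the parent peels off with rpartition('\n'). A self-contained round-trip of that framing, using made-up test data rather than the real runner (encode_worker_args and parse_worker_stdout are illustrative names, not part of the vendored module):

# Sketch only -- exercises the JSON framing used above, not the real runner.
import json

def encode_worker_args(ns_dict, testname):
    return json.dumps((ns_dict, testname))

def parse_worker_stdout(stdout):
    # Mirrors MultiprocessThread._runtest(): the result rides on the last
    # line; everything before it is the test's own output.
    stdout, _, result = stdout.strip().rpartition("\n")
    return stdout, (json.loads(result) if result else None)

args = encode_worker_args({'verbose': 0}, 'test_os')
assert json.loads(args) == [{'verbose': 0}, 'test_os']

out, result = parse_worker_stdout("some test output\n[1, 0.25, null]\n")
assert out == "some test output"
assert len(result) == 3   # (status, test_time, xml_data), as asserted above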
290
third_party/python/Lib/test/libregrtest/save_env.py
vendored
Normal file
@ -0,0 +1,290 @@
import builtins
import locale
import logging
import os
import shutil
import sys
import sysconfig
import warnings
from test import support
try:
    import threading
except ImportError:
    threading = None
try:
    import _multiprocessing, multiprocessing.process
except ImportError:
    multiprocessing = None


# Unit tests are supposed to leave the execution environment unchanged
# once they complete.  But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests.  This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.

class saved_test_environment:
    """Save bits of the test environment and restore them at block exit.

        with saved_test_environment(testname, verbose, quiet):
            #stuff

    Unless quiet is True, a warning is printed to stderr if any of
    the saved items was changed by the test.  The attribute 'changed'
    is initially False, but is set to True if a change is detected.

    If verbose is more than 1, the before and after state of changed
    items is also printed.
    """

    changed = False

    def __init__(self, testname, verbose=0, quiet=False, *, pgo=False):
        self.testname = testname
        self.verbose = verbose
        self.quiet = quiet
        self.pgo = pgo

    # To add things to save and restore, add a name XXX to the resources list
    # and add corresponding get_XXX/restore_XXX functions.  get_XXX should
    # return the value to be saved and compared against a second call to the
    # get function when test execution completes.  restore_XXX should accept
    # the saved value and restore the resource using it.  It will be called if
    # and only if a change in the value is detected.
    #
    # Note: XXX will have any '.' replaced with '_' characters when determining
    # the corresponding method names.

    resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
                 'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
                 'warnings.filters', 'asyncore.socket_map',
                 'logging._handlers', 'logging._handlerList', 'sys.gettrace',
                 'sys.warnoptions',
                 # multiprocessing.process._cleanup() may release ref
                 # to a thread, so check processes first.
                 'multiprocessing.process._dangling', 'threading._dangling',
                 'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
                 'files', 'locale', 'warnings.showwarning',
                 'shutil_archive_formats', 'shutil_unpack_formats',
                )

    def get_sys_argv(self):
        return id(sys.argv), sys.argv, sys.argv[:]
    def restore_sys_argv(self, saved_argv):
        sys.argv = saved_argv[1]
        sys.argv[:] = saved_argv[2]

    def get_cwd(self):
        return os.getcwd()
    def restore_cwd(self, saved_cwd):
        os.chdir(saved_cwd)

    def get_sys_stdout(self):
        return sys.stdout
    def restore_sys_stdout(self, saved_stdout):
        sys.stdout = saved_stdout

    def get_sys_stderr(self):
        return sys.stderr
    def restore_sys_stderr(self, saved_stderr):
        sys.stderr = saved_stderr

    def get_sys_stdin(self):
        return sys.stdin
    def restore_sys_stdin(self, saved_stdin):
        sys.stdin = saved_stdin

    def get_os_environ(self):
        return id(os.environ), os.environ, dict(os.environ)
    def restore_os_environ(self, saved_environ):
        os.environ = saved_environ[1]
        os.environ.clear()
        os.environ.update(saved_environ[2])

    def get_sys_path(self):
        return id(sys.path), sys.path, sys.path[:]
    def restore_sys_path(self, saved_path):
        sys.path = saved_path[1]
        sys.path[:] = saved_path[2]

    def get_sys_path_hooks(self):
        return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
    def restore_sys_path_hooks(self, saved_hooks):
        sys.path_hooks = saved_hooks[1]
        sys.path_hooks[:] = saved_hooks[2]

    def get_sys_gettrace(self):
        return sys.gettrace()
    def restore_sys_gettrace(self, trace_fxn):
        sys.settrace(trace_fxn)

    def get___import__(self):
        return builtins.__import__
    def restore___import__(self, import_):
        builtins.__import__ = import_

    def get_warnings_filters(self):
        return id(warnings.filters), warnings.filters, warnings.filters[:]
    def restore_warnings_filters(self, saved_filters):
        warnings.filters = saved_filters[1]
        warnings.filters[:] = saved_filters[2]

    def get_asyncore_socket_map(self):
        asyncore = sys.modules.get('asyncore')
        # XXX Making a copy keeps objects alive until __exit__ gets called.
        return asyncore and asyncore.socket_map.copy() or {}
    def restore_asyncore_socket_map(self, saved_map):
        asyncore = sys.modules.get('asyncore')
        if asyncore is not None:
            asyncore.close_all(ignore_all=True)
            asyncore.socket_map.update(saved_map)

    def get_shutil_archive_formats(self):
        # we could call get_archives_formats() but that only returns the
        # registry keys; we want to check the values too (the functions that
        # are registered)
        return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
    def restore_shutil_archive_formats(self, saved):
        shutil._ARCHIVE_FORMATS = saved[0]
        shutil._ARCHIVE_FORMATS.clear()
        shutil._ARCHIVE_FORMATS.update(saved[1])

    def get_shutil_unpack_formats(self):
        return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
    def restore_shutil_unpack_formats(self, saved):
        shutil._UNPACK_FORMATS = saved[0]
        shutil._UNPACK_FORMATS.clear()
        shutil._UNPACK_FORMATS.update(saved[1])

    def get_logging__handlers(self):
        # _handlers is a WeakValueDictionary
        return id(logging._handlers), logging._handlers, logging._handlers.copy()
    def restore_logging__handlers(self, saved_handlers):
        # Can't easily revert the logging state
        pass

    def get_logging__handlerList(self):
        # _handlerList is a list of weakrefs to handlers
        return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
    def restore_logging__handlerList(self, saved_handlerList):
        # Can't easily revert the logging state
        pass

    def get_sys_warnoptions(self):
        return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
    def restore_sys_warnoptions(self, saved_options):
        sys.warnoptions = saved_options[1]
        sys.warnoptions[:] = saved_options[2]

    # Controlling dangling references to Thread objects can make it easier
    # to track reference leaks.
    def get_threading__dangling(self):
        if not threading:
            return None
        # This copies the weakrefs without making any strong reference
        return threading._dangling.copy()
    def restore_threading__dangling(self, saved):
        if not threading:
            return
        threading._dangling.clear()
        threading._dangling.update(saved)

    # Same for Process objects
    def get_multiprocessing_process__dangling(self):
        if not multiprocessing:
            return None
        # Unjoined process objects can survive after process exits
        multiprocessing.process._cleanup()
        # This copies the weakrefs without making any strong reference
        return multiprocessing.process._dangling.copy()
    def restore_multiprocessing_process__dangling(self, saved):
        if not multiprocessing:
            return
        multiprocessing.process._dangling.clear()
        multiprocessing.process._dangling.update(saved)

    def get_sysconfig__CONFIG_VARS(self):
        # make sure the dict is initialized
        sysconfig.get_config_var('prefix')
        return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
                dict(sysconfig._CONFIG_VARS))
    def restore_sysconfig__CONFIG_VARS(self, saved):
        sysconfig._CONFIG_VARS = saved[1]
        sysconfig._CONFIG_VARS.clear()
        sysconfig._CONFIG_VARS.update(saved[2])

    def get_sysconfig__INSTALL_SCHEMES(self):
        return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
                sysconfig._INSTALL_SCHEMES.copy())
    def restore_sysconfig__INSTALL_SCHEMES(self, saved):
        sysconfig._INSTALL_SCHEMES = saved[1]
        sysconfig._INSTALL_SCHEMES.clear()
        sysconfig._INSTALL_SCHEMES.update(saved[2])

    def get_files(self):
        return sorted(fn + ('/' if os.path.isdir(fn) else '')
                      for fn in os.listdir())
    def restore_files(self, saved_value):
        fn = support.TESTFN
        if fn not in saved_value and (fn + '/') not in saved_value:
            if os.path.isfile(fn):
                support.unlink(fn)
            elif os.path.isdir(fn):
                support.rmtree(fn)

    _lc = [getattr(locale, lc) for lc in dir(locale)
           if lc.startswith('LC_')]
    def get_locale(self):
        pairings = []
        for lc in self._lc:
            try:
                pairings.append((lc, locale.setlocale(lc, None)))
            except (TypeError, ValueError):
                continue
        return pairings
    def restore_locale(self, saved):
        for lc, setting in saved:
            locale.setlocale(lc, setting)

    def get_warnings_showwarning(self):
        return warnings.showwarning
    def restore_warnings_showwarning(self, fxn):
        warnings.showwarning = fxn

    def resource_info(self):
        for name in self.resources:
            method_suffix = name.replace('.', '_')
            get_name = 'get_' + method_suffix
            restore_name = 'restore_' + method_suffix
            yield name, getattr(self, get_name), getattr(self, restore_name)

    def __enter__(self):
        self.saved_values = dict((name, get()) for name, get, restore
                                 in self.resource_info())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        saved_values = self.saved_values
        del self.saved_values

        # Some resources use weak references
        support.gc_collect()

        # Read support.environment_altered, set by support helper functions
        self.changed |= support.environment_altered

        for name, get, restore in self.resource_info():
            current = get()
            original = saved_values.pop(name)
            # Check for changes to the resource's value
            if current != original:
                self.changed = True
                restore(original)
                if not self.quiet and not self.pgo:
                    print(f"Warning -- {name} was modified by {self.testname}",
                          file=sys.stderr, flush=True)
                    print(f"  Before: {original}\n  After:  {current} ",
                          file=sys.stderr, flush=True)
        return False
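saved_test_environment snapshots each named resource on entry, re-reads it on exit, and restores and warns when the two differ. The same get/compare/restore pattern, shrunk to a single resource so the mechanism is visible (the saved_sys_path class below is illustrative, not the vendored one):

# Sketch only -- the save/compare/restore idea behind
# saved_test_environment, reduced to one resource (sys.path).
import sys

class saved_sys_path:
    def __enter__(self):
        # Save identity, the object itself, and a shallow copy,
        # exactly like get_sys_path() above.
        self.saved = id(sys.path), sys.path, sys.path[:]
        self.changed = False
        return self

    def __exit__(self, *exc):
        if (id(sys.path), sys.path, sys.path[:]) != self.saved:
            self.changed = True
            sys.path = self.saved[1]
            sys.path[:] = self.saved[2]
        return False

with saved_sys_path() as env:
    sys.path.insert(0, '/tmp/bogus')   # a misbehaving "test"
assert env.changed and '/tmp/bogus' not in sys.path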
124
third_party/python/Lib/test/libregrtest/setup.py
vendored
Normal file
@ -0,0 +1,124 @@
import atexit
import faulthandler
import os
import signal
import sys
import unittest
from test import support
try:
    import gc
except ImportError:
    gc = None

from test.libregrtest.refleak import warm_caches


def setup_tests(ns):
    try:
        stderr_fd = sys.__stderr__.fileno()
    except (ValueError, AttributeError):
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        #
        # Catch AttributeError for stderr being None.
        stderr_fd = None
    else:
        # Display the Python traceback on fatal errors (e.g. segfault)
        faulthandler.enable(all_threads=True, file=stderr_fd)

        # Display the Python traceback on SIGALRM or SIGUSR1 signal
        signals = []
        if hasattr(signal, 'SIGALRM'):
            signals.append(signal.SIGALRM)
        if hasattr(signal, 'SIGUSR1'):
            signals.append(signal.SIGUSR1)
        for signum in signals:
            faulthandler.register(signum, chain=True, file=stderr_fd)

    replace_stdout()
    support.record_original_stdout(sys.stdout)

    if ns.testdir:
        # Prepend test directory to sys.path, so runtest() will be able
        # to locate tests
        sys.path.insert(0, os.path.abspath(ns.testdir))

    # Sometimes __path__ and __file__ are not absolute (e.g. while running
    # from Lib/) and, if we change the CWD to run the tests in a temporary
    # dir, some imports might fail.  This affects only the modules imported
    # before os.chdir().  These modules are searched first in sys.path[0]
    # (so '' -- the CWD) and if they are found in the CWD their __file__ and
    # __path__ will be relative (this happens before the chdir).  All the
    # modules imported after the chdir are not found in the CWD, and since
    # the other paths in sys.path[1:] are absolute (site.py absolutizes
    # them), the __file__ and __path__ will be absolute too.  Therefore it
    # is necessary to absolutize manually the __file__ and __path__ of the
    # packages to prevent later imports from failing when the CWD is
    # different.
    for module in sys.modules.values():
        if hasattr(module, '__path__'):
            for index, path in enumerate(module.__path__):
                module.__path__[index] = os.path.abspath(path)
        if getattr(module, '__file__', None):
            module.__file__ = os.path.abspath(module.__file__)

    if ns.huntrleaks:
        unittest.BaseTestSuite._cleanup = False

        # Avoid false positives due to various caches
        # filling slowly with random data:
        warm_caches()

    if ns.memlimit is not None:
        support.set_memlimit(ns.memlimit)

    if ns.threshold is not None:
        gc.set_threshold(ns.threshold)

    try:
        import msvcrt
    except ImportError:
        pass
    else:
        msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
                            msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
                            msvcrt.SEM_NOGPFAULTERRORBOX|
                            msvcrt.SEM_NOOPENFILEERRORBOX)
        try:
            msvcrt.CrtSetReportMode
        except AttributeError:
            # release build
            pass
        else:
            for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
                if ns.verbose and ns.verbose >= 2:
                    msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
                    msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
                else:
                    msvcrt.CrtSetReportMode(m, 0)

    support.use_resources = ns.use_resources


def replace_stdout():
    """Set stdout encoder error handler to backslashreplace (as stderr error
    handler) to avoid UnicodeEncodeError when printing a traceback"""
    stdout = sys.stdout
    try:
        fd = stdout.fileno()
    except ValueError:
        # On IDLE, sys.stdout has no file descriptor and is not a
        # TextIOWrapper object.  Leave sys.stdout unchanged.
        #
        # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
        # and ValueError on a closed stream.
        return

    sys.stdout = open(fd, 'w',
                      encoding=stdout.encoding,
                      errors="backslashreplace",
                      closefd=False,
                      newline='\n')

    def restore_stdout():
        sys.stdout.close()
        sys.stdout = stdout
    atexit.register(restore_stdout)
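setup_tests() points faulthandler at the real stderr file descriptor so crash tracebacks survive a test rebinding sys.stderr, and replace_stdout() switches stdout to the backslashreplace error handler so printing a traceback can never itself raise UnicodeEncodeError. A small demonstration of why that handler is the safe choice, independent of the regrtest machinery:

# Sketch only -- shows why replace_stdout() picks errors="backslashreplace".
text = 'caf\u00e9 \udcff'   # a lone surrogate, as can appear in file paths

# Strict encoding raises; backslashreplace degrades gracefully.
try:
    text.encode('ascii')
except UnicodeEncodeError:
    pass
assert text.encode('ascii', 'backslashreplace') == b'caf\\xe9 \\udcff'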
56
third_party/python/Lib/test/libregrtest/utils.py
vendored
Normal file
@ -0,0 +1,56 @@
import os.path
import math
import textwrap


def format_duration(seconds):
    ms = math.ceil(seconds * 1e3)
    seconds, ms = divmod(ms, 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)

    parts = []
    if hours:
        parts.append('%s hour' % hours)
    if minutes:
        parts.append('%s min' % minutes)
    if seconds:
        parts.append('%s sec' % seconds)
    if ms:
        parts.append('%s ms' % ms)
    if not parts:
        return '0 ms'

    parts = parts[:2]
    return ' '.join(parts)


def removepy(names):
    if not names:
        return
    for idx, name in enumerate(names):
        basename, ext = os.path.splitext(name)
        if ext == '.py':
            names[idx] = basename


def count(n, word):
    if n == 1:
        return "%d %s" % (n, word)
    else:
        return "%d %ss" % (n, word)


def printlist(x, width=70, indent=4, file=None):
    """Print the elements of iterable x to stdout.

    Optional arg width (default 70) is the maximum line length.
    Optional arg indent (default 4) is the number of blanks with which to
    begin each line.
    """

    blanks = ' ' * indent
    # Print the sorted list: 'x' may be a '--random' list or a set()
    print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
                        initial_indent=blanks, subsequent_indent=blanks),
          file=file)
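The helpers above are small pure functions, so a few expected values pin down their behavior; this sketch assumes only the file above. Note that format_duration truncates to the two most significant units.

# Sketch only -- expected outputs of the helpers defined above.
from test.libregrtest.utils import format_duration, count, removepy

assert format_duration(0) == '0 ms'
assert format_duration(1.5) == '1 sec 500 ms'
assert format_duration(3661) == '1 hour 1 min'   # truncated to two parts

assert count(1, 'test') == '1 test'
assert count(3, 'test') == '3 tests'

names = ['test_os.py', 'test_sys']
removepy(names)
assert names == ['test_os', 'test_sys']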