Mirror of https://github.com/jart/cosmopolitan.git, synced 2025-05-22 21:32:31 +00:00
python-3.6.zip added from GitHub

README.cosmo contains the necessary links.
parent 75fc601ff5
commit 0c4c56ff39

4219 changed files with 1,968,626 additions and 0 deletions
140  third_party/python/Lib/__future__.py  (vendored, new file)
@@ -0,0 +1,140 @@
"""Record of phased-in incompatible language changes.

Each line is of the form:

    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
                              CompilerFlag ")"

where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:

    (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
     PY_MINOR_VERSION, # the 1; an int
     PY_MICRO_VERSION, # the 0; an int
     PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
     PY_RELEASE_SERIAL # the 3; an int
    )

OptionalRelease records the first release in which

    from __future__ import FeatureName

was accepted.

In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.

Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need

    from __future__ import FeatureName

to use the feature in question, but may continue to use such imports.

MandatoryRelease may also be None, meaning that a planned feature got
dropped.

Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().

CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code.  This flag is stored in the .compiler_flag
attribute on _Feature instances.  These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.

No feature line is ever to be deleted from this file.
"""

all_feature_names = [
    "nested_scopes",
    "generators",
    "division",
    "absolute_import",
    "with_statement",
    "print_function",
    "unicode_literals",
    "barry_as_FLUFL",
    "generator_stop",
]

__all__ = ["all_feature_names"] + all_feature_names

# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here.  However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010                      # nested_scopes
CO_GENERATOR_ALLOWED = 0                # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000             # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000      # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000       # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000      # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000    # unicode string literals
CO_FUTURE_BARRY_AS_BDFL = 0x40000
CO_FUTURE_GENERATOR_STOP = 0x80000      # StopIteration becomes RuntimeError in generators

class _Feature:
    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
        self.optional = optionalRelease
        self.mandatory = mandatoryRelease
        self.compiler_flag = compiler_flag

    def getOptionalRelease(self):
        """Return first release in which this feature was recognized.

        This is a 5-tuple, of the same form as sys.version_info.
        """
        return self.optional

    def getMandatoryRelease(self):
        """Return release in which this feature will become mandatory.

        This is a 5-tuple, of the same form as sys.version_info, or, if
        the feature was dropped, is None.
        """
        return self.mandatory

    def __repr__(self):
        return "_Feature" + repr((self.optional,
                                  self.mandatory,
                                  self.compiler_flag))

nested_scopes = _Feature((2, 1, 0, "beta", 1),
                         (2, 2, 0, "alpha", 0),
                         CO_NESTED)

generators = _Feature((2, 2, 0, "alpha", 1),
                      (2, 3, 0, "final", 0),
                      CO_GENERATOR_ALLOWED)

division = _Feature((2, 2, 0, "alpha", 2),
                    (3, 0, 0, "alpha", 0),
                    CO_FUTURE_DIVISION)

absolute_import = _Feature((2, 5, 0, "alpha", 1),
                           (3, 0, 0, "alpha", 0),
                           CO_FUTURE_ABSOLUTE_IMPORT)

with_statement = _Feature((2, 5, 0, "alpha", 1),
                          (2, 6, 0, "alpha", 0),
                          CO_FUTURE_WITH_STATEMENT)

print_function = _Feature((2, 6, 0, "alpha", 2),
                          (3, 0, 0, "alpha", 0),
                          CO_FUTURE_PRINT_FUNCTION)

unicode_literals = _Feature((2, 6, 0, "alpha", 2),
                            (3, 0, 0, "alpha", 0),
                            CO_FUTURE_UNICODE_LITERALS)

barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
                          (3, 9, 0, "alpha", 0),
                          CO_FUTURE_BARRY_AS_BDFL)

generator_stop = _Feature((3, 5, 0, "beta", 1),
                          (3, 7, 0, "alpha", 0),
                          CO_FUTURE_GENERATOR_STOP)
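As the docstring above explains, each feature's compiler_flag is the bitfield
value to pass in the flags argument of the built-in compile() so that
dynamically compiled code picks up the feature. A minimal illustrative sketch
(not part of the vendored file):

    import __future__

    src = "result = 1 / 2\n"
    # Passing division.compiler_flag requests true division for the
    # compiled code object (a no-op on Python 3, where it is the default).
    code = compile(src, "<demo>", "exec",
                   flags=__future__.division.compiler_flag)
    ns = {}
    exec(code, ns)
    assert ns["result"] == 0.5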
1  third_party/python/Lib/__phello__.foo.py  (vendored, new file)
@@ -0,0 +1 @@
# This file exists as a helper for the test.test_frozen module.
34  third_party/python/Lib/_bootlocale.py  (vendored, new file)
@@ -0,0 +1,34 @@
"""A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.

Don't import directly from third-party code; use the `locale` module instead!
"""

import sys
import _locale

if sys.platform.startswith("win"):
    def getpreferredencoding(do_setlocale=True):
        return _locale._getdefaultlocale()[1]
else:
    try:
        _locale.CODESET
    except AttributeError:
        def getpreferredencoding(do_setlocale=True):
            # This path for legacy systems needs the more complex
            # getdefaultlocale() function, import the full locale module.
            import locale
            return locale.getpreferredencoding(do_setlocale)
    else:
        def getpreferredencoding(do_setlocale=True):
            assert not do_setlocale
            result = _locale.nl_langinfo(_locale.CODESET)
            if not result and sys.platform == 'darwin':
                # nl_langinfo can return an empty string
                # when the setting has an invalid value.
                # Default to UTF-8 in that case because
                # UTF-8 is the default charset on OSX and
                # returning nothing will crash the
                # interpreter.
                result = 'UTF-8'
            return result
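For reference, a sketch of how this startup helper is meant to be called;
_bootlocale is an interpreter-private module in this Python version, so
third-party code should use the full locale module instead:

    import _bootlocale

    # False matches the interpreter-startup call path; the nl_langinfo
    # branch above asserts that do_setlocale is false.
    enc = _bootlocale.getpreferredencoding(False)  # e.g. 'UTF-8'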
1011  third_party/python/Lib/_collections_abc.py  (vendored, new file)
File diff suppressed because it is too large.
251  third_party/python/Lib/_compat_pickle.py  (vendored, new file)
@@ -0,0 +1,251 @@
# This module is used to map the old Python 2 names to the new names used in
# Python 3 for the pickle module.  This is needed to make pickle streams
# generated with Python 2 loadable by Python 3.

# This is a copy of lib2to3.fixes.fix_imports.MAPPING.  We cannot import
# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
# Thus, this could cause the module to be imported recursively.
IMPORT_MAPPING = {
    '__builtin__' : 'builtins',
    'copy_reg': 'copyreg',
    'Queue': 'queue',
    'SocketServer': 'socketserver',
    'ConfigParser': 'configparser',
    'repr': 'reprlib',
    'tkFileDialog': 'tkinter.filedialog',
    'tkSimpleDialog': 'tkinter.simpledialog',
    'tkColorChooser': 'tkinter.colorchooser',
    'tkCommonDialog': 'tkinter.commondialog',
    'Dialog': 'tkinter.dialog',
    'Tkdnd': 'tkinter.dnd',
    'tkFont': 'tkinter.font',
    'tkMessageBox': 'tkinter.messagebox',
    'ScrolledText': 'tkinter.scrolledtext',
    'Tkconstants': 'tkinter.constants',
    'Tix': 'tkinter.tix',
    'ttk': 'tkinter.ttk',
    'Tkinter': 'tkinter',
    'markupbase': '_markupbase',
    '_winreg': 'winreg',
    'thread': '_thread',
    'dummy_thread': '_dummy_thread',
    'dbhash': 'dbm.bsd',
    'dumbdbm': 'dbm.dumb',
    'dbm': 'dbm.ndbm',
    'gdbm': 'dbm.gnu',
    'xmlrpclib': 'xmlrpc.client',
    'SimpleXMLRPCServer': 'xmlrpc.server',
    'httplib': 'http.client',
    'htmlentitydefs' : 'html.entities',
    'HTMLParser' : 'html.parser',
    'Cookie': 'http.cookies',
    'cookielib': 'http.cookiejar',
    'BaseHTTPServer': 'http.server',
    'test.test_support': 'test.support',
    'commands': 'subprocess',
    'urlparse' : 'urllib.parse',
    'robotparser' : 'urllib.robotparser',
    'urllib2': 'urllib.request',
    'anydbm': 'dbm',
    '_abcoll' : 'collections.abc',
}


# This contains rename rules that are easy to handle.  We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
# These rules should be run before import names are fixed.
NAME_MAPPING = {
    ('__builtin__', 'xrange'): ('builtins', 'range'),
    ('__builtin__', 'reduce'): ('functools', 'reduce'),
    ('__builtin__', 'intern'): ('sys', 'intern'),
    ('__builtin__', 'unichr'): ('builtins', 'chr'),
    ('__builtin__', 'unicode'): ('builtins', 'str'),
    ('__builtin__', 'long'): ('builtins', 'int'),
    ('itertools', 'izip'): ('builtins', 'zip'),
    ('itertools', 'imap'): ('builtins', 'map'),
    ('itertools', 'ifilter'): ('builtins', 'filter'),
    ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
    ('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
    ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
    ('UserList', 'UserList'): ('collections', 'UserList'),
    ('UserString', 'UserString'): ('collections', 'UserString'),
    ('whichdb', 'whichdb'): ('dbm', 'whichdb'),
    ('_socket', 'fromfd'): ('socket', 'fromfd'),
    ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
    ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
    ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
    ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
    ('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
    ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
    ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
    ('urllib', 'quote'): ('urllib.parse', 'quote'),
    ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
    ('urllib', 'unquote'): ('urllib.parse', 'unquote'),
    ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
    ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
    ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
    ('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
    ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
    ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
    ('urllib2', 'URLError'): ('urllib.error', 'URLError'),
}

PYTHON2_EXCEPTIONS = (
    "ArithmeticError",
    "AssertionError",
    "AttributeError",
    "BaseException",
    "BufferError",
    "BytesWarning",
    "DeprecationWarning",
    "EOFError",
    "EnvironmentError",
    "Exception",
    "FloatingPointError",
    "FutureWarning",
    "GeneratorExit",
    "IOError",
    "ImportError",
    "ImportWarning",
    "IndentationError",
    "IndexError",
    "KeyError",
    "KeyboardInterrupt",
    "LookupError",
    "MemoryError",
    "NameError",
    "NotImplementedError",
    "OSError",
    "OverflowError",
    "PendingDeprecationWarning",
    "ReferenceError",
    "RuntimeError",
    "RuntimeWarning",
    # StandardError is gone in Python 3, so we map it to Exception
    "StopIteration",
    "SyntaxError",
    "SyntaxWarning",
    "SystemError",
    "SystemExit",
    "TabError",
    "TypeError",
    "UnboundLocalError",
    "UnicodeDecodeError",
    "UnicodeEncodeError",
    "UnicodeError",
    "UnicodeTranslateError",
    "UnicodeWarning",
    "UserWarning",
    "ValueError",
    "Warning",
    "ZeroDivisionError",
)

try:
    WindowsError
except NameError:
    pass
else:
    PYTHON2_EXCEPTIONS += ("WindowsError",)

for excname in PYTHON2_EXCEPTIONS:
    NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)

MULTIPROCESSING_EXCEPTIONS = (
    'AuthenticationError',
    'BufferTooShort',
    'ProcessError',
    'TimeoutError',
)

for excname in MULTIPROCESSING_EXCEPTIONS:
    NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)

# Same, but for 3.x to 2.x
REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)

# Non-mutual mappings.

IMPORT_MAPPING.update({
    'cPickle': 'pickle',
    '_elementtree': 'xml.etree.ElementTree',
    'FileDialog': 'tkinter.filedialog',
    'SimpleDialog': 'tkinter.simpledialog',
    'DocXMLRPCServer': 'xmlrpc.server',
    'SimpleHTTPServer': 'http.server',
    'CGIHTTPServer': 'http.server',
    # For compatibility with broken pickles saved in old Python 3 versions
    'UserDict': 'collections',
    'UserList': 'collections',
    'UserString': 'collections',
    'whichdb': 'dbm',
    'StringIO': 'io',
    'cStringIO': 'io',
})

REVERSE_IMPORT_MAPPING.update({
    '_bz2': 'bz2',
    '_dbm': 'dbm',
    '_functools': 'functools',
    '_gdbm': 'gdbm',
    '_pickle': 'pickle',
})

NAME_MAPPING.update({
    ('__builtin__', 'basestring'): ('builtins', 'str'),
    ('exceptions', 'StandardError'): ('builtins', 'Exception'),
    ('UserDict', 'UserDict'): ('collections', 'UserDict'),
    ('socket', '_socketobject'): ('socket', 'SocketType'),
})

REVERSE_NAME_MAPPING.update({
    ('_functools', 'reduce'): ('__builtin__', 'reduce'),
    ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
    ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
    ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
    ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
    ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
    ('xmlrpc.server', 'XMLRPCDocGenerator'):
        ('DocXMLRPCServer', 'XMLRPCDocGenerator'),
    ('xmlrpc.server', 'DocXMLRPCRequestHandler'):
        ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
    ('xmlrpc.server', 'DocXMLRPCServer'):
        ('DocXMLRPCServer', 'DocXMLRPCServer'),
    ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
        ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
    ('http.server', 'SimpleHTTPRequestHandler'):
        ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
    ('http.server', 'CGIHTTPRequestHandler'):
        ('CGIHTTPServer', 'CGIHTTPRequestHandler'),
    ('_socket', 'socket'): ('socket', '_socketobject'),
})

PYTHON3_OSERROR_EXCEPTIONS = (
    'BrokenPipeError',
    'ChildProcessError',
    'ConnectionAbortedError',
    'ConnectionError',
    'ConnectionRefusedError',
    'ConnectionResetError',
    'FileExistsError',
    'FileNotFoundError',
    'InterruptedError',
    'IsADirectoryError',
    'NotADirectoryError',
    'PermissionError',
    'ProcessLookupError',
    'TimeoutError',
)

for excname in PYTHON3_OSERROR_EXCEPTIONS:
    REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')

PYTHON3_IMPORTERROR_EXCEPTIONS = (
    'ModuleNotFoundError',
)

for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
    REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')
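These tables are consulted internally by pickle.Unpickler when it is given
fix_imports=True while loading a protocol <= 2 stream written by Python 2.
A small sketch of the lookups involved (illustrative only):

    from _compat_pickle import IMPORT_MAPPING, NAME_MAPPING

    # Module rename: a Python 2 stream importing __builtin__ resolves
    # to the Python 3 builtins module.
    assert IMPORT_MAPPING['__builtin__'] == 'builtins'
    # Qualified-name rename: __builtin__.unicode unpickles as builtins.str.
    assert NAME_MAPPING[('__builtin__', 'unicode')] == ('builtins', 'str')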
152  third_party/python/Lib/_compression.py  (vendored, new file)
@@ -0,0 +1,152 @@
"""Internal classes used by the gzip, lzma and bz2 modules"""

import io


BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE  # Compressed data read chunk size


class BaseStream(io.BufferedIOBase):
    """Mode-checking helper functions."""

    def _check_not_closed(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def _check_can_read(self):
        if not self.readable():
            raise io.UnsupportedOperation("File not open for reading")

    def _check_can_write(self):
        if not self.writable():
            raise io.UnsupportedOperation("File not open for writing")

    def _check_can_seek(self):
        if not self.readable():
            raise io.UnsupportedOperation("Seeking is only supported "
                                          "on files open for reading")
        if not self.seekable():
            raise io.UnsupportedOperation("The underlying file object "
                                          "does not support seeking")


class DecompressReader(io.RawIOBase):
    """Adapts the decompressor API to a RawIOBase reader API"""

    def readable(self):
        return True

    def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args):
        self._fp = fp
        self._eof = False
        self._pos = 0  # Current offset in decompressed stream

        # Set to size of decompressed stream once it is known, for SEEK_END
        self._size = -1

        # Save the decompressor factory and arguments.
        # If the file contains multiple compressed streams, each
        # stream will need a separate decompressor object. A new decompressor
        # object is also needed when implementing a backwards seek().
        self._decomp_factory = decomp_factory
        self._decomp_args = decomp_args
        self._decompressor = self._decomp_factory(**self._decomp_args)

        # Exception class to catch from decompressor signifying invalid
        # trailing data to ignore
        self._trailing_error = trailing_error

    def close(self):
        self._decompressor = None
        return super().close()

    def seekable(self):
        return self._fp.seekable()

    def readinto(self, b):
        with memoryview(b) as view, view.cast("B") as byte_view:
            data = self.read(len(byte_view))
            byte_view[:len(data)] = data
        return len(data)

    def read(self, size=-1):
        if size < 0:
            return self.readall()

        if not size or self._eof:
            return b""
        data = None  # Default if EOF is encountered
        # Depending on the input data, our call to the decompressor may not
        # return any data. In this case, try again after reading another block.
        while True:
            if self._decompressor.eof:
                rawblock = (self._decompressor.unused_data or
                            self._fp.read(BUFFER_SIZE))
                if not rawblock:
                    break
                # Continue to next stream.
                self._decompressor = self._decomp_factory(
                    **self._decomp_args)
                try:
                    data = self._decompressor.decompress(rawblock, size)
                except self._trailing_error:
                    # Trailing data isn't a valid compressed stream; ignore it.
                    break
            else:
                if self._decompressor.needs_input:
                    rawblock = self._fp.read(BUFFER_SIZE)
                    if not rawblock:
                        raise EOFError("Compressed file ended before the "
                                       "end-of-stream marker was reached")
                else:
                    rawblock = b""
                data = self._decompressor.decompress(rawblock, size)
            if data:
                break
        if not data:
            self._eof = True
            self._size = self._pos
            return b""
        self._pos += len(data)
        return data

    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        self._fp.seek(0)
        self._eof = False
        self._pos = 0
        self._decompressor = self._decomp_factory(**self._decomp_args)

    def seek(self, offset, whence=io.SEEK_SET):
        # Recalculate offset as an absolute file position.
        if whence == io.SEEK_SET:
            pass
        elif whence == io.SEEK_CUR:
            offset = self._pos + offset
        elif whence == io.SEEK_END:
            # Seeking relative to EOF - we need to know the file's size.
            if self._size < 0:
                while self.read(io.DEFAULT_BUFFER_SIZE):
                    pass
            offset = self._size + offset
        else:
            raise ValueError("Invalid value for whence: {}".format(whence))

        # Make it so that offset is the number of bytes to skip forward.
        if offset < self._pos:
            self._rewind()
        else:
            offset -= self._pos

        # Read and discard data until we reach the desired position.
        while offset > 0:
            data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset))
            if not data:
                break
            offset -= len(data)

        return self._pos

    def tell(self):
        """Return the current file position."""
        return self._pos
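DecompressReader is how lzma.LZMAFile and bz2.BZ2File turn a one-shot
decompressor object into a readable, seekable stream (gzip subclasses it with
a custom read()). A sketch of the same wiring using lzma, assuming an
in-memory file for illustration:

    import io
    import lzma
    import _compression

    payload = lzma.compress(b"hello world")
    raw = _compression.DecompressReader(
        io.BytesIO(payload),
        lzma.LZMADecompressor,           # decomp_factory
        trailing_error=lzma.LZMAError)   # ignore garbage after the stream
    stream = io.BufferedReader(raw)
    assert stream.read() == b"hello world"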
163  third_party/python/Lib/_dummy_thread.py  (vendored, new file)
@@ -0,0 +1,163 @@
"""Drop-in replacement for the thread module.

Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.

Suggested usage is::

    try:
        import _thread
    except ImportError:
        import _dummy_thread as _thread

"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
           'interrupt_main', 'LockType']

# A dummy value
TIMEOUT_MAX = 2**31

# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided.  Instead, all
# imports are done when needed on a function-by-function basis.  Since threads
# are disabled, the import lock should not be an issue anyway (??).

error = RuntimeError

def start_new_thread(function, args, kwargs={}):
    """Dummy implementation of _thread.start_new_thread().

    Compatibility is maintained by making sure that ``args`` is a
    tuple and ``kwargs`` is a dictionary.  If an exception is raised
    and it is SystemExit (which can be done by _thread.exit()) it is
    caught and nothing is done; all other exceptions are printed out
    by using traceback.print_exc().

    If the executed function calls interrupt_main the KeyboardInterrupt will be
    raised when the function returns.

    """
    if type(args) != type(tuple()):
        raise TypeError("2nd arg must be a tuple")
    if type(kwargs) != type(dict()):
        raise TypeError("3rd arg must be a dict")
    global _main
    _main = False
    try:
        function(*args, **kwargs)
    except SystemExit:
        pass
    except:
        import traceback
        traceback.print_exc()
    _main = True
    global _interrupt
    if _interrupt:
        _interrupt = False
        raise KeyboardInterrupt

def exit():
    """Dummy implementation of _thread.exit()."""
    raise SystemExit

def get_ident():
    """Dummy implementation of _thread.get_ident().

    Since this module should only be used when _threadmodule is not
    available, it is safe to assume that the current process is the
    only thread.  Thus a constant can be safely returned.
    """
    return -1

def allocate_lock():
    """Dummy implementation of _thread.allocate_lock()."""
    return LockType()

def stack_size(size=None):
    """Dummy implementation of _thread.stack_size()."""
    if size is not None:
        raise error("setting thread stack size not supported")
    return 0

def _set_sentinel():
    """Dummy implementation of _thread._set_sentinel()."""
    return LockType()

class LockType(object):
    """Class implementing dummy implementation of _thread.LockType.

    Compatibility is maintained by maintaining self.locked_status
    which is a boolean that stores the state of the lock.  Pickling of
    the lock, though, should not be done since if the _thread module is
    then used with an unpickled ``lock()`` from here problems could
    occur from this class not having atomic methods.

    """

    def __init__(self):
        self.locked_status = False

    def acquire(self, waitflag=None, timeout=-1):
        """Dummy implementation of acquire().

        For blocking calls, self.locked_status is automatically set to
        True and returned appropriately based on value of
        ``waitflag``.  If it is non-blocking, then the value is
        actually checked and not set if it is already acquired.  This
        is all done so that threading.Condition's assert statements
        aren't triggered and throw a little fit.

        """
        if waitflag is None or waitflag:
            self.locked_status = True
            return True
        else:
            if not self.locked_status:
                self.locked_status = True
                return True
            else:
                if timeout > 0:
                    import time
                    time.sleep(timeout)
                return False

    __enter__ = acquire

    def __exit__(self, typ, val, tb):
        self.release()

    def release(self):
        """Release the dummy lock."""
        # XXX Perhaps shouldn't actually bother to test?  Could lead
        # to problems for complex, threaded code.
        if not self.locked_status:
            raise error
        self.locked_status = False
        return True

    def locked(self):
        return self.locked_status

    def __repr__(self):
        return "<%s %s.%s object at %s>" % (
            "locked" if self.locked_status else "unlocked",
            self.__class__.__module__,
            self.__class__.__qualname__,
            hex(id(self))
        )

# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True

def interrupt_main():
    """Set _interrupt flag to True to have start_new_thread raise
    KeyboardInterrupt upon exiting."""
    if _main:
        raise KeyboardInterrupt
    else:
        global _interrupt
        _interrupt = True
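The fallback import the docstring suggests, plus the dummy lock acting as a
context manager via __enter__/__exit__ (illustrative sketch):

    try:
        import _thread
    except ImportError:
        import _dummy_thread as _thread

    lock = _thread.allocate_lock()
    with lock:                  # acquire() then release()
        assert lock.locked()
    assert not lock.locked()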
395  third_party/python/Lib/_markupbase.py  (vendored, new file)
@@ -0,0 +1,395 @@
"""Shared support for scanning document type declarations in HTML and XHTML.

This module is used as a foundation for the html.parser module.  It has no
documented public API and should not be used directly.

"""

import re

_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')

# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf

_msmarkedsectionclose = re.compile(r']\s*>')

del re


class ParserBase:
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers."""

    def __init__(self):
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "_markupbase.ParserBase must be subclassed")

    def error(self, message):
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")

    def reset(self):
        self.lineno = 1
        self.offset = 0

    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset

    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
    def updatepos(self, i, j):
        if i >= j:
            return j
        rawdata = self.rawdata
        nlines = rawdata.count("\n", i, j)
        if nlines:
            self.lineno = self.lineno + nlines
            pos = rawdata.rindex("\n", i, j)  # Should not fail
            self.offset = j-(pos+1)
        else:
            self.offset = self.offset + j-i
        return j

    _decl_otherchars = ''

    # Internal -- parse declaration (for use by subclasses).
    def parse_declaration(self, i):
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        # ATTLIST, NOTATION, SHORTREF, USEMAP,
        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--': #comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[': #marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else: #all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            self._decl_otherchars = ''
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    # According to the HTML5 specs sections "8.2.4.44 Bogus
                    # comment state" and "8.2.4.45 Markup declaration open
                    # state", a comment token should be emitted.
                    # Calling unknown_decl provides more flexibility though.
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1  # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in {"attlist", "linktype", "link", "element"}:
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1  # incomplete

    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    def parse_marked_section(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name(i+3, i)
        if j < 0:
            return j
        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
            # look for standard ]]> ending
            match = _markedsectionclose.search(rawdata, i+3)
        elif sectName in {"if", "else", "endif"}:
            # look for MS Office ]> ending
            match = _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            self.error('unexpected call to parse_comment()')
        match = _commentclose.search(rawdata, i+4)
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.handle_comment(rawdata[i+4: j])
        return match.end(0)

    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in {"attlist", "element", "entity", "notation"}:
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1

    # Internal -- scan past <!ELEMENT declarations
    def _parse_doctype_element(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j == -1:
            return -1
        # style content model; just skip until '>'
        rawdata = self.rawdata
        if '>' in rawdata[j:]:
            return rawdata.find(">", j) + 1
        return -1

    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            return -1
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            #   name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1

    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1  # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan a name token and the new position and the token, or
    # return -1 if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])

    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        pass
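ParserBase is only usable through a subclass that supplies rawdata and the
handler hooks, which is what html.parser.HTMLParser does. A minimal
hypothetical subclass, shown only to illustrate the parse_declaration()
contract:

    import _markupbase

    class MiniParser(_markupbase.ParserBase):
        def __init__(self, data):
            super().__init__()
            self.reset()
            self.rawdata = data
            self.decls = []
        def handle_decl(self, data):
            self.decls.append(data)

    p = MiniParser("<!DOCTYPE html>")
    end = p.parse_declaration(0)   # index just past the closing '>'
    assert p.decls == ["DOCTYPE html"]
    assert end == len("<!DOCTYPE html>")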
502  third_party/python/Lib/_osx_support.py  (vendored, new file)
|
@ -0,0 +1,502 @@
|
|||
"""Shared OS X support functions."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
__all__ = [
|
||||
'compiler_fixup',
|
||||
'customize_config_vars',
|
||||
'customize_compiler',
|
||||
'get_platform_osx',
|
||||
]
|
||||
|
||||
# configuration variables that may contain universal build flags,
|
||||
# like "-arch" or "-isdkroot", that may need customization for
|
||||
# the user environment
|
||||
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
|
||||
'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
|
||||
'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
|
||||
'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS')
|
||||
|
||||
# configuration variables that may contain compiler calls
|
||||
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
|
||||
|
||||
# prefix added to original configuration variable names
|
||||
_INITPRE = '_OSX_SUPPORT_INITIAL_'
|
||||
|
||||
|
||||
def _find_executable(executable, path=None):
|
||||
"""Tries to find 'executable' in the directories listed in 'path'.
|
||||
|
||||
A string listing directories separated by 'os.pathsep'; defaults to
|
||||
os.environ['PATH']. Returns the complete filename or None if not found.
|
||||
"""
|
||||
if path is None:
|
||||
path = os.environ['PATH']
|
||||
|
||||
paths = path.split(os.pathsep)
|
||||
base, ext = os.path.splitext(executable)
|
||||
|
||||
if (sys.platform == 'win32') and (ext != '.exe'):
|
||||
executable = executable + '.exe'
|
||||
|
||||
if not os.path.isfile(executable):
|
||||
for p in paths:
|
||||
f = os.path.join(p, executable)
|
||||
if os.path.isfile(f):
|
||||
# the file exists, we have a shot at spawn working
|
||||
return f
|
||||
return None
|
||||
else:
|
||||
return executable
|
||||
|
||||
|
||||
def _read_output(commandstring):
|
||||
"""Output from successful command execution or None"""
|
||||
# Similar to os.popen(commandstring, "r").read(),
|
||||
# but without actually using os.popen because that
|
||||
# function is not usable during python bootstrap.
|
||||
# tempfile is also not available then.
|
||||
import contextlib
|
||||
try:
|
||||
import tempfile
|
||||
fp = tempfile.NamedTemporaryFile()
|
||||
except ImportError:
|
||||
fp = open("/tmp/_osx_support.%s"%(
|
||||
os.getpid(),), "w+b")
|
||||
|
||||
with contextlib.closing(fp) as fp:
|
||||
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
|
||||
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
|
||||
|
||||
|
||||
def _find_build_tool(toolname):
|
||||
"""Find a build tool on current path or using xcrun"""
|
||||
return (_find_executable(toolname)
|
||||
or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
|
||||
or ''
|
||||
)
|
||||
|
||||
_SYSTEM_VERSION = None
|
||||
|
||||
def _get_system_version():
|
||||
"""Return the OS X system version as a string"""
|
||||
# Reading this plist is a documented way to get the system
|
||||
# version (see the documentation for the Gestalt Manager)
|
||||
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
|
||||
# the build of Python itself (distutils is used to build standard library
|
||||
# extensions).
|
||||
|
||||
global _SYSTEM_VERSION
|
||||
|
||||
if _SYSTEM_VERSION is None:
|
||||
_SYSTEM_VERSION = ''
|
||||
try:
|
||||
f = open('/System/Library/CoreServices/SystemVersion.plist')
|
||||
except OSError:
|
||||
# We're on a plain darwin box, fall back to the default
|
||||
# behaviour.
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
|
||||
r'<string>(.*?)</string>', f.read())
|
||||
finally:
|
||||
f.close()
|
||||
if m is not None:
|
||||
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
|
||||
# else: fall back to the default behaviour
|
||||
|
||||
return _SYSTEM_VERSION
|
||||
|
||||
def _remove_original_values(_config_vars):
|
||||
"""Remove original unmodified values for testing"""
|
||||
# This is needed for higher-level cross-platform tests of get_platform.
|
||||
for k in list(_config_vars):
|
||||
if k.startswith(_INITPRE):
|
||||
del _config_vars[k]
|
||||
|
||||
def _save_modified_value(_config_vars, cv, newvalue):
|
||||
"""Save modified and original unmodified value of configuration var"""
|
||||
|
||||
oldvalue = _config_vars.get(cv, '')
|
||||
if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
|
||||
_config_vars[_INITPRE + cv] = oldvalue
|
||||
_config_vars[cv] = newvalue
|
||||
|
||||
def _supports_universal_builds():
|
||||
"""Returns True if universal builds are supported on this system"""
|
||||
# As an approximation, we assume that if we are running on 10.4 or above,
|
||||
# then we are running with an Xcode environment that supports universal
|
||||
# builds, in particular -isysroot and -arch arguments to the compiler. This
|
||||
# is in support of allowing 10.4 universal builds to run on 10.3.x systems.
|
||||
|
||||
osx_version = _get_system_version()
|
||||
if osx_version:
|
||||
try:
|
||||
osx_version = tuple(int(i) for i in osx_version.split('.'))
|
||||
except ValueError:
|
||||
osx_version = ''
|
||||
return bool(osx_version >= (10, 4)) if osx_version else False
|
||||
|
||||
|
||||
def _find_appropriate_compiler(_config_vars):
|
||||
"""Find appropriate C compiler for extension module builds"""
|
||||
|
||||
# Issue #13590:
|
||||
# The OSX location for the compiler varies between OSX
|
||||
# (or rather Xcode) releases. With older releases (up-to 10.5)
|
||||
# the compiler is in /usr/bin, with newer releases the compiler
|
||||
# can only be found inside Xcode.app if the "Command Line Tools"
|
||||
# are not installed.
|
||||
#
|
||||
# Furthermore, the compiler that can be used varies between
|
||||
# Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
|
||||
# as the compiler, after that 'clang' should be used because
|
||||
# gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
|
||||
# miscompiles Python.
|
||||
|
||||
# skip checks if the compiler was overridden with a CC env variable
|
||||
if 'CC' in os.environ:
|
||||
return _config_vars
|
||||
|
||||
# The CC config var might contain additional arguments.
|
||||
# Ignore them while searching.
|
||||
cc = oldcc = _config_vars['CC'].split()[0]
|
||||
if not _find_executable(cc):
|
||||
# Compiler is not found on the shell search PATH.
|
||||
# Now search for clang, first on PATH (if the Command LIne
|
||||
# Tools have been installed in / or if the user has provided
|
||||
# another location via CC). If not found, try using xcrun
|
||||
# to find an uninstalled clang (within a selected Xcode).
|
||||
|
||||
# NOTE: Cannot use subprocess here because of bootstrap
|
||||
# issues when building Python itself (and os.popen is
|
||||
# implemented on top of subprocess and is therefore not
|
||||
# usable as well)
|
||||
|
||||
cc = _find_build_tool('clang')
|
||||
|
||||
elif os.path.basename(cc).startswith('gcc'):
|
||||
# Compiler is GCC, check if it is LLVM-GCC
|
||||
data = _read_output("'%s' --version"
|
||||
% (cc.replace("'", "'\"'\"'"),))
|
||||
if data and 'llvm-gcc' in data:
|
||||
# Found LLVM-GCC, fall back to clang
|
||||
cc = _find_build_tool('clang')
|
||||
|
||||
if not cc:
|
||||
raise SystemError(
|
||||
"Cannot locate working compiler")
|
||||
|
||||
if cc != oldcc:
|
||||
# Found a replacement compiler.
|
||||
# Modify config vars using new compiler, if not already explicitly
|
||||
# overridden by an env variable, preserving additional arguments.
|
||||
for cv in _COMPILER_CONFIG_VARS:
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
cv_split = _config_vars[cv].split()
|
||||
cv_split[0] = cc if cv != 'CXX' else cc + '++'
|
||||
_save_modified_value(_config_vars, cv, ' '.join(cv_split))
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _remove_universal_flags(_config_vars):
|
||||
"""Remove all universal build arguments from config vars"""
|
||||
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
# Do not alter a config var explicitly overridden by env var
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
|
||||
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _remove_unsupported_archs(_config_vars):
|
||||
"""Remove any unsupported archs from config vars"""
|
||||
# Different Xcode releases support different sets for '-arch'
|
||||
# flags. In particular, Xcode 4.x no longer supports the
|
||||
# PPC architectures.
|
||||
#
|
||||
# This code automatically removes '-arch ppc' and '-arch ppc64'
|
||||
# when these are not supported. That makes it possible to
|
||||
# build extensions on OSX 10.7 and later with the prebuilt
|
||||
# 32-bit installer on the python.org website.
|
||||
|
||||
# skip checks if the compiler was overridden with a CC env variable
|
||||
if 'CC' in os.environ:
|
||||
return _config_vars
|
||||
|
||||
if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
|
||||
# NOTE: Cannot use subprocess here because of bootstrap
|
||||
# issues when building Python itself
|
||||
status = os.system(
|
||||
"""echo 'int main{};' | """
|
||||
"""'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
|
||||
%(_config_vars['CC'].replace("'", "'\"'\"'"),))
|
||||
if status:
|
||||
# The compile failed for some reason. Because of differences
|
||||
# across Xcode and compiler versions, there is no reliable way
|
||||
# to be sure why it failed. Assume here it was due to lack of
|
||||
# PPC support and remove the related '-arch' flags from each
|
||||
# config variables not explicitly overridden by an environment
|
||||
# variable. If the error was for some other reason, we hope the
|
||||
# failure will show up again when trying to compile an extension
|
||||
# module.
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _override_all_archs(_config_vars):
|
||||
"""Allow override of all archs with ARCHFLAGS env var"""
|
||||
# NOTE: This name was introduced by Apple in OSX 10.5 and
|
||||
# is used by several scripting languages distributed with
|
||||
# that OS release.
|
||||
if 'ARCHFLAGS' in os.environ:
|
||||
arch = os.environ['ARCHFLAGS']
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
if cv in _config_vars and '-arch' in _config_vars[cv]:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
|
||||
flags = flags + ' ' + arch
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _check_for_unavailable_sdk(_config_vars):
|
||||
"""Remove references to any SDKs not available"""
|
||||
# If we're on OSX 10.5 or later and the user tries to
|
||||
# compile an extension using an SDK that is not present
|
||||
# on the current machine it is better to not use an SDK
|
||||
# than to fail. This is particularly important with
|
||||
# the standalone Command Line Tools alternative to a
|
||||
# full-blown Xcode install since the CLT packages do not
|
||||
# provide SDKs. If the SDK is not present, it is assumed
|
||||
# that the header files and dev libs have been installed
|
||||
# to /usr and /System/Library by either a standalone CLT
|
||||
# package or the CLT component within Xcode.
|
||||
cflags = _config_vars.get('CFLAGS', '')
|
||||
m = re.search(r'-isysroot\s+(\S+)', cflags)
|
||||
if m is not None:
|
||||
sdk = m.group(1)
|
||||
if not os.path.exists(sdk):
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
# Do not alter a config var explicitly overridden by env var
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def compiler_fixup(compiler_so, cc_args):
|
||||
"""
|
||||
This function will strip '-isysroot PATH' and '-arch ARCH' from the
|
||||
compile flags if the user has specified one them in extra_compile_flags.
|
||||
|
||||
This is needed because '-arch ARCH' adds another architecture to the
|
||||
build, without a way to remove an architecture. Furthermore GCC will
|
||||
barf if multiple '-isysroot' arguments are present.
|
||||
"""
|
||||
stripArch = stripSysroot = False
|
||||
|
||||
compiler_so = list(compiler_so)
|
||||
|
||||
if not _supports_universal_builds():
|
||||
# OSX before 10.4.0, these don't support -arch and -isysroot at
|
||||
# all.
|
||||
stripArch = stripSysroot = True
|
||||
else:
|
||||
stripArch = '-arch' in cc_args
|
||||
stripSysroot = '-isysroot' in cc_args
|
||||
|
||||
if stripArch or 'ARCHFLAGS' in os.environ:
|
||||
while True:
|
||||
try:
|
||||
index = compiler_so.index('-arch')
|
||||
# Strip this argument and the next one:
|
||||
del compiler_so[index:index+2]
|
||||
except ValueError:
|
||||
break
|
||||
|
||||
if 'ARCHFLAGS' in os.environ and not stripArch:
|
||||
# User specified different -arch flags in the environ,
|
||||
# see also distutils.sysconfig
|
||||
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
|
||||
|
||||
if stripSysroot:
|
||||
while True:
|
||||
try:
|
||||
index = compiler_so.index('-isysroot')
|
||||
# Strip this argument and the next one:
|
||||
del compiler_so[index:index+2]
|
||||
except ValueError:
|
||||
break
|
||||
|
||||
# Check if the SDK that is used during compilation actually exists,
|
||||
# the universal build requires the usage of a universal SDK and not all
|
||||
# users have that installed by default.
|
||||
sysroot = None
|
||||
if '-isysroot' in cc_args:
|
||||
idx = cc_args.index('-isysroot')
|
||||
sysroot = cc_args[idx+1]
|
||||
elif '-isysroot' in compiler_so:
|
||||
idx = compiler_so.index('-isysroot')
|
||||
sysroot = compiler_so[idx+1]
|
||||
|
||||
if sysroot and not os.path.isdir(sysroot):
|
||||
from distutils import log
|
||||
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
|
||||
sysroot)
|
||||
log.warn("Please check your Xcode installation")
|
||||
|
||||
return compiler_so
|
||||
|
||||
|
||||

def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping
    containing name/value pairs parsed from the configured
    makefile used to build this interpreter.  Returns
    the mapping updated as needed to reflect the environment
    in which the interpreter is running; in the case of
    a Python from a binary installer, the installed
    environment may be very different from the build
    environment, i.e. different OS levels, different
    build tools, different available CPU architectures.

    This customization is performed whenever
    distutils.sysconfig.get_config_vars() is first
    called.  It may be used in environments where no
    compilers are present, i.e. when installing pure
    Python dists.  Customization of compiler paths
    and detection of unavailable archs is deferred
    until the first extension module build is
    requested (in distutils.sysconfig.customize_compiler).

    Currently called from distutils.sysconfig
    """

    if not _supports_universal_builds():
        # On Mac OS X before 10.4, check if -arch and -isysroot
        # are in CFLAGS or LDFLAGS and remove them if they are.
        # This is needed when building extensions on a 10.3 system
        # using a universal build of python.
        _remove_universal_flags(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    # Remove references to sdks that are not found
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars

def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first
    extension module build is requested
    (in distutils.sysconfig.customize_compiler).
    """

    # Find a compiler to use for extension module builds
    _find_appropriate_compiler(_config_vars)

    # Remove ppc arch flags if not supported here
    _remove_unsupported_archs(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    return _config_vars

def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform()"""
    # called from get_platform() in sysconfig and distutils.util
    #
    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to.  This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.

    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE+'CFLAGS',
                                  _config_vars.get('CFLAGS', ''))
        if macrelease:
            try:
                macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
            except ValueError:
                macrelease = (10, 0)
        else:
            # assume no universal support
            macrelease = (10, 0)

        if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
            # The universal build will build fat binaries, but not on
            # systems before 10.4

            machine = 'fat'

            archs = re.findall(r'-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                    "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant
            if sys.maxsize >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxsize >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
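
A minimal usage sketch (illustrative; assumes a macOS host of 10.4 or later so
the universal-build branch is taken):

    cfg = {'MACOSX_DEPLOYMENT_TARGET': '10.6',
           'CFLAGS': '-arch i386 -arch x86_64 -O2'}
    # get_platform_osx(cfg, 'posix', '', 'i386')
    # -> ('macosx', '10.6', 'intel'), since i386+x86_64 maps to 'intel'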

6449
third_party/python/Lib/_pydecimal.py
vendored
Normal file
File diff suppressed because it is too large

2532
third_party/python/Lib/_pyio.py
vendored
Normal file
File diff suppressed because it is too large

103
third_party/python/Lib/_sitebuiltins.py
vendored
Normal file
@@ -0,0 +1,103 @@
"""
|
||||
The objects used by the site module to add custom builtins.
|
||||
"""
|
||||
|
||||
# Those objects are almost immortal and they keep a reference to their module
|
||||
# globals. Defining them in the site module would keep too many references
|
||||
# alive.
|
||||
# Note this means this module should also avoid keep things alive in its
|
||||
# globals.
|
||||
|
||||
import sys
|
||||
|
||||
class Quitter(object):
|
||||
def __init__(self, name, eof):
|
||||
self.name = name
|
||||
self.eof = eof
|
||||
def __repr__(self):
|
||||
return 'Use %s() or %s to exit' % (self.name, self.eof)
|
||||
def __call__(self, code=None):
|
||||
# Shells like IDLE catch the SystemExit, but listen when their
|
||||
# stdin wrapper is closed.
|
||||
try:
|
||||
sys.stdin.close()
|
||||
except:
|
||||
pass
|
||||
raise SystemExit(code)
|
||||
|
||||
|
||||
class _Printer(object):
|
||||
"""interactive prompt objects for printing the license text, a list of
|
||||
contributors and the copyright notice."""
|
||||
|
||||
MAXLINES = 23
|
||||
|
||||
def __init__(self, name, data, files=(), dirs=()):
|
||||
import os
|
||||
self.__name = name
|
||||
self.__data = data
|
||||
self.__lines = None
|
||||
self.__filenames = [os.path.join(dir, filename)
|
||||
for dir in dirs
|
||||
for filename in files]
|
||||
|
||||
def __setup(self):
|
||||
if self.__lines:
|
||||
return
|
||||
data = None
|
||||
for filename in self.__filenames:
|
||||
try:
|
||||
with open(filename, "r") as fp:
|
||||
data = fp.read()
|
||||
break
|
||||
except OSError:
|
||||
pass
|
||||
if not data:
|
||||
data = self.__data
|
||||
self.__lines = data.split('\n')
|
||||
self.__linecnt = len(self.__lines)
|
||||
|
||||
def __repr__(self):
|
||||
self.__setup()
|
||||
if len(self.__lines) <= self.MAXLINES:
|
||||
return "\n".join(self.__lines)
|
||||
else:
|
||||
return "Type %s() to see the full %s text" % ((self.__name,)*2)
|
||||
|
||||
def __call__(self):
|
||||
self.__setup()
|
||||
prompt = 'Hit Return for more, or q (and Return) to quit: '
|
||||
lineno = 0
|
||||
while 1:
|
||||
try:
|
||||
for i in range(lineno, lineno + self.MAXLINES):
|
||||
print(self.__lines[i])
|
||||
except IndexError:
|
||||
break
|
||||
else:
|
||||
lineno += self.MAXLINES
|
||||
key = None
|
||||
while key is None:
|
||||
key = input(prompt)
|
||||
if key not in ('', 'q'):
|
||||
key = None
|
||||
if key == 'q':
|
||||
break
|
||||
|
||||
|
||||
class _Helper(object):
|
||||
"""Define the builtin 'help'.
|
||||
|
||||
This is a wrapper around pydoc.help that provides a helpful message
|
||||
when 'help' is typed at the Python interactive prompt.
|
||||
|
||||
Calling help() at the Python prompt starts an interactive help session.
|
||||
Calling help(thing) prints help for the python object 'thing'.
|
||||
"""
|
||||
|
||||
def __repr__(self):
|
||||
return "Type help() for interactive help, " \
|
||||
"or help(object) for help about object."
|
||||
def __call__(self, *args, **kwds):
|
||||
import pydoc
|
||||
return pydoc.help(*args, **kwds)
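
A sketch of how the site module typically wires these objects into builtins
(illustrative, not part of this file):

    import builtins
    import _sitebuiltins

    builtins.quit = _sitebuiltins.Quitter('quit', 'Ctrl-D (i.e. EOF)')
    builtins.help = _sitebuiltins._Helper()
    # repr(builtins.quit) -> 'Use quit() or Ctrl-D (i.e. EOF) to exit'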

576
third_party/python/Lib/_strptime.py
vendored
Normal file
@@ -0,0 +1,576 @@
"""Strptime-related classes and functions.
|
||||
|
||||
CLASSES:
|
||||
LocaleTime -- Discovers and stores locale-specific time information
|
||||
TimeRE -- Creates regexes for pattern matching a string of text containing
|
||||
time information
|
||||
|
||||
FUNCTIONS:
|
||||
_getlang -- Figure out what language is being used for the locale
|
||||
strptime -- Calculates the time struct represented by the passed-in string
|
||||
|
||||
"""
|
||||
import time
|
||||
import locale
|
||||
import calendar
|
||||
from re import compile as re_compile
|
||||
from re import IGNORECASE
|
||||
from re import escape as re_escape
|
||||
from datetime import (date as datetime_date,
|
||||
timedelta as datetime_timedelta,
|
||||
timezone as datetime_timezone)
|
||||
try:
|
||||
from _thread import allocate_lock as _thread_allocate_lock
|
||||
except ImportError:
|
||||
from _dummy_thread import allocate_lock as _thread_allocate_lock
|
||||
|
||||
__all__ = []
|
||||
|
||||
def _getlang():
|
||||
# Figure out what the current language is set to.
|
||||
return locale.getlocale(locale.LC_TIME)
|
||||
|
||||
class LocaleTime(object):
|
||||
"""Stores and handles locale-specific information related to time.
|
||||
|
||||
ATTRIBUTES:
|
||||
f_weekday -- full weekday names (7-item list)
|
||||
a_weekday -- abbreviated weekday names (7-item list)
|
||||
f_month -- full month names (13-item list; dummy value in [0], which
|
||||
is added by code)
|
||||
a_month -- abbreviated month names (13-item list, dummy value in
|
||||
[0], which is added by code)
|
||||
am_pm -- AM/PM representation (2-item list)
|
||||
LC_date_time -- format string for date/time representation (string)
|
||||
LC_date -- format string for date representation (string)
|
||||
LC_time -- format string for time representation (string)
|
||||
timezone -- daylight- and non-daylight-savings timezone representation
|
||||
(2-item list of sets)
|
||||
lang -- Language used by instance (2-item tuple)
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""Set all attributes.
|
||||
|
||||
Order of methods called matters for dependency reasons.
|
||||
|
||||
The locale language is set at the offset and then checked again before
|
||||
exiting. This is to make sure that the attributes were not set with a
|
||||
mix of information from more than one locale. This would most likely
|
||||
happen when using threads where one thread calls a locale-dependent
|
||||
function while another thread changes the locale while the function in
|
||||
the other thread is still running. Proper coding would call for
|
||||
locks to prevent changing the locale while locale-dependent code is
|
||||
running. The check here is done in case someone does not think about
|
||||
doing this.
|
||||
|
||||
Only other possible issue is if someone changed the timezone and did
|
||||
not call tz.tzset . That is an issue for the programmer, though,
|
||||
since changing the timezone is worthless without that call.
|
||||
|
||||
"""
|
||||
self.lang = _getlang()
|
||||
self.__calc_weekday()
|
||||
self.__calc_month()
|
||||
self.__calc_am_pm()
|
||||
self.__calc_timezone()
|
||||
self.__calc_date_time()
|
||||
if _getlang() != self.lang:
|
||||
raise ValueError("locale changed during initialization")
|
||||
if time.tzname != self.tzname or time.daylight != self.daylight:
|
||||
raise ValueError("timezone changed during initialization")
|
||||
|
||||
def __pad(self, seq, front):
|
||||
# Add '' to seq to either the front (is True), else the back.
|
||||
seq = list(seq)
|
||||
if front:
|
||||
seq.insert(0, '')
|
||||
else:
|
||||
seq.append('')
|
||||
return seq
|
||||
|
||||
def __calc_weekday(self):
|
||||
# Set self.a_weekday and self.f_weekday using the calendar
|
||||
# module.
|
||||
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
|
||||
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
|
||||
self.a_weekday = a_weekday
|
||||
self.f_weekday = f_weekday
|
||||
|
||||
def __calc_month(self):
|
||||
# Set self.f_month and self.a_month using the calendar module.
|
||||
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
|
||||
f_month = [calendar.month_name[i].lower() for i in range(13)]
|
||||
self.a_month = a_month
|
||||
self.f_month = f_month
|
||||
|
||||
def __calc_am_pm(self):
|
||||
# Set self.am_pm by using time.strftime().
|
||||
|
||||
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
|
||||
# magical; just happened to have used it everywhere else where a
|
||||
# static date was needed.
|
||||
am_pm = []
|
||||
for hour in (1, 22):
|
||||
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
|
||||
am_pm.append(time.strftime("%p", time_tuple).lower())
|
||||
self.am_pm = am_pm
|
||||
|
||||
def __calc_date_time(self):
|
||||
# Set self.date_time, self.date, & self.time by using
|
||||
# time.strftime().
|
||||
|
||||
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
|
||||
# overloaded numbers is minimized. The order in which searches for
|
||||
# values within the format string is very important; it eliminates
|
||||
# possible ambiguity for what something represents.
|
||||
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
|
||||
date_time = [None, None, None]
|
||||
date_time[0] = time.strftime("%c", time_tuple).lower()
|
||||
date_time[1] = time.strftime("%x", time_tuple).lower()
|
||||
date_time[2] = time.strftime("%X", time_tuple).lower()
|
||||
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
|
||||
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
|
||||
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
|
||||
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
|
||||
('44', '%M'), ('55', '%S'), ('76', '%j'),
|
||||
('17', '%d'), ('03', '%m'), ('3', '%m'),
|
||||
# '3' needed for when no leading zero.
|
||||
('2', '%w'), ('10', '%I')]
|
||||
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
|
||||
for tz in tz_values])
|
||||
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
|
||||
current_format = date_time[offset]
|
||||
for old, new in replacement_pairs:
|
||||
# Must deal with possible lack of locale info
|
||||
# manifesting itself as the empty string (e.g., Swedish's
|
||||
# lack of AM/PM info) or a platform returning a tuple of empty
|
||||
# strings (e.g., MacOS 9 having timezone as ('','')).
|
||||
if old:
|
||||
current_format = current_format.replace(old, new)
|
||||
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
|
||||
# 2005-01-03 occurs before the first Monday of the year. Otherwise
|
||||
# %U is used.
|
||||
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
|
||||
if '00' in time.strftime(directive, time_tuple):
|
||||
U_W = '%W'
|
||||
else:
|
||||
U_W = '%U'
|
||||
date_time[offset] = current_format.replace('11', U_W)
|
||||
self.LC_date_time = date_time[0]
|
||||
self.LC_date = date_time[1]
|
||||
self.LC_time = date_time[2]
|
||||
|
||||
def __calc_timezone(self):
|
||||
# Set self.timezone by using time.tzname.
|
||||
# Do not worry about possibility of time.tzname[0] == time.tzname[1]
|
||||
# and time.daylight; handle that in strptime.
|
||||
try:
|
||||
time.tzset()
|
||||
except AttributeError:
|
||||
pass
|
||||
self.tzname = time.tzname
|
||||
self.daylight = time.daylight
|
||||
no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()})
|
||||
if self.daylight:
|
||||
has_saving = frozenset({self.tzname[1].lower()})
|
||||
else:
|
||||
has_saving = frozenset()
|
||||
self.timezone = (no_saving, has_saving)
|
||||
|
||||
|
||||
class TimeRE(dict):
|
||||
"""Handle conversion from format directives to regexes."""
|
||||
|
||||
def __init__(self, locale_time=None):
|
||||
"""Create keys/values.
|
||||
|
||||
Order of execution is important for dependency reasons.
|
||||
|
||||
"""
|
||||
if locale_time:
|
||||
self.locale_time = locale_time
|
||||
else:
|
||||
self.locale_time = LocaleTime()
|
||||
base = super()
|
||||
base.__init__({
|
||||
# The " \d" part of the regex is to make %c from ANSI C work
|
||||
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
|
||||
'f': r"(?P<f>[0-9]{1,6})",
|
||||
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
|
||||
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
|
||||
'G': r"(?P<G>\d\d\d\d)",
|
||||
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
|
||||
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
|
||||
'M': r"(?P<M>[0-5]\d|\d)",
|
||||
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
|
||||
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
|
||||
'w': r"(?P<w>[0-6])",
|
||||
'u': r"(?P<u>[1-7])",
|
||||
'V': r"(?P<V>5[0-3]|0[1-9]|[1-4]\d|\d)",
|
||||
# W is set below by using 'U'
|
||||
'y': r"(?P<y>\d\d)",
|
||||
#XXX: Does 'Y' need to worry about having less or more than
|
||||
# 4 digits?
|
||||
'Y': r"(?P<Y>\d\d\d\d)",
|
||||
'z': r"(?P<z>[+-]\d\d[0-5]\d)",
|
||||
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
|
||||
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
|
||||
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
|
||||
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
|
||||
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
|
||||
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
|
||||
for tz in tz_names),
|
||||
'Z'),
|
||||
'%': '%'})
|
||||
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
|
||||
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
|
||||
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
|
||||
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
|
||||
|
||||
def __seqToRE(self, to_convert, directive):
|
||||
"""Convert a list to a regex string for matching a directive.
|
||||
|
||||
Want possible matching values to be from longest to shortest. This
|
||||
prevents the possibility of a match occurring for a value that also
|
||||
a substring of a larger value that should have matched (e.g., 'abc'
|
||||
matching when 'abcdef' should have been the match).
|
||||
|
||||
"""
|
||||
to_convert = sorted(to_convert, key=len, reverse=True)
|
||||
for value in to_convert:
|
||||
if value != '':
|
||||
break
|
||||
else:
|
||||
return ''
|
||||
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
|
||||
regex = '(?P<%s>%s' % (directive, regex)
|
||||
return '%s)' % regex
|
||||
|
||||
def pattern(self, format):
|
||||
"""Return regex pattern for the format string.
|
||||
|
||||
Need to make sure that any characters that might be interpreted as
|
||||
regex syntax are escaped.
|
||||
|
||||
"""
|
||||
processed_format = ''
|
||||
# The sub() call escapes all characters that might be misconstrued
|
||||
# as regex syntax. Cannot use re.escape since we have to deal with
|
||||
# format directives (%m, etc.).
|
||||
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
|
||||
format = regex_chars.sub(r"\\\1", format)
|
||||
whitespace_replacement = re_compile(r'\s+')
|
||||
format = whitespace_replacement.sub(r'\\s+', format)
|
||||
while '%' in format:
|
||||
directive_index = format.index('%')+1
|
||||
processed_format = "%s%s%s" % (processed_format,
|
||||
format[:directive_index-1],
|
||||
self[format[directive_index]])
|
||||
format = format[directive_index+1:]
|
||||
return "%s%s" % (processed_format, format)
|
||||
|
||||
def compile(self, format):
|
||||
"""Return a compiled re object for the format string."""
|
||||
return re_compile(self.pattern(format), IGNORECASE)
|
||||
|
||||
_cache_lock = _thread_allocate_lock()
|
||||
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
|
||||
# first!
|
||||
_TimeRE_cache = TimeRE()
|
||||
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
|
||||
_regex_cache = {}
|
||||
|
||||
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
|
||||
"""Calculate the Julian day based on the year, week of the year, and day of
|
||||
the week, with week_start_day representing whether the week of the year
|
||||
assumes the week starts on Sunday or Monday (6 or 0)."""
|
||||
first_weekday = datetime_date(year, 1, 1).weekday()
|
||||
# If we are dealing with the %U directive (week starts on Sunday), it's
|
||||
# easier to just shift the view to Sunday being the first day of the
|
||||
# week.
|
||||
if not week_starts_Mon:
|
||||
first_weekday = (first_weekday + 1) % 7
|
||||
day_of_week = (day_of_week + 1) % 7
|
||||
# Need to watch out for a week 0 (when the first day of the year is not
|
||||
# the same as that specified by %U or %W).
|
||||
week_0_length = (7 - first_weekday) % 7
|
||||
if week_of_year == 0:
|
||||
return 1 + day_of_week - first_weekday
|
||||
else:
|
||||
days_to_week = week_0_length + (7 * (week_of_year - 1))
|
||||
return 1 + days_to_week + day_of_week
|
||||
|
||||
|
||||
def _calc_julian_from_V(iso_year, iso_week, iso_weekday):
|
||||
"""Calculate the Julian day based on the ISO 8601 year, week, and weekday.
|
||||
ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
|
||||
ISO week days range from 1 (Monday) to 7 (Sunday).
|
||||
"""
|
||||
correction = datetime_date(iso_year, 1, 4).isoweekday() + 3
|
||||
ordinal = (iso_week * 7) + iso_weekday - correction
|
||||
# ordinal may be negative or 0 now, which means the date is in the previous
|
||||
# calendar year
|
||||
if ordinal < 1:
|
||||
ordinal += datetime_date(iso_year, 1, 1).toordinal()
|
||||
iso_year -= 1
|
||||
ordinal -= datetime_date(iso_year, 1, 1).toordinal()
|
||||
return iso_year, ordinal
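
# An illustrative check (not part of the vendored file): 2016-01-04 was the
# Monday of ISO week 01 of 2016, i.e. day-of-year 4, so:
#
#   _calc_julian_from_V(2016, 1, 1)  ->  (2016, 4)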


def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a 2-tuple consisting of a time struct and an int containing
    the number of microseconds based on the input string and the
    format string."""

    for index, arg in enumerate([data_string, format]):
        if not isinstance(arg, str):
            msg = "strptime() argument {} must be str, not {}"
            raise TypeError(msg.format(index, type(arg)))

    global _TimeRE_cache, _regex_cache
    with _cache_lock:
        locale_time = _TimeRE_cache.locale_time
        if (_getlang() != locale_time.lang or
            time.tzname != locale_time.tzname or
            time.daylight != locale_time.daylight):
            _TimeRE_cache = TimeRE()
            _regex_cache.clear()
            locale_time = _TimeRE_cache.locale_time
        if len(_regex_cache) > _CACHE_MAX_SIZE:
            _regex_cache.clear()
        format_regex = _regex_cache.get(format)
        if not format_regex:
            try:
                format_regex = _TimeRE_cache.compile(format)
            # KeyError raised when a bad format is found; can be specified as
            # \\, in which case it was a stray % but with a space after it
            except KeyError as err:
                bad_directive = err.args[0]
                if bad_directive == "\\":
                    bad_directive = "%"
                del err
                raise ValueError("'%s' is a bad directive in format '%s'" %
                                    (bad_directive, format)) from None
            # IndexError only occurs when the format string is "%"
            except IndexError:
                raise ValueError("stray %% in format '%s'" % format) from None
            _regex_cache[format] = format_regex
    found = format_regex.match(data_string)
    if not found:
        raise ValueError("time data %r does not match format %r" %
                         (data_string, format))
    if len(data_string) != found.end():
        raise ValueError("unconverted data remains: %s" %
                          data_string[found.end():])

    iso_year = year = None
    month = day = 1
    hour = minute = second = fraction = 0
    tz = -1
    tzoffset = None
    # Default to -1 to signify that values not known; not critical to have,
    # though
    iso_week = week_of_year = None
    week_of_year_start = None
    # weekday and julian defaulted to None so as to signal need to calculate
    # values
    weekday = julian = None
    found_dict = found.groupdict()
    for group_key in found_dict.keys():
        # Directives not explicitly handled below:
        #   c, x, X
        #      handled by making out of other directives
        #   U, W
        #      worthless without day of the week
        if group_key == 'y':
            year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            #value in the range of [00, 68] is in the century 2000, while
            #[69,99] is in the century 1900
            if year <= 68:
                year += 2000
            else:
                year += 1900
        elif group_key == 'Y':
            year = int(found_dict['Y'])
        elif group_key == 'G':
            iso_year = int(found_dict['G'])
        elif group_key == 'm':
            month = int(found_dict['m'])
        elif group_key == 'B':
            month = locale_time.f_month.index(found_dict['B'].lower())
        elif group_key == 'b':
            month = locale_time.a_month.index(found_dict['b'].lower())
        elif group_key == 'd':
            day = int(found_dict['d'])
        elif group_key == 'H':
            hour = int(found_dict['H'])
        elif group_key == 'I':
            hour = int(found_dict['I'])
            ampm = found_dict.get('p', '').lower()
            # If there was no AM/PM indicator, we'll treat this like AM
            if ampm in ('', locale_time.am_pm[0]):
                # We're in AM so the hour is correct unless we're
                # looking at 12 midnight.
                # 12 midnight == 12 AM == hour 0
                if hour == 12:
                    hour = 0
            elif ampm == locale_time.am_pm[1]:
                # We're in PM so we need to add 12 to the hour unless
                # we're looking at 12 noon.
                # 12 noon == 12 PM == hour 12
                if hour != 12:
                    hour += 12
        elif group_key == 'M':
            minute = int(found_dict['M'])
        elif group_key == 'S':
            second = int(found_dict['S'])
        elif group_key == 'f':
            s = found_dict['f']
            # Pad to always return microseconds.
            s += "0" * (6 - len(s))
            fraction = int(s)
        elif group_key == 'A':
            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
        elif group_key == 'a':
            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
        elif group_key == 'w':
            weekday = int(found_dict['w'])
            if weekday == 0:
                weekday = 6
            else:
                weekday -= 1
        elif group_key == 'u':
            weekday = int(found_dict['u'])
            weekday -= 1
        elif group_key == 'j':
            julian = int(found_dict['j'])
        elif group_key in ('U', 'W'):
            week_of_year = int(found_dict[group_key])
            if group_key == 'U':
                # U starts week on Sunday.
                week_of_year_start = 6
            else:
                # W starts week on Monday.
                week_of_year_start = 0
        elif group_key == 'V':
            iso_week = int(found_dict['V'])
        elif group_key == 'z':
            z = found_dict['z']
            tzoffset = int(z[1:3]) * 60 + int(z[3:5])
            if z.startswith("-"):
                tzoffset = -tzoffset
        elif group_key == 'Z':
            # Since -1 is default value only need to worry about setting tz if
            # it can be something other than -1.
            found_zone = found_dict['Z'].lower()
            for value, tz_values in enumerate(locale_time.timezone):
                if found_zone in tz_values:
                    # Deal with bad locale setup where timezone names are the
                    # same and yet time.daylight is true; too ambiguous to
                    # be able to tell what timezone has daylight savings
                    if (time.tzname[0] == time.tzname[1] and
                       time.daylight and found_zone not in ("utc", "gmt")):
                        break
                    else:
                        tz = value
                        break
    # Deal with the cases where ambiguities arise
    # don't assume default values for ISO week/year
    if year is None and iso_year is not None:
        if iso_week is None or weekday is None:
            raise ValueError("ISO year directive '%G' must be used with "
                             "the ISO week directive '%V' and a weekday "
                             "directive ('%A', '%a', '%w', or '%u').")
        if julian is not None:
            raise ValueError("Day of the year directive '%j' is not "
                             "compatible with ISO year directive '%G'. "
                             "Use '%Y' instead.")
    elif week_of_year is None and iso_week is not None:
        if weekday is None:
            raise ValueError("ISO week directive '%V' must be used with "
                             "the ISO year directive '%G' and a weekday "
                             "directive ('%A', '%a', '%w', or '%u').")
        else:
            raise ValueError("ISO week directive '%V' is incompatible with "
                             "the year directive '%Y'. Use the ISO year '%G' "
                             "instead.")

    leap_year_fix = False
    if year is None and month == 2 and day == 29:
        year = 1904  # 1904 is first leap year of 20th century
        leap_year_fix = True
    elif year is None:
        year = 1900


    # If we know the week of the year and what day of that week, we can figure
    # out the Julian day of the year.
    if julian is None and weekday is not None:
        if week_of_year is not None:
            week_starts_Mon = True if week_of_year_start == 0 else False
            julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                                week_starts_Mon)
        elif iso_year is not None and iso_week is not None:
            year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1)
        if julian is not None and julian <= 0:
            year -= 1
            yday = 366 if calendar.isleap(year) else 365
            julian += yday

    if julian is None:
        # Cannot pre-calculate datetime_date() since can change in Julian
        # calculation and thus could have different value for the day of
        # the week calculation.
        # Need to add 1 to result since first day of the year is 1, not 0.
        julian = datetime_date(year, month, day).toordinal() - \
                  datetime_date(year, 1, 1).toordinal() + 1
    else:  # Assume that if they bothered to include Julian day (or if it was
           # calculated above with year/week/weekday) it will be accurate.
        datetime_result = datetime_date.fromordinal(
                            (julian - 1) +
                            datetime_date(year, 1, 1).toordinal())
        year = datetime_result.year
        month = datetime_result.month
        day = datetime_result.day
    if weekday is None:
        weekday = datetime_date(year, month, day).weekday()
    # Add timezone info
    tzname = found_dict.get("Z")
    if tzoffset is not None:
        gmtoff = tzoffset * 60
    else:
        gmtoff = None

    if leap_year_fix:
        # the caller didn't supply a year but asked for Feb 29th.  We couldn't
        # use the default of 1900 for computations.  We set it back to ensure
        # that February 29th is smaller than March 1st.
        year = 1900

    return (year, month, day,
            hour, minute, second,
            weekday, julian, tz, tzname, gmtoff), fraction

def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a time struct based on the input string and the
    format string."""
    tt = _strptime(data_string, format)[0]
    return time.struct_time(tt[:time._STRUCT_TM_ITEMS])

def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a class cls instance based on the input string and the
    format string."""
    tt, fraction = _strptime(data_string, format)
    tzname, gmtoff = tt[-2:]
    args = tt[:6] + (fraction,)
    if gmtoff is not None:
        tzdelta = datetime_timedelta(seconds=gmtoff)
        if tzname:
            tz = datetime_timezone(tzdelta, tzname)
        else:
            tz = datetime_timezone(tzdelta)
        args += (tz,)

    return cls(*args)
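
A quick sketch of the module in action (illustrative; time.strptime and
datetime.datetime.strptime are the public entry points backed by this code):

    import _strptime
    st = _strptime._strptime_time("Wed Mar 17 22:44:55 1999")
    assert st[:6] == (1999, 3, 17, 22, 44, 55)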

242
third_party/python/Lib/_threading_local.py
vendored
Normal file
@@ -0,0 +1,242 @@
"""Thread-local objects.
|
||||
|
||||
(Note that this module provides a Python version of the threading.local
|
||||
class. Depending on the version of Python you're using, there may be a
|
||||
faster one available. You should always import the `local` class from
|
||||
`threading`.)
|
||||
|
||||
Thread-local objects support the management of thread-local data.
|
||||
If you have data that you want to be local to a thread, simply create
|
||||
a thread-local object and use its attributes:
|
||||
|
||||
>>> mydata = local()
|
||||
>>> mydata.number = 42
|
||||
>>> mydata.number
|
||||
42
|
||||
|
||||
You can also access the local-object's dictionary:
|
||||
|
||||
>>> mydata.__dict__
|
||||
{'number': 42}
|
||||
>>> mydata.__dict__.setdefault('widgets', [])
|
||||
[]
|
||||
>>> mydata.widgets
|
||||
[]
|
||||
|
||||
What's important about thread-local objects is that their data are
|
||||
local to a thread. If we access the data in a different thread:
|
||||
|
||||
>>> log = []
|
||||
>>> def f():
|
||||
... items = sorted(mydata.__dict__.items())
|
||||
... log.append(items)
|
||||
... mydata.number = 11
|
||||
... log.append(mydata.number)
|
||||
|
||||
>>> import threading
|
||||
>>> thread = threading.Thread(target=f)
|
||||
>>> thread.start()
|
||||
>>> thread.join()
|
||||
>>> log
|
||||
[[], 11]
|
||||
|
||||
we get different data. Furthermore, changes made in the other thread
|
||||
don't affect data seen in this thread:
|
||||
|
||||
>>> mydata.number
|
||||
42
|
||||
|
||||
Of course, values you get from a local object, including a __dict__
|
||||
attribute, are for whatever thread was current at the time the
|
||||
attribute was read. For that reason, you generally don't want to save
|
||||
these values across threads, as they apply only to the thread they
|
||||
came from.
|
||||
|
||||
You can create custom local objects by subclassing the local class:
|
||||
|
||||
>>> class MyLocal(local):
|
||||
... number = 2
|
||||
... def __init__(self, **kw):
|
||||
... self.__dict__.update(kw)
|
||||
... def squared(self):
|
||||
... return self.number ** 2
|
||||
|
||||
This can be useful to support default values, methods and
|
||||
initialization. Note that if you define an __init__ method, it will be
|
||||
called each time the local object is used in a separate thread. This
|
||||
is necessary to initialize each thread's dictionary.
|
||||
|
||||
Now if we create a local object:
|
||||
|
||||
>>> mydata = MyLocal(color='red')
|
||||
|
||||
Now we have a default number:
|
||||
|
||||
>>> mydata.number
|
||||
2
|
||||
|
||||
an initial color:
|
||||
|
||||
>>> mydata.color
|
||||
'red'
|
||||
>>> del mydata.color
|
||||
|
||||
And a method that operates on the data:
|
||||
|
||||
>>> mydata.squared()
|
||||
4
|
||||
|
||||
As before, we can access the data in a separate thread:
|
||||
|
||||
>>> log = []
|
||||
>>> thread = threading.Thread(target=f)
|
||||
>>> thread.start()
|
||||
>>> thread.join()
|
||||
>>> log
|
||||
[[('color', 'red')], 11]
|
||||
|
||||
without affecting this thread's data:
|
||||
|
||||
>>> mydata.number
|
||||
2
|
||||
>>> mydata.color
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
AttributeError: 'MyLocal' object has no attribute 'color'
|
||||
|
||||
Note that subclasses can define slots, but they are not thread
|
||||
local. They are shared across threads:
|
||||
|
||||
>>> class MyLocal(local):
|
||||
... __slots__ = 'number'
|
||||
|
||||
>>> mydata = MyLocal()
|
||||
>>> mydata.number = 42
|
||||
>>> mydata.color = 'red'
|
||||
|
||||
So, the separate thread:
|
||||
|
||||
>>> thread = threading.Thread(target=f)
|
||||
>>> thread.start()
|
||||
>>> thread.join()
|
||||
|
||||
affects what we see:
|
||||
|
||||
>>> mydata.number
|
||||
11
|
||||
|
||||
>>> del mydata
|
||||
"""
|
||||
|
||||
from weakref import ref
|
||||
from contextlib import contextmanager
|
||||
|
||||
__all__ = ["local"]
|
||||
|
||||
# We need to use objects from the threading module, but the threading
|
||||
# module may also want to use our `local` class, if support for locals
|
||||
# isn't compiled in to the `thread` module. This creates potential problems
|
||||
# with circular imports. For that reason, we don't import `threading`
|
||||
# until the bottom of this file (a hack sufficient to worm around the
|
||||
# potential problems). Note that all platforms on CPython do have support
|
||||
# for locals in the `thread` module, and there is no circular import problem
|
||||
# then, so problems introduced by fiddling the order of imports here won't
|
||||
# manifest.
|
||||
|
||||
class _localimpl:
|
||||
"""A class managing thread-local dicts"""
|
||||
__slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
|
||||
|
||||
def __init__(self):
|
||||
# The key used in the Thread objects' attribute dicts.
|
||||
# We keep it a string for speed but make it unlikely to clash with
|
||||
# a "real" attribute.
|
||||
self.key = '_threading_local._localimpl.' + str(id(self))
|
||||
# { id(Thread) -> (ref(Thread), thread-local dict) }
|
||||
self.dicts = {}
|
||||
|
||||
def get_dict(self):
|
||||
"""Return the dict for the current thread. Raises KeyError if none
|
||||
defined."""
|
||||
thread = current_thread()
|
||||
return self.dicts[id(thread)][1]
|
||||
|
||||
def create_dict(self):
|
||||
"""Create a new dict for the current thread, and return it."""
|
||||
localdict = {}
|
||||
key = self.key
|
||||
thread = current_thread()
|
||||
idt = id(thread)
|
||||
def local_deleted(_, key=key):
|
||||
# When the localimpl is deleted, remove the thread attribute.
|
||||
thread = wrthread()
|
||||
if thread is not None:
|
||||
del thread.__dict__[key]
|
||||
def thread_deleted(_, idt=idt):
|
||||
# When the thread is deleted, remove the local dict.
|
||||
# Note that this is suboptimal if the thread object gets
|
||||
# caught in a reference loop. We would like to be called
|
||||
# as soon as the OS-level thread ends instead.
|
||||
local = wrlocal()
|
||||
if local is not None:
|
||||
dct = local.dicts.pop(idt)
|
||||
wrlocal = ref(self, local_deleted)
|
||||
wrthread = ref(thread, thread_deleted)
|
||||
thread.__dict__[key] = wrlocal
|
||||
self.dicts[idt] = wrthread, localdict
|
||||
return localdict
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _patch(self):
|
||||
impl = object.__getattribute__(self, '_local__impl')
|
||||
try:
|
||||
dct = impl.get_dict()
|
||||
except KeyError:
|
||||
dct = impl.create_dict()
|
||||
args, kw = impl.localargs
|
||||
self.__init__(*args, **kw)
|
||||
with impl.locallock:
|
||||
object.__setattr__(self, '__dict__', dct)
|
||||
yield
|
||||
|
||||
|
||||
class local:
|
||||
__slots__ = '_local__impl', '__dict__'
|
||||
|
||||
def __new__(cls, *args, **kw):
|
||||
if (args or kw) and (cls.__init__ is object.__init__):
|
||||
raise TypeError("Initialization arguments are not supported")
|
||||
self = object.__new__(cls)
|
||||
impl = _localimpl()
|
||||
impl.localargs = (args, kw)
|
||||
impl.locallock = RLock()
|
||||
object.__setattr__(self, '_local__impl', impl)
|
||||
# We need to create the thread dict in anticipation of
|
||||
# __init__ being called, to make sure we don't call it
|
||||
# again ourselves.
|
||||
impl.create_dict()
|
||||
return self
|
||||
|
||||
def __getattribute__(self, name):
|
||||
with _patch(self):
|
||||
return object.__getattribute__(self, name)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name == '__dict__':
|
||||
raise AttributeError(
|
||||
"%r object attribute '__dict__' is read-only"
|
||||
% self.__class__.__name__)
|
||||
with _patch(self):
|
||||
return object.__setattr__(self, name, value)
|
||||
|
||||
def __delattr__(self, name):
|
||||
if name == '__dict__':
|
||||
raise AttributeError(
|
||||
"%r object attribute '__dict__' is read-only"
|
||||
% self.__class__.__name__)
|
||||
with _patch(self):
|
||||
return object.__delattr__(self, name)
|
||||
|
||||
|
||||
from threading import current_thread, RLock

196
third_party/python/Lib/_weakrefset.py
vendored
Normal file
@@ -0,0 +1,196 @@
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.

from _weakref import ref

__all__ = ['WeakSet']


class _IterationGuard:
    # This context manager registers itself in the current iterators of the
    # weak container, such as to delay all removals until the context manager
    # exits.
    # This technique should be relatively thread-safe (since sets are).

    def __init__(self, weakcontainer):
        # Don't create cycles
        self.weakcontainer = ref(weakcontainer)

    def __enter__(self):
        w = self.weakcontainer()
        if w is not None:
            w._iterating.add(self)
        return self

    def __exit__(self, e, t, b):
        w = self.weakcontainer()
        if w is not None:
            s = w._iterating
            s.remove(self)
            if not s:
                w._commit_removals()


class WeakSet:
    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)

    def _commit_removals(self):
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())

    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    # Caveat: the iterator will keep a strong reference to
                    # `item` until it is resumed or closed.
                    yield item

    def __len__(self):
        return len(self.data) - len(self._pending_removals)

    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            return False
        return wr in self.data

    def __reduce__(self):
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))

    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))

    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                return item

    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))

    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))

    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)

    def __ior__(self, other):
        self.update(other)
        return self

    def difference(self, other):
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = difference

    def difference_update(self, other):
        self.__isub__(other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self

    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection

    def intersection_update(self, other):
        self.__iand__(other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self

    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset

    def __lt__(self, other):
        return self.data < set(ref(item) for item in other)

    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset

    def __gt__(self, other):
        return self.data > set(ref(item) for item in other)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)

    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference

    def symmetric_difference_update(self, other):
        self.__ixor__(other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self

    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union

    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
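
A small sketch of the behavior this buys (illustrative): members are held
weakly, so dropping the last strong reference removes them from the set:

    import gc, weakref

    class Token:
        pass

    ws = weakref.WeakSet()
    t = Token()
    ws.add(t)
    assert len(ws) == 1
    del t
    gc.collect()          # not strictly needed in CPython, but harmless
    assert len(ws) == 0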

250
third_party/python/Lib/abc.py
vendored
Normal file
@@ -0,0 +1,250 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) according to PEP 3119."""

from _weakrefset import WeakSet


def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract methods are overridden.
    The abstract methods can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    funcobj.__isabstractmethod__ = True
    return funcobj


class abstractclassmethod(classmethod):
    """A decorator indicating abstract classmethods.

    Similar to abstractmethod.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractclassmethod
            def my_abstract_classmethod(cls, ...):
                ...

    'abstractclassmethod' is deprecated. Use 'classmethod' with
    'abstractmethod' instead.
    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        callable.__isabstractmethod__ = True
        super().__init__(callable)


class abstractstaticmethod(staticmethod):
    """A decorator indicating abstract staticmethods.

    Similar to abstractmethod.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractstaticmethod
            def my_abstract_staticmethod(...):
                ...

    'abstractstaticmethod' is deprecated. Use 'staticmethod' with
    'abstractmethod' instead.
    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        callable.__isabstractmethod__ = True
        super().__init__(callable)


class abstractproperty(property):
    """A decorator indicating abstract properties.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract properties are overridden.
    The abstract properties can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractproperty
            def my_abstract_property(self):
                ...

    This defines a read-only property; you can also define a read-write
    abstract property using the 'long' form of property declaration:

        class C(metaclass=ABCMeta):
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)

    'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
    instead.
    """

    __isabstractmethod__ = True


class ABCMeta(type):

    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything.  It forces the
    # negative cache to be cleared before its next use.
    # Note: this counter is private.  Use `abc.get_cache_token()` for
    #       external code.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace, **kwargs):
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
        # Compute set of abstract method names
        abstracts = {name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False)}
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC.

        Returns the subclass, to allow usage as a class decorator.
        """
        if not isinstance(subclass, type):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return subclass  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
        return subclass

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print("Class: %s.%s" % (cls.__module__, cls.__qualname__), file=file)
        print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
        for name in sorted(cls.__dict__):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                if isinstance(value, WeakSet):
                    value = set(value)
                print("%s: %r" % (name, value), file=file)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking
        subclass = instance.__class__
        if subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        if subtype is subclass:
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subclass in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subclass)
        return any(cls.__subclasscheck__(c) for c in {subclass, subtype})

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False


class ABC(metaclass=ABCMeta):
    """Helper class that provides a standard way to create an ABC using
    inheritance.
    """
    pass


def get_cache_token():
    """Returns the current ABC cache token.

    The token is an opaque object (supporting equality testing) identifying the
    current version of the ABC cache for virtual subclasses. The token changes
    with every call to ``register()`` on any ABC.
    """
    return ABCMeta._abc_invalidation_counter
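
A quick sketch of virtual subclassing with the machinery above (illustrative;
the class names are made up):

    from abc import ABC, abstractmethod

    class Serializer(ABC):
        @abstractmethod
        def dumps(self, obj): ...

    class JSONish:                 # unrelated concrete class
        def dumps(self, obj):
            return str(obj)

    Serializer.register(JSONish)   # virtual subclass, no inheritance
    assert issubclass(JSONish, Serializer)
    assert isinstance(JSONish(), Serializer)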

944
third_party/python/Lib/aifc.py
vendored
Normal file
@@ -0,0 +1,944 @@
"""Stuff to parse AIFF-C and AIFF files.
|
||||
|
||||
Unless explicitly stated otherwise, the description below is true
|
||||
both for AIFF-C files and AIFF files.
|
||||
|
||||
An AIFF-C file has the following structure.
|
||||
|
||||
+-----------------+
|
||||
| FORM |
|
||||
+-----------------+
|
||||
| <size> |
|
||||
+----+------------+
|
||||
| | AIFC |
|
||||
| +------------+
|
||||
| | <chunks> |
|
||||
| | . |
|
||||
| | . |
|
||||
| | . |
|
||||
+----+------------+
|
||||
|
||||
An AIFF file has the string "AIFF" instead of "AIFC".
|
||||
|
||||
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
|
||||
big endian order), followed by the data. The size field does not include
|
||||
the size of the 8 byte header.
|
||||
|
||||
The following chunk types are recognized.
|
||||
|
||||
FVER
|
||||
<version number of AIFF-C defining document> (AIFF-C only).
|
||||
MARK
|
||||
<# of markers> (2 bytes)
|
||||
list of markers:
|
||||
<marker ID> (2 bytes, must be > 0)
|
||||
<position> (4 bytes)
|
||||
<marker name> ("pstring")
|
||||
COMM
|
||||
<# of channels> (2 bytes)
|
||||
<# of sound frames> (4 bytes)
|
||||
<size of the samples> (2 bytes)
|
||||
<sampling frequency> (10 bytes, IEEE 80-bit extended
|
||||
floating point)
|
||||
in AIFF-C files only:
|
||||
<compression type> (4 bytes)
|
||||
<human-readable version of compression type> ("pstring")
|
||||
SSND
|
||||
<offset> (4 bytes, not used by this program)
|
||||
<blocksize> (4 bytes, not used by this program)
|
||||
<sound data>
|
||||
|
||||
A pstring consists of 1 byte length, a string of characters, and 0 or 1
|
||||
byte pad to make the total length even.
|
||||
|
||||
Usage.
|
||||
|
||||
Reading AIFF files:
|
||||
f = aifc.open(file, 'r')
|
||||
where file is either the name of a file or an open file pointer.
|
||||
The open file pointer must have methods read(), seek(), and close().
|
||||
In some types of audio files, if the setpos() method is not used,
|
||||
the seek() method is not necessary.
|
||||
|
||||
This returns an instance of a class with the following public methods:
|
||||
getnchannels() -- returns number of audio channels (1 for
|
||||
mono, 2 for stereo)
|
||||
getsampwidth() -- returns sample width in bytes
|
||||
getframerate() -- returns sampling frequency
|
||||
getnframes() -- returns number of audio frames
|
||||
getcomptype() -- returns compression type ('NONE' for AIFF files)
|
||||
getcompname() -- returns human-readable version of
|
||||
compression type ('not compressed' for AIFF files)
|
||||
getparams() -- returns a namedtuple consisting of all of the
|
||||
above in the above order
|
||||
getmarkers() -- get the list of marks in the audio file or None
|
||||
if there are no marks
|
||||
getmark(id) -- get mark with the specified id (raises an error
|
||||
if the mark does not exist)
|
||||
readframes(n) -- returns at most n frames of audio
|
||||
rewind() -- rewind to the beginning of the audio stream
|
||||
setpos(pos) -- seek to the specified position
|
||||
tell() -- return the current position
|
||||
close() -- close the instance (make it unusable)
|
||||
The position returned by tell(), the position given to setpos() and
|
||||
the position of marks are all compatible and have nothing to do with
|
||||
the actual position in the file.
|
||||
The close() method is called automatically when the class instance
|
||||
is destroyed.
|
||||
|
||||
Writing AIFF files:
|
||||
f = aifc.open(file, 'w')
|
||||
where file is either the name of a file or an open file pointer.
|
||||
The open file pointer must have methods write(), tell(), seek(), and
|
||||
close().
|
||||
|
||||
This returns an instance of a class with the following public methods:
|
||||
aiff() -- create an AIFF file (AIFF-C default)
|
||||
aifc() -- create an AIFF-C file
|
||||
setnchannels(n) -- set the number of channels
|
||||
setsampwidth(n) -- set the sample width
|
||||
setframerate(n) -- set the frame rate
|
||||
setnframes(n) -- set the number of frames
|
||||
setcomptype(type, name)
|
||||
-- set the compression type and the
|
||||
human-readable compression type
|
||||
setparams(tuple)
|
||||
-- set all parameters at once
|
||||
setmark(id, pos, name)
|
||||
-- add specified mark to the list of marks
|
||||
tell() -- return current position in output file (useful
|
||||
in combination with setmark())
|
||||
writeframesraw(data)
|
||||
-- write audio frames without pathing up the
|
||||
file header
|
||||
writeframes(data)
|
||||
-- write audio frames and patch up the file header
|
||||
close() -- patch up the file header and close the
|
||||
output file
|
||||
You should set the parameters before the first writeframesraw or
|
||||
writeframes. The total number of frames does not need to be set,
|
||||
but when it is set to the correct value, the header does not have to
|
||||
be patched up.
|
||||
It is best to first set all parameters, perhaps possibly the
|
||||
compression type, and then write audio frames using writeframesraw.
|
||||
When all frames have been written, either call writeframes(b'') or
|
||||
close() to patch up the sizes in the header.
|
||||
Marks can be added anytime. If there are any marks, you must call
|
||||
close() after all frames have been written.
|
||||
The close() method is called automatically when the class instance
|
||||
is destroyed.
|
||||
|
||||
When a file is opened with the extension '.aiff', an AIFF file is
|
||||
written, otherwise an AIFF-C file is written. This default can be
|
||||
changed by calling aiff() or aifc() before the first writeframes or
|
||||
writeframesraw.
|
||||
"""
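
# A quick-start sketch (illustrative, not part of the original module;
# 'sound.aiff' is a hypothetical path):
#
#   import aifc
#   with aifc.open('sound.aiff', 'r') as f:
#       params = f.getparams()            # namedtuple, see _aifc_params below
#       pcm = f.readframes(f.getnframes())
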
import struct
import builtins
import warnings

__all__ = ["Error", "open", "openfp"]


class Error(Exception):
    pass


_AIFC_version = 0xA2805140     # Version 1 of AIFF-C


def _read_long(file):
    try:
        return struct.unpack('>l', file.read(4))[0]
    except struct.error:
        raise EOFError


def _read_ulong(file):
    try:
        return struct.unpack('>L', file.read(4))[0]
    except struct.error:
        raise EOFError


def _read_short(file):
    try:
        return struct.unpack('>h', file.read(2))[0]
    except struct.error:
        raise EOFError


def _read_ushort(file):
    try:
        return struct.unpack('>H', file.read(2))[0]
    except struct.error:
        raise EOFError


def _read_string(file):
    length = ord(file.read(1))
    if length == 0:
        data = b''
    else:
        data = file.read(length)
    if length & 1 == 0:
        dummy = file.read(1)
    return data


_HUGE_VAL = 1.79769313486231e+308  # See <limits.h>


def _read_float(f):  # 10 bytes
    expon = _read_short(f)  # 2 bytes
    sign = 1
    if expon < 0:
        sign = -1
        expon = expon + 0x8000
    himant = _read_ulong(f)  # 4 bytes
    lomant = _read_ulong(f)  # 4 bytes
    if expon == himant == lomant == 0:
        f = 0.0
    elif expon == 0x7FFF:
        f = _HUGE_VAL
    else:
        expon = expon - 16383
        f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
    return sign * f
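
# Worked example (explanatory note, not in the original): the 80-bit extended
# format stores sign and exponent in 2 bytes and a 64-bit mantissa with an
# explicit integer bit.  For 1.0 the stored fields are expon=16383,
# himant=0x80000000, lomant=0, so the formula above yields
# (0x80000000 * 0x100000000 + 0) * 2.0 ** (16383 - 16383 - 63) == 1.0.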

def _write_short(f, x):
    f.write(struct.pack('>h', x))


def _write_ushort(f, x):
    f.write(struct.pack('>H', x))


def _write_long(f, x):
    f.write(struct.pack('>l', x))


def _write_ulong(f, x):
    f.write(struct.pack('>L', x))


def _write_string(f, s):
    if len(s) > 255:
        raise ValueError("string exceeds maximum pstring length")
    f.write(struct.pack('B', len(s)))
    f.write(s)
    if len(s) & 1 == 0:
        f.write(b'\x00')
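
# Round-trip sketch (illustrative, not part of the original module), using an
# in-memory buffer in place of a real file:
#
#   import io
#   buf = io.BytesIO()
#   _write_string(buf, b'not compressed')   # length byte, data, pad byte
#   buf.seek(0)
#   assert _read_string(buf) == b'not compressed'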

def _write_float(f, x):
    import math
    if x < 0:
        sign = 0x8000
        x = x * -1
    else:
        sign = 0
    if x == 0:
        expon = 0
        himant = 0
        lomant = 0
    else:
        fmant, expon = math.frexp(x)
        if expon > 16384 or fmant >= 1 or fmant != fmant:  # infinity or NaN
            expon = sign | 0x7FFF
            himant = 0
            lomant = 0
        else:                   # finite
            expon = expon + 16382
            if expon < 0:       # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            himant = int(fsmant)
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = int(fsmant)
    _write_ushort(f, expon)
    _write_ulong(f, himant)
    _write_ulong(f, lomant)


from chunk import Chunk
from collections import namedtuple

_aifc_params = namedtuple('_aifc_params',
                          'nchannels sampwidth framerate nframes comptype compname')

_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)'
_aifc_params.sampwidth.__doc__ = 'Sample width in bytes'
_aifc_params.framerate.__doc__ = 'Sampling frequency'
_aifc_params.nframes.__doc__ = 'Number of audio frames'
_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)'
_aifc_params.compname.__doc__ = ("""\
A human-readable version of the compression type
('not compressed' for AIFF files)""")


class Aifc_read:
    # Variables used in this class:
    #
    # These variables are available to the user through appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #       set through the __init__() method
    # _nchannels -- the number of audio channels
    #       available through the getnchannels() method
    # _nframes -- the number of audio frames
    #       available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #       available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #       available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #       available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #       available through the getcompname() method
    # _markers -- the marks in the audio file
    #       available through the getmarkers() and getmark()
    #       methods
    # _soundpos -- the position in the audio stream
    #       available through the tell() method, set through the
    #       setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #       file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file

    _file = None  # Set here since __del__ checks it

    def initfp(self, file):
        self._version = 0
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = file
        chunk = Chunk(file)
        if chunk.getname() != b'FORM':
            raise Error('file does not start with FORM id')
        formdata = chunk.read(4)
        if formdata == b'AIFF':
            self._aifc = 0
        elif formdata == b'AIFC':
            self._aifc = 1
        else:
            raise Error('not an AIFF or AIFF-C file')
        self._comm_chunk_read = 0
        self._ssnd_chunk = None
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == b'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == b'SSND':
                self._ssnd_chunk = chunk
                dummy = chunk.read(8)
                self._ssnd_seek_needed = 0
            elif chunkname == b'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == b'MARK':
                self._readmark(chunk)
            chunk.skip()
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error('COMM chunk and/or SSND chunk missing')

    def __init__(self, f):
        if isinstance(f, str):
            file_object = builtins.open(f, 'rb')
            try:
                self.initfp(file_object)
            except:
                file_object.close()
                raise
        else:
            # assume it is an open file object already
            self.initfp(f)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def getfp(self):
        return self._file

    def rewind(self):
        self._ssnd_seek_needed = 1
        self._soundpos = 0

    def close(self):
        file = self._file
        if file is not None:
            self._file = None
            file.close()

    def tell(self):
        return self._soundpos

    def getnchannels(self):
        return self._nchannels

    def getnframes(self):
        return self._nframes

    def getsampwidth(self):
        return self._sampwidth

    def getframerate(self):
        return self._framerate

    def getcomptype(self):
        return self._comptype

    def getcompname(self):
        return self._compname

##  def getversion(self):
##      return self._version

    def getparams(self):
        return _aifc_params(self.getnchannels(), self.getsampwidth(),
                            self.getframerate(), self.getnframes(),
                            self.getcomptype(), self.getcompname())

    def getmarkers(self):
        if len(self._markers) == 0:
            return None
        return self._markers

    def getmark(self, id):
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def setpos(self, pos):
        if pos < 0 or pos > self._nframes:
            raise Error('position not in range')
        self._soundpos = pos
        self._ssnd_seek_needed = 1

    def readframes(self, nframes):
        if self._ssnd_seek_needed:
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return b''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        self._soundpos = self._soundpos + len(data) // (self._nchannels
                                                        * self._sampwidth)
        return data
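
    # Chunked-read sketch (illustrative, not part of the original module):
    # tell()/setpos() count frames, not bytes, so they remain valid even when
    # a compression converter changes the number of bytes returned.
    #
    #   while True:
    #       frames = f.readframes(1024)
    #       if not frames:
    #           break
    #       process(frames)             # 'process' is a hypothetical callback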

    #
    # Internal methods.
    #

    def _alaw2lin(self, data):
        import audioop
        return audioop.alaw2lin(data, 2)

    def _ulaw2lin(self, data):
        import audioop
        return audioop.ulaw2lin(data, 2)

    def _adpcm2lin(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate)
        return data

    def _read_comm_chunk(self, chunk):
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        self._sampwidth = (_read_short(chunk) + 7) // 8
        self._framerate = int(_read_float(chunk))
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                warnings.warn('Warning: bad COMM chunk size')
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != b'NONE':
                if self._comptype == b'G722':
                    self._convert = self._adpcm2lin
                elif self._comptype in (b'ulaw', b'ULAW'):
                    self._convert = self._ulaw2lin
                elif self._comptype in (b'alaw', b'ALAW'):
                    self._convert = self._alaw2lin
                else:
                    raise Error('unsupported compression type')
                self._sampwidth = 2
        else:
            self._comptype = b'NONE'
            self._compname = b'not compressed'

    def _readmark(self, chunk):
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            w = ('Warning: MARK chunk contains only %s marker%s instead of %s' %
                 (len(self._markers), '' if len(self._markers) == 1 else 's',
                  nmarkers))
            warnings.warn(w)


class Aifc_write:
    # Variables used in this class:
    #
    # These variables are user settable through appropriate methods
    # of this class:
    # _file -- the open file with methods write(), close(), tell(), seek()
    #       set through the __init__() method
    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
    #       set through the setcomptype() or setparams() method
    # _compname -- the human-readable AIFF-C compression type
    #       set through the setcomptype() or setparams() method
    # _nchannels -- the number of audio channels
    #       set through the setnchannels() or setparams() method
    # _sampwidth -- the number of bytes per audio sample
    #       set through the setsampwidth() or setparams() method
    # _framerate -- the sampling frequency
    #       set through the setframerate() or setparams() method
    # _nframes -- the number of audio frames written to the header
    #       set through the setnframes() or setparams() method
    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
    #       set through the aifc() method, reset through the
    #       aiff() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _comp -- the compressor from builtin module cl
    # _nframeswritten -- the number of audio frames actually written
    # _datalength -- the size of the audio samples written to the header
    # _datawritten -- the size of the audio samples actually written

    _file = None  # Set here since __del__ checks it

    def __init__(self, f):
        if isinstance(f, str):
            file_object = builtins.open(f, 'wb')
            try:
                self.initfp(file_object)
            except:
                file_object.close()
                raise

            # treat .aiff file extensions as non-compressed audio
            if f.endswith('.aiff'):
                self._aifc = 0
        else:
            # assume it is an open file object already
            self.initfp(f)

    def initfp(self, file):
        self._file = file
        self._version = _AIFC_version
        self._comptype = b'NONE'
        self._compname = b'not compressed'
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def aiff(self):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 0

    def aifc(self):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 1

    def setnchannels(self, nchannels):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if nchannels < 1:
            raise Error('bad # of channels')
        self._nchannels = nchannels

    def getnchannels(self):
        if not self._nchannels:
            raise Error('number of channels not set')
        return self._nchannels

    def setsampwidth(self, sampwidth):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if sampwidth < 1 or sampwidth > 4:
            raise Error('bad sample width')
        self._sampwidth = sampwidth

    def getsampwidth(self):
        if not self._sampwidth:
            raise Error('sample width not set')
        return self._sampwidth

    def setframerate(self, framerate):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if framerate <= 0:
            raise Error('bad frame rate')
        self._framerate = framerate

    def getframerate(self):
        if not self._framerate:
            raise Error('frame rate not set')
        return self._framerate

    def setnframes(self, nframes):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._nframes = nframes

    def getnframes(self):
        return self._nframeswritten

    def setcomptype(self, comptype, compname):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self._comptype = comptype
        self._compname = compname

    def getcomptype(self):
        return self._comptype

    def getcompname(self):
        return self._compname

##  def setversion(self, version):
##      if self._nframeswritten:
##          raise Error, 'cannot change parameters after starting to write'
##      self._version = version

    def setparams(self, params):
        nchannels, sampwidth, framerate, nframes, comptype, compname = params
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)

    def getparams(self):
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error('not all parameters set')
        return _aifc_params(self._nchannels, self._sampwidth, self._framerate,
                            self._nframes, self._comptype, self._compname)

    def setmark(self, id, pos, name):
        if id <= 0:
            raise Error('marker ID must be > 0')
        if pos < 0:
            raise Error('marker position must be >= 0')
        if not isinstance(name, bytes):
            raise Error('marker name must be bytes')
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))

    def getmark(self, id):
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def getmarkers(self):
        if len(self._markers) == 0:
            return None
        return self._markers

    def tell(self):
        return self._nframeswritten

    def writeframesraw(self, data):
        if not isinstance(data, (bytes, bytearray)):
            data = memoryview(data).cast('B')
        self._ensure_header_written(len(data))
        nframes = len(data) // (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)

    def writeframes(self, data):
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
           self._datalength != self._datawritten:
            self._patchheader()

    def close(self):
        if self._file is None:
            return
        try:
            self._ensure_header_written(0)
            if self._datawritten & 1:
                # quick pad to even size
                self._file.write(b'\x00')
                self._datawritten = self._datawritten + 1
            self._writemarkers()
            if self._nframeswritten != self._nframes or \
               self._datalength != self._datawritten or \
               self._marklength:
                self._patchheader()
        finally:
            # Prevent ref cycles
            self._convert = None
            f = self._file
            self._file = None
            f.close()

    #
    # Internal methods.
    #

    def _lin2alaw(self, data):
        import audioop
        return audioop.lin2alaw(data, 2)

    def _lin2ulaw(self, data):
        import audioop
        return audioop.lin2ulaw(data, 2)

    def _lin2adpcm(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate)
        return data

    def _ensure_header_written(self, datasize):
        if not self._nframeswritten:
            if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error('sample width must be 2 when compressing '
                                'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)')
            if not self._nchannels:
                raise Error('# channels not specified')
            if not self._sampwidth:
                raise Error('sample width not specified')
            if not self._framerate:
                raise Error('sampling rate not specified')
            self._write_header(datasize)

    def _init_compression(self):
        if self._comptype == b'G722':
            self._convert = self._lin2adpcm
        elif self._comptype in (b'ulaw', b'ULAW'):
            self._convert = self._lin2ulaw
        elif self._comptype in (b'alaw', b'ALAW'):
            self._convert = self._lin2alaw

    def _write_header(self, initlength):
        if self._aifc and self._comptype != b'NONE':
            self._init_compression()
        self._file.write(b'FORM')
        if not self._nframes:
            self._nframes = initlength // (self._nchannels * self._sampwidth)
        self._datalength = self._nframes * self._nchannels * self._sampwidth
        if self._datalength & 1:
            self._datalength = self._datalength + 1
        if self._aifc:
            if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'):
                self._datalength = self._datalength // 2
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
            elif self._comptype == b'G722':
                self._datalength = (self._datalength + 3) // 4
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
        try:
            self._form_length_pos = self._file.tell()
        except (AttributeError, OSError):
            self._form_length_pos = None
        commlength = self._write_form_length(self._datalength)
        if self._aifc:
            self._file.write(b'AIFC')
            self._file.write(b'FVER')
            _write_ulong(self._file, 4)
            _write_ulong(self._file, self._version)
        else:
            self._file.write(b'AIFF')
        self._file.write(b'COMM')
        _write_ulong(self._file, commlength)
        _write_short(self._file, self._nchannels)
        if self._form_length_pos is not None:
            self._nframes_pos = self._file.tell()
        _write_ulong(self._file, self._nframes)
        if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
            _write_short(self._file, 8)
        else:
            _write_short(self._file, self._sampwidth * 8)
        _write_float(self._file, self._framerate)
        if self._aifc:
            self._file.write(self._comptype)
            _write_string(self._file, self._compname)
        self._file.write(b'SSND')
        if self._form_length_pos is not None:
            self._ssnd_length_pos = self._file.tell()
        _write_ulong(self._file, self._datalength + 8)
        _write_ulong(self._file, 0)
        _write_ulong(self._file, 0)

    def _write_form_length(self, datalength):
        if self._aifc:
            commlength = 18 + 5 + len(self._compname)
            if commlength & 1:
                commlength = commlength + 1
            verslength = 12
        else:
            commlength = 18
            verslength = 0
        _write_ulong(self._file, 4 + verslength + self._marklength + \
                     8 + commlength + 16 + datalength)
        return commlength

    def _patchheader(self):
        curpos = self._file.tell()
        if self._datawritten & 1:
            datalength = self._datawritten + 1
            self._file.write(b'\x00')
        else:
            datalength = self._datawritten
        if datalength == self._datalength and \
           self._nframes == self._nframeswritten and \
           self._marklength == 0:
            self._file.seek(curpos, 0)
            return
        self._file.seek(self._form_length_pos, 0)
        dummy = self._write_form_length(datalength)
        self._file.seek(self._nframes_pos, 0)
        _write_ulong(self._file, self._nframeswritten)
        self._file.seek(self._ssnd_length_pos, 0)
        _write_ulong(self._file, datalength + 8)
        self._file.seek(curpos, 0)
        self._nframes = self._nframeswritten
        self._datalength = datalength

    def _writemarkers(self):
        if len(self._markers) == 0:
            return
        self._file.write(b'MARK')
        length = 2
        for marker in self._markers:
            id, pos, name = marker
            length = length + len(name) + 1 + 6
            if len(name) & 1 == 0:
                length = length + 1
        _write_ulong(self._file, length)
        self._marklength = length + 8
        _write_short(self._file, len(self._markers))
        for marker in self._markers:
            id, pos, name = marker
            _write_short(self._file, id)
            _write_ulong(self._file, pos)
            _write_string(self._file, name)


def open(f, mode=None):
    if mode is None:
        if hasattr(f, 'mode'):
            mode = f.mode
        else:
            mode = 'rb'
    if mode in ('r', 'rb'):
        return Aifc_read(f)
    elif mode in ('w', 'wb'):
        return Aifc_write(f)
    else:
        raise Error("mode must be 'r', 'rb', 'w', or 'wb'")


openfp = open  # B/W compatibility


if __name__ == '__main__':
    import sys
    if not sys.argv[1:]:
        sys.argv.append('/usr/demos/data/audio/bach.aiff')
    fn = sys.argv[1]
    with open(fn, 'r') as f:
        print("Reading", fn)
        print("nchannels =", f.getnchannels())
        print("nframes   =", f.getnframes())
        print("sampwidth =", f.getsampwidth())
        print("framerate =", f.getframerate())
        print("comptype  =", f.getcomptype())
        print("compname  =", f.getcompname())
        if sys.argv[2:]:
            gn = sys.argv[2]
            print("Writing", gn)
            with open(gn, 'w') as g:
                g.setparams(f.getparams())
                while 1:
                    data = f.readframes(1024)
                    if not data:
                        break
                    g.writeframes(data)
                print("Done.")
17
third_party/python/Lib/antigravity.py
vendored
Normal file

@@ -0,0 +1,17 @@

import webbrowser
import hashlib

webbrowser.open("https://xkcd.com/353/")

def geohash(latitude, longitude, datedow):
    '''Compute geohash() using the Munroe algorithm.

    >>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
    37.857713 -122.544543

    '''
    # https://xkcd.com/426/
    h = hashlib.md5(datedow).hexdigest()
    p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
    print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
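
# How the Munroe algorithm works (explanatory note, not in the original):
# the MD5 hex digest of the date plus that day's Dow opening price is split
# into two 16-hex-digit halves, each read as a binary fraction in [0, 1);
# those fractions replace the fractional parts of the integer latitude and
# longitude, yielding a daily meetup point within the 1x1 degree cell.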

2393
third_party/python/Lib/argparse.py
vendored
Normal file

File diff suppressed because it is too large
322
third_party/python/Lib/ast.py
vendored
Normal file

@@ -0,0 +1,322 @@
"""
    ast
    ~~~

    The `ast` module helps Python applications to process trees of the Python
    abstract syntax grammar.  The abstract syntax itself might change with
    each Python release; this module helps to find out programmatically what
    the current grammar looks like and allows modifications of it.

    An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
    a flag to the `compile()` builtin function or by using the `parse()`
    function from this module.  The result will be a tree of objects whose
    classes all inherit from `ast.AST`.

    A modified abstract syntax tree can be compiled into a Python code object
    using the built-in `compile()` function.

    Additionally various helper functions are provided that make working with
    the trees simpler.  The main intention of the helper functions and this
    module in general is to provide an easy to use interface for libraries
    that work tightly with the python syntax (template engines for example).


    :copyright: Copyright 2008 by Armin Ronacher.
    :license: Python License.
"""
from _ast import *


def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse the source into an AST node.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
    """
    return compile(source, filename, mode, PyCF_ONLY_AST)


_NUM_TYPES = (int, float, complex)

def literal_eval(node_or_string):
    """
    Safely evaluate an expression node or a string containing a Python
    expression.  The string or node provided may only consist of the following
    Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
    sets, booleans, and None.
    """
    if isinstance(node_or_string, str):
        node_or_string = parse(node_or_string, mode='eval')
    if isinstance(node_or_string, Expression):
        node_or_string = node_or_string.body
    def _convert(node):
        if isinstance(node, Constant):
            return node.value
        elif isinstance(node, (Str, Bytes)):
            return node.s
        elif isinstance(node, Num):
            return node.n
        elif isinstance(node, Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, List):
            return list(map(_convert, node.elts))
        elif isinstance(node, Set):
            return set(map(_convert, node.elts))
        elif isinstance(node, Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, NameConstant):
            return node.value
        elif isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):
            operand = _convert(node.operand)
            if isinstance(operand, _NUM_TYPES):
                if isinstance(node.op, UAdd):
                    return + operand
                else:
                    return - operand
        elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)):
            left = _convert(node.left)
            right = _convert(node.right)
            if isinstance(left, _NUM_TYPES) and isinstance(right, _NUM_TYPES):
                if isinstance(node.op, Add):
                    return left + right
                else:
                    return left - right
        raise ValueError('malformed node or string: ' + repr(node))
    return _convert(node_or_string)
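
# Illustrative usage (not part of the module): literal_eval accepts only
# literal structures, so expressions with names or calls raise ValueError.
#
#   >>> literal_eval("{'a': (1, 2.5), 'b': [True, None]}")
#   {'a': (1, 2.5), 'b': [True, None]}
#   >>> literal_eval("__import__('os')")    # rejected
#   Traceback (most recent call last):
#       ...
#   ValueError: malformed node or string: ...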


def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a formatted dump of the tree in *node*.  This is mainly useful for
    debugging purposes.  The returned string will show the names and the values
    for fields.  This makes the code impossible to evaluate, so if evaluation is
    wanted *annotate_fields* must be set to False.  Attributes such as line
    numbers and column offsets are not dumped by default.  If this is wanted,
    *include_attributes* can be set to True.
    """
    def _format(node):
        if isinstance(node, AST):
            fields = [(a, _format(b)) for a, b in iter_fields(node)]
            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                ('%s=%s' % field for field in fields)
                if annotate_fields else
                (b for a, b in fields)
            ))
            if include_attributes and node._attributes:
                rv += fields and ', ' or ' '
                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
                                for a in node._attributes)
            return rv + ')'
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)


def copy_location(new_node, old_node):
    """
    Copy source location (`lineno` and `col_offset` attributes) from
    *old_node* to *new_node* if possible, and return *new_node*.
    """
    for attr in 'lineno', 'col_offset':
        if attr in old_node._attributes and attr in new_node._attributes \
           and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node


def fix_missing_locations(node):
    """
    When you compile a node tree with compile(), the compiler expects lineno and
    col_offset attributes for every node that supports them.  This is rather
    tedious to fill in for generated nodes, so this helper adds these attributes
    recursively where not already set, by setting them to the values of the
    parent node.  It works recursively starting at *node*.
    """
    def _fix(node, lineno, col_offset):
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    _fix(node, 1, 0)
    return node


def increment_lineno(node, n=1):
    """
    Increment the line number of each node in the tree starting at *node* by *n*.
    This is useful to "move code" to a different location in a file.
    """
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
    return node


def iter_fields(node):
    """
    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
    that is present on *node*.
    """
    for field in node._fields:
        try:
            yield field, getattr(node, field)
        except AttributeError:
            pass


def iter_child_nodes(node):
    """
    Yield all direct child nodes of *node*, that is, all fields that are nodes
    and all items of fields that are lists of nodes.
    """
    for name, field in iter_fields(node):
        if isinstance(field, AST):
            yield field
        elif isinstance(field, list):
            for item in field:
                if isinstance(item, AST):
                    yield item


def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found.  If the node provided does not have docstrings a TypeError
    will be raised.
    """
    if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    if not (node.body and isinstance(node.body[0], Expr)):
        return
    node = node.body[0].value
    if isinstance(node, Str):
        text = node.s
    elif isinstance(node, Constant) and isinstance(node.value, str):
        text = node.value
    else:
        return
    if clean:
        import inspect
        text = inspect.cleandoc(text)
    return text


def walk(node):
    """
    Recursively yield all descendant nodes in the tree starting at *node*
    (including *node* itself), in no specified order.  This is useful if you
    only want to modify nodes in place and don't care about the context.
    """
    from collections import deque
    todo = deque([node])
    while todo:
        node = todo.popleft()
        todo.extend(iter_child_nodes(node))
        yield node
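
# Illustrative usage (not part of the module): count the Name nodes in a
# parsed expression.
#
#   tree = parse('a + b * a', mode='eval')
#   names = [n.id for n in walk(tree) if isinstance(n, Name)]
#   # names == ['a', 'b', 'a']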


class NodeVisitor(object):
    """
    A node visitor base class that walks the abstract syntax tree and calls a
    visitor function for every node found.  This function may return a value
    which is forwarded by the `visit` method.

    This class is meant to be subclassed, with the subclass adding visitor
    methods.

    By default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node.  So a `TryFinally` node visit function would
    be `visit_TryFinally`.  This behavior can be changed by overriding
    the `visit` method.  If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.

    Don't use the `NodeVisitor` if you want to apply changes to nodes during
    traversing.  For this a special visitor exists (`NodeTransformer`) that
    allows modifications.
    """

    def visit(self, node):
        """Visit a node."""
        method = 'visit_' + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for field, value in iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
            elif isinstance(value, AST):
                self.visit(value)


class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node.  If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value.  The return value may be the
    original node in which case no replacement takes place.

    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::

       class RewriteName(NodeTransformer):

           def visit_Name(self, node):
               return copy_location(Subscript(
                   value=Name(id='data', ctx=Load()),
                   slice=Index(value=Str(s=node.id)),
                   ctx=node.ctx
               ), node)

    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.

    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.

    Usually you use the transformer like this::

       node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            continue
                        elif not isinstance(value, AST):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
307
third_party/python/Lib/asynchat.py
vendored
Normal file

@@ -0,0 +1,307 @@
# -*- Mode: Python; tab-width: 4 -*-
#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
#       Author: Sam Rushing <rushing@nightmare.com>

# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

r"""A class supporting chat-style (command/response) protocols.

This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).

The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.

for example:
Say you build an async nntp client using this class.  At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting.  Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
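
# A minimal subclass sketch (illustrative, not part of the module): collect
# CRLF-terminated lines and hand each one to a callback.
#
#   class LineChannel(async_chat):
#       def __init__(self, sock, on_line):
#           super().__init__(sock)
#           self.on_line = on_line
#           self.set_terminator(b'\r\n')
#       def collect_incoming_data(self, data):
#           self._collect_incoming_data(data)
#       def found_terminator(self):
#           self.on_line(self._get_data())
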
import asyncore
from collections import deque


class async_chat(asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults

    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    # we don't want to enable the use of encoding by default, because that is a
    # sign of an application bug that we don't want to pass silently

    use_encoding = 0
    encoding = 'latin-1'

    def __init__(self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = b''

        # we use a list here rather than io.BytesIO for a few reasons...
        # del lst[:] is faster than bio.truncate(0)
        # lst = [] is faster than bio.truncate(0)
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__(self, sock, map)

    def collect_incoming_data(self, data):
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        self.incoming.append(data)

    def _get_data(self):
        d = b''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator(self, term):
        """Set the input delimiter.

        Can be a fixed string of any length, an integer, or None.
        """
        if isinstance(term, str) and self.use_encoding:
            term = bytes(term, self.encoding)
        elif isinstance(term, int) and term < 0:
            raise ValueError('the number of received bytes must be positive')
        self.terminator = term

    def get_terminator(self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read(self):

        try:
            data = self.recv(self.ac_in_buffer_size)
        except BlockingIOError:
            return
        except OSError as why:
            self.handle_error()
            return

        if isinstance(data, str) and self.use_encoding:
            data = bytes(data, self.encoding)  # bugfix: was bytes(str, ...)
        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data(self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data(self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data(self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string
                        # (source of subtle bugs)
                        self.collect_incoming_data(self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator
                    # is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end(self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data(self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data(self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def handle_write(self):
        self.initiate_send()

    def handle_close(self):
        self.close()

    def push(self, data):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)',
                            type(data))
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in range(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer(self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable(self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.

        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable(self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done(self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                data = first[:obs]
            except TypeError:
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            if isinstance(data, str) and self.use_encoding:
                data = bytes(data, self.encoding)

            # send the data
            try:
                num_sent = self.send(data)
            except OSError:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers(self):
        # Emergencies only!
        self.ac_in_buffer = b''
        del self.incoming[:]
        self.producer_fifo.clear()


class simple_producer:

    def __init__(self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more(self):
        if len(self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = b''
            return result


# Given 'haystack', see if any prefix of 'needle' is at its end.  This
# assumes an exact match has already been checked.  Return the number of
# characters matched.
# for example:
# f_p_a_e("qwerty\r", "\r\n") => 1
# f_p_a_e("qwertydkjf", "\r\n") => 0
# f_p_a_e("qwerty\r\n", "\r\n") => <undefined>

# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python:   28961/s
# old python:   18307/s
#        re:    12820/s
#     regex:    14035/s

def find_prefix_at_end(haystack, needle):
    l = len(needle) - 1
    while l and not haystack.endswith(needle[:l]):
        l -= 1
    return l
50
third_party/python/Lib/asyncio/__init__.py
vendored
Normal file

@@ -0,0 +1,50 @@
"""The asyncio package, tracking PEP 3156."""

import sys

# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
# Do this first, so the other submodules can use "from . import selectors".
# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
try:
    from . import selectors
except ImportError:
    import selectors  # Will also be exported.

if sys.platform == 'win32':
    # Similar thing for _overlapped.
    try:
        from . import _overlapped
    except ImportError:
        import _overlapped  # Will also be exported.

# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .futures import *
from .locks import *
from .protocols import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .transports import *

__all__ = (base_events.__all__ +
           coroutines.__all__ +
           events.__all__ +
           futures.__all__ +
           locks.__all__ +
           protocols.__all__ +
           queues.__all__ +
           streams.__all__ +
           subprocess.__all__ +
           tasks.__all__ +
           transports.__all__)

if sys.platform == 'win32':  # pragma: no cover
    from .windows_events import *
    __all__ += windows_events.__all__
else:
    from .unix_events import *  # pragma: no cover
    __all__ += unix_events.__all__
1505
third_party/python/Lib/asyncio/base_events.py
vendored
Normal file

File diff suppressed because it is too large

71
third_party/python/Lib/asyncio/base_futures.py
vendored
Normal file

@@ -0,0 +1,71 @@
__all__ = []

import concurrent.futures._base
import reprlib

from . import events

Error = concurrent.futures._base.Error
CancelledError = concurrent.futures.CancelledError
TimeoutError = concurrent.futures.TimeoutError


class InvalidStateError(Error):
    """The operation is not allowed in this state."""


# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'


def isfuture(obj):
    """Check for a Future.

    This returns True when obj is a Future instance or is advertising
    itself as duck-type compatible by setting _asyncio_future_blocking.
    See comment in Future for more details.
    """
    return (hasattr(obj.__class__, '_asyncio_future_blocking') and
            obj._asyncio_future_blocking is not None)
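
# Illustrative note (not part of the module): duck-typed futures opt in by
# exposing a non-None _asyncio_future_blocking class attribute.
#
#   class DuckFuture:
#       _asyncio_future_blocking = False
#   assert isfuture(DuckFuture())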
|
||||
|
||||
|
||||
def _format_callbacks(cb):
|
||||
"""helper function for Future.__repr__"""
|
||||
size = len(cb)
|
||||
if not size:
|
||||
cb = ''
|
||||
|
||||
def format_cb(callback):
|
||||
return events._format_callback_source(callback, ())
|
||||
|
||||
if size == 1:
|
||||
cb = format_cb(cb[0])
|
||||
elif size == 2:
|
||||
cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
|
||||
elif size > 2:
|
||||
cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
|
||||
size - 2,
|
||||
format_cb(cb[-1]))
|
||||
return 'cb=[%s]' % cb
|
||||
|
||||
|
||||
def _future_repr_info(future):
|
||||
# (Future) -> str
|
||||
"""helper function for Future.__repr__"""
|
||||
info = [future._state.lower()]
|
||||
if future._state == _FINISHED:
|
||||
if future._exception is not None:
|
||||
info.append('exception={!r}'.format(future._exception))
|
||||
else:
|
||||
# use reprlib to limit the length of the output, especially
|
||||
# for very long strings
|
||||
result = reprlib.repr(future._result)
|
||||
info.append('result={}'.format(result))
|
||||
if future._callbacks:
|
||||
info.append(_format_callbacks(future._callbacks))
|
||||
if future._source_traceback:
|
||||
frame = future._source_traceback[-1]
|
||||
info.append('created at %s:%s' % (frame[0], frame[1]))
|
||||
return info
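
Because isfuture() looks only for a non-None _asyncio_future_blocking attribute on the class, third-party objects can opt in by duck typing. A minimal sketch; ThirdPartyFuture is a hypothetical class, not part of asyncio:

from asyncio import isfuture

class ThirdPartyFuture:
    # Marker attribute: present on the class and not None, so
    # isfuture() reports duck-type compatibility.
    _asyncio_future_blocking = False

class NotAFuture:
    pass

assert isfuture(ThirdPartyFuture()) is True
assert isfuture(NotAFuture()) is False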

293  third_party/python/Lib/asyncio/base_subprocess.py  vendored  Normal file
@@ -0,0 +1,293 @@
import collections
import subprocess
import warnings

from . import compat
from . import protocols
from . import transports
from .coroutines import coroutine
from .log import logger


class BaseSubprocessTransport(transports.SubprocessTransport):

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 waiter=None, extra=None, **kwargs):
        super().__init__(extra)
        self._closed = False
        self._protocol = protocol
        self._loop = loop
        self._proc = None
        self._pid = None
        self._returncode = None
        self._exit_waiters = []
        self._pending_calls = collections.deque()
        self._pipes = {}
        self._finished = False

        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None

        # Create the child process: set the _proc attribute
        try:
            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                        stderr=stderr, bufsize=bufsize, **kwargs)
        except:
            self.close()
            raise

        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc

        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)

        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append('pid=%s' % self._pid)
        if self._returncode is not None:
            info.append('returncode=%s' % self._returncode)
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')

        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append('stdin=%s' % stdin.pipe)

        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            info.append('stdout=stderr=%s' % stdout.pipe)
        else:
            if stdout is not None:
                info.append('stdout=%s' % stdout.pipe)
            if stderr is not None:
                info.append('stderr=%s' % stderr.pipe)

        return '<%s>' % ' '.join(info)

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        raise NotImplementedError

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closed

    def close(self):
        if self._closed:
            return
        self._closed = True

        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()

        if (self._proc is not None
                # the child process finished?
                and self._returncode is None
                # the child process finished but the transport was not notified yet?
                and self._proc.poll() is None
                ):
            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)

            try:
                self._proc.kill()
            except ProcessLookupError:
                pass

            # Don't clear the _proc reference yet: _post_init() may still run

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. It's no longer the case on
    # Python 3.4 thanks to PEP 442.
    if compat.PY34:
        def __del__(self):
            if not self._closed:
                warnings.warn("unclosed transport %r" % self, ResourceWarning,
                              source=self)
                self.close()

    def get_pid(self):
        return self._pid

    def get_returncode(self):
        return self._returncode

    def get_pipe_transport(self, fd):
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def _check_proc(self):
        if self._proc is None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        self._check_proc()
        self._proc.send_signal(signal)

    def terminate(self):
        self._check_proc()
        self._proc.terminate()

    def kill(self):
        self._check_proc()
        self._proc.kill()

    @coroutine
    def _connect_pipes(self, waiter):
        try:
            proc = self._proc
            loop = self._loop

            if proc.stdin is not None:
                _, pipe = yield from loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin)
                self._pipes[0] = pipe

            if proc.stdout is not None:
                _, pipe = yield from loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout)
                self._pipes[1] = pipe

            if proc.stderr is not None:
                _, pipe = yield from loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr)
                self._pipes[2] = pipe

            assert self._pending_calls is not None

            loop.call_soon(self._protocol.connection_made, self)
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            self._pending_calls = None
        except Exception as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r',
                        self, returncode)
        self._returncode = returncode
        if self._proc.returncode is None:
            # asyncio uses a child watcher: copy the status into the Popen
            # object. On Python 3.6, it is required to avoid a ResourceWarning.
            self._proc.returncode = returncode
        self._call(self._protocol.process_exited)
        self._try_finish()

        # wake up futures waiting for wait()
        for waiter in self._exit_waiters:
            if not waiter.cancelled():
                waiter.set_result(returncode)
        self._exit_waiters = None

    @coroutine
    def _wait(self):
        """Wait until the process exit and return the process return code.

        This method is a coroutine."""
        if self._returncode is not None:
            return self._returncode

        waiter = self._loop.create_future()
        self._exit_waiters.append(waiter)
        return (yield from waiter)

    def _try_finish(self):
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._loop = None
            self._proc = None
            self._protocol = None


class WriteSubprocessPipeProto(protocols.BaseProtocol):

    def __init__(self, proc, fd):
        self.proc = proc
        self.fd = fd
        self.pipe = None
        self.disconnected = False

    def connection_made(self, transport):
        self.pipe = transport

    def __repr__(self):
        return ('<%s fd=%s pipe=%r>'
                % (self.__class__.__name__, self.fd, self.pipe))

    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        self.proc._protocol.resume_writing()


class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):

    def data_received(self, data):
        self.proc._pipe_data_received(self.fd, data)
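
BaseSubprocessTransport is normally reached through the event loop's subprocess_exec() and the high-level helpers built on it, not instantiated directly. A short usage sketch, assuming a Unix host with an echo binary on PATH:

import asyncio

@asyncio.coroutine
def run():
    # create_subprocess_exec() ends up in the loop's subprocess_exec(),
    # which builds the platform's BaseSubprocessTransport subclass.
    proc = yield from asyncio.create_subprocess_exec(
        'echo', 'hello',
        stdout=asyncio.subprocess.PIPE)
    out, _ = yield from proc.communicate()   # waits for exit, like _wait()
    return proc.returncode, out

loop = asyncio.get_event_loop()
code, out = loop.run_until_complete(run())
assert code == 0 and out.strip() == b'hello'
loop.close()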

76  third_party/python/Lib/asyncio/base_tasks.py  vendored  Normal file
@@ -0,0 +1,76 @@
import linecache
import traceback

from . import base_futures
from . import coroutines


def _task_repr_info(task):
    info = base_futures._future_repr_info(task)

    if task._must_cancel:
        # replace status
        info[0] = 'cancelling'

    coro = coroutines._format_coroutine(task._coro)
    info.insert(1, 'coro=<%s>' % coro)

    if task._fut_waiter is not None:
        info.insert(2, 'wait_for=%r' % task._fut_waiter)
    return info


def _task_get_stack(task, limit):
    frames = []
    try:
        # 'async def' coroutines
        f = task._coro.cr_frame
    except AttributeError:
        f = task._coro.gi_frame
    if f is not None:
        while f is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(f)
            f = f.f_back
        frames.reverse()
    elif task._exception is not None:
        tb = task._exception.__traceback__
        while tb is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(tb.tb_frame)
            tb = tb.tb_next
    return frames


def _task_print_stack(task, limit, file):
    extracted_list = []
    checked = set()
    for f in task.get_stack(limit=limit):
        lineno = f.f_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        if filename not in checked:
            checked.add(filename)
            linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        extracted_list.append((filename, lineno, name, line))
    exc = task._exception
    if not extracted_list:
        print('No stack for %r' % task, file=file)
    elif exc is not None:
        print('Traceback for %r (most recent call last):' % task,
              file=file)
    else:
        print('Stack for %r (most recent call last):' % task,
              file=file)
    traceback.print_list(extracted_list, file=file)
    if exc is not None:
        for line in traceback.format_exception_only(exc.__class__, exc):
            print(line, file=file, end='')
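
These helpers back the public Task.get_stack() and Task.print_stack() methods. A small sketch of those entry points, assuming a default event loop:

import asyncio, sys

@asyncio.coroutine
def waiter():
    yield from asyncio.sleep(60)

loop = asyncio.get_event_loop()
task = loop.create_task(waiter())
loop.run_until_complete(asyncio.sleep(0))   # let the task start and suspend

task.print_stack(limit=5, file=sys.stdout)  # calls _task_print_stack()
frames = task.get_stack(limit=5)            # calls _task_get_stack()
assert frames, 'a suspended task has at least its coroutine frame'

task.cancel()
try:
    loop.run_until_complete(task)
except asyncio.CancelledError:
    pass
loop.close()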

18  third_party/python/Lib/asyncio/compat.py  vendored  Normal file
@@ -0,0 +1,18 @@
"""Compatibility helpers for the different Python versions."""

import sys

PY34 = sys.version_info >= (3, 4)
PY35 = sys.version_info >= (3, 5)
PY352 = sys.version_info >= (3, 5, 2)


def flatten_list_bytes(list_of_data):
    """Concatenate a sequence of bytes-like objects."""
    if not PY34:
        # On Python 3.3 and older, bytes.join() doesn't handle
        # memoryview.
        list_of_data = (
            bytes(data) if isinstance(data, memoryview) else data
            for data in list_of_data)
    return b''.join(list_of_data)
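
The memoryview special case only matters on Python 3.3; on newer versions the helper reduces to b''.join(). A tiny demonstration:

from asyncio.compat import flatten_list_bytes

# Accepts a mix of bytes-like objects.
chunks = [b'ab', memoryview(b'cd'), bytearray(b'ef')]
assert flatten_list_bytes(chunks) == b'abcdef'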

12  third_party/python/Lib/asyncio/constants.py  vendored  Normal file
@@ -0,0 +1,12 @@
"""Constants."""

# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5

# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1

# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
# (see extract_stack() in events.py)
DEBUG_STACK_DEPTH = 10
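
For context, a sketch of where DEBUG_STACK_DEPTH takes effect: extract_stack() in events.py uses it as the default frame limit.

from asyncio import constants, events

# extract_stack() defaults its limit to DEBUG_STACK_DEPTH, so the
# captured summary can never exceed it.
stack = events.extract_stack()
assert len(stack) <= constants.DEBUG_STACK_DEPTH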

353  third_party/python/Lib/asyncio/coroutines.py  vendored  Normal file
@@ -0,0 +1,353 @@
__all__ = ['coroutine',
           'iscoroutinefunction', 'iscoroutine']

import functools
import inspect
import opcode
import os
import sys
import traceback
import types

from . import compat
from . import constants
from . import events
from . import base_futures
from .log import logger


# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']

# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below).  That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call.  Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines.  A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
_DEBUG = (not sys.flags.ignore_environment and
          bool(os.environ.get('PYTHONASYNCIODEBUG')))


try:
    _types_coroutine = types.coroutine
    _types_CoroutineType = types.CoroutineType
except AttributeError:
    # Python 3.4
    _types_coroutine = None
    _types_CoroutineType = None

try:
    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    # Python 3.4
    _inspect_iscoroutinefunction = lambda func: False

try:
    from collections.abc import Coroutine as _CoroutineABC, \
                                Awaitable as _AwaitableABC
except ImportError:
    _CoroutineABC = _AwaitableABC = None


# Check for CPython issue #21209
def has_yield_from_bug():
    class MyGen:
        def __init__(self):
            self.send_args = None
        def __iter__(self):
            return self
        def __next__(self):
            return 42
        def send(self, *what):
            self.send_args = what
            return None
    def yield_from_gen(gen):
        yield from gen
    value = (1, 2, 3)
    gen = MyGen()
    coro = yield_from_gen(gen)
    next(coro)
    coro.send(value)
    return gen.send_args != (value,)
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug


def debug_wrapper(gen):
    # This function is called from 'sys.set_coroutine_wrapper'.
    # We only wrap here coroutines defined via 'async def' syntax.
    # Generator-based coroutines are wrapped in @coroutine
    # decorator.
    return CoroWrapper(gen, None)


class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.

    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        self._source_traceback = events.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)

    def __iter__(self):
        return self

    def __next__(self):
        return self.gen.send(None)

    if _YIELD_FROM_BUG:
        # For CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of passing
        # the tuple unchanged. Check if the caller is a generator using "yield
        # from" to decide if the parameter should be unpacked or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)

    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    if compat.PY35:

        def __await__(self):
            cr_await = getattr(self.gen, 'cr_await', None)
            if cr_await is not None:
                raise RuntimeError(
                    "Cannot await on coroutine {!r} while it's "
                    "awaiting for {!r}".format(self.gen, cr_await))
            return self

        @property
        def gi_yieldfrom(self):
            return self.gen.gi_yieldfrom

        @property
        def cr_await(self):
            return self.gen.cr_await

        @property
        def cr_running(self):
            return self.gen.cr_running

        @property
        def cr_code(self):
            return self.gen.cr_code

        @property
        def cr_frame(self):
            return self.gen.cr_frame

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is None:
            frame = getattr(gen, 'cr_frame', None)
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += (f'\nCoroutine object created at '
                        f'(most recent call last, truncated to '
                        f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
                msg += tb.rstrip()
            logger.error(msg)


def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if _inspect_iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        # Wrapping in CoroWrapper will happen via
        # 'sys.set_coroutine_wrapper' function.
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            elif _AwaitableABC is not None:
                # If 'func' returns an Awaitable (new in 3.5) we
                # want to run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, _AwaitableABC):
                        res = yield from await_meth()
            return res

    if not _DEBUG:
        if _types_coroutine is None:
            wrapper = coro
        else:
            wrapper = _types_coroutine(coro)
    else:
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial) may lack __qualname__.
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper


# A marker for iscoroutinefunction.
_is_coroutine = object()


def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    return (getattr(func, '_is_coroutine', None) is _is_coroutine or
            _inspect_iscoroutinefunction(func))


_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
    _COROUTINE_TYPES += (_CoroutineABC,)
if _types_CoroutineType is not None:
    # Prioritize native coroutine check to speed-up
    # asyncio.iscoroutine.
    _COROUTINE_TYPES = (_types_CoroutineType,) + _COROUTINE_TYPES


def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    return isinstance(obj, _COROUTINE_TYPES)


def _format_coroutine(coro):
    assert iscoroutine(coro)

    if not hasattr(coro, 'cr_code') and not hasattr(coro, 'gi_code'):
        # Most likely a built-in type or a Cython coroutine.

        # Built-in types might not have __qualname__ or __name__.
        coro_name = getattr(
            coro, '__qualname__',
            getattr(coro, '__name__', type(coro).__name__))
        coro_name = '{}()'.format(coro_name)

        running = False
        try:
            running = coro.cr_running
        except AttributeError:
            try:
                running = coro.gi_running
            except AttributeError:
                pass

        if running:
            return '{} running'.format(coro_name)
        else:
            return coro_name

    coro_name = None
    if isinstance(coro, CoroWrapper):
        func = coro.func
        coro_name = coro.__qualname__
        if coro_name is not None:
            coro_name = '{}()'.format(coro_name)
    else:
        func = coro

    if coro_name is None:
        coro_name = events._format_callback(func, (), {})

    coro_code = None
    if hasattr(coro, 'cr_code') and coro.cr_code:
        coro_code = coro.cr_code
    elif hasattr(coro, 'gi_code') and coro.gi_code:
        coro_code = coro.gi_code

    coro_frame = None
    if hasattr(coro, 'cr_frame') and coro.cr_frame:
        coro_frame = coro.cr_frame
    elif hasattr(coro, 'gi_frame') and coro.gi_frame:
        coro_frame = coro.gi_frame

    filename = '<empty co_filename>'
    if coro_code and coro_code.co_filename:
        filename = coro_code.co_filename

    lineno = 0
    coro_repr = coro_name

    if (isinstance(coro, CoroWrapper) and
            not inspect.isgeneratorfunction(coro.func) and
            coro.func is not None):
        source = events._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = ('%s done, defined at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            coro_repr = ('%s running, defined at %s:%s'
                         % (coro_name, filename, lineno))
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = ('%s running at %s:%s'
                     % (coro_name, filename, lineno))
    elif coro_code:
        lineno = coro_code.co_firstlineno
        coro_repr = ('%s done, defined at %s:%s'
                     % (coro_name, filename, lineno))

    return coro_repr
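
A short sketch of the decorator in use: both a generator function and a plain function become awaitable generator-based coroutines, and iscoroutinefunction() recognizes them through the _is_coroutine marker.

import asyncio
from asyncio import coroutine, iscoroutinefunction

@coroutine
def add(a, b):
    yield from asyncio.sleep(0)   # generator function: wrapped as-is
    return a + b

@coroutine
def forty_two():
    return 42                     # plain function: wrapped in a generator

assert iscoroutinefunction(add) and iscoroutinefunction(forty_two)

loop = asyncio.get_event_loop()
assert loop.run_until_complete(add(20, 22)) == 42
assert loop.run_until_complete(forty_two()) == 42
loop.close()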

715  third_party/python/Lib/asyncio/events.py  vendored  Normal file
@@ -0,0 +1,715 @@
"""Event loop and event loop policy."""

__all__ = ['AbstractEventLoopPolicy',
           'AbstractEventLoop', 'AbstractServer',
           'Handle', 'TimerHandle',
           'get_event_loop_policy', 'set_event_loop_policy',
           'get_event_loop', 'set_event_loop', 'new_event_loop',
           'get_child_watcher', 'set_child_watcher',
           '_set_running_loop', '_get_running_loop',
           ]

import functools
import inspect
import os
import reprlib
import socket
import subprocess
import sys
import threading
import traceback

from . import compat
from . import constants


def _get_function_source(func):
    if compat.PY34:
        func = inspect.unwrap(func)
    elif hasattr(func, '__wrapped__'):
        func = func.__wrapped__
    if inspect.isfunction(func):
        code = func.__code__
        return (code.co_filename, code.co_firstlineno)
    if isinstance(func, functools.partial):
        return _get_function_source(func.func)
    if compat.PY34 and isinstance(func, functools.partialmethod):
        return _get_function_source(func.func)
    return None


def _format_args_and_kwargs(args, kwargs):
    """Format function arguments and keyword arguments.

    Special case for a single parameter: ('hello',) is formatted as ('hello').
    """
    # use reprlib to limit the length of the output
    items = []
    if args:
        items.extend(reprlib.repr(arg) for arg in args)
    if kwargs:
        items.extend('{}={}'.format(k, reprlib.repr(v))
                     for k, v in kwargs.items())
    return '(' + ', '.join(items) + ')'


def _format_callback(func, args, kwargs, suffix=''):
    if isinstance(func, functools.partial):
        suffix = _format_args_and_kwargs(args, kwargs) + suffix
        return _format_callback(func.func, func.args, func.keywords, suffix)

    if hasattr(func, '__qualname__') and func.__qualname__:
        func_repr = func.__qualname__
    elif hasattr(func, '__name__') and func.__name__:
        func_repr = func.__name__
    else:
        func_repr = repr(func)

    func_repr += _format_args_and_kwargs(args, kwargs)
    if suffix:
        func_repr += suffix
    return func_repr


def _format_callback_source(func, args):
    func_repr = _format_callback(func, args, None)
    source = _get_function_source(func)
    if source:
        func_repr += ' at %s:%s' % source
    return func_repr


def extract_stack(f=None, limit=None):
    """Replacement for traceback.extract_stack() that only does the
    necessary work for asyncio debug mode.
    """
    if f is None:
        f = sys._getframe().f_back
    if limit is None:
        # Limit the amount of work to a reasonable amount, as extract_stack()
        # can be called for each coroutine and future in debug mode.
        limit = constants.DEBUG_STACK_DEPTH
    stack = traceback.StackSummary.extract(traceback.walk_stack(f),
                                           limit=limit,
                                           lookup_lines=False)
    stack.reverse()
    return stack


class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '_repr', '__weakref__')

    def __init__(self, callback, args, loop):
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        if self._loop.get_debug():
            self._source_traceback = extract_stack(sys._getframe(1))
        else:
            self._source_traceback = None

    def _repr_info(self):
        info = [self.__class__.__name__]
        if self._cancelled:
            info.append('cancelled')
        if self._callback is not None:
            info.append(_format_callback_source(self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append('created at %s:%s' % (frame[0], frame[1]))
        return info

    def __repr__(self):
        if self._repr is not None:
            return self._repr
        info = self._repr_info()
        return '<%s>' % ' '.join(info)

    def cancel(self):
        if not self._cancelled:
            self._cancelled = True
            if self._loop.get_debug():
                # Keep a representation in debug mode to keep callback and
                # parameters. For example, to log the warning
                # "Executing <Handle...> took 2.5 second"
                self._repr = repr(self)
            self._callback = None
            self._args = None

    def _run(self):
        try:
            self._callback(*self._args)
        except Exception as exc:
            cb = _format_callback_source(self._callback, self._args)
            msg = 'Exception in callback {}'.format(cb)
            context = {
                'message': msg,
                'exception': exc,
                'handle': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.


class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop):
        assert when is not None
        super().__init__(callback, args, loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False

    def _repr_info(self):
        info = super()._repr_info()
        pos = 2 if self._cancelled else 1
        info.insert(pos, 'when=%s' % self._when)
        return info

    def __hash__(self):
        return hash(self._when)

    def __lt__(self, other):
        return self._when < other._when

    def __le__(self, other):
        if self._when < other._when:
            return True
        return self.__eq__(other)

    def __gt__(self, other):
        return self._when > other._when

    def __ge__(self, other):
        if self._when > other._when:
            return True
        return self.__eq__(other)

    def __eq__(self, other):
        if isinstance(other, TimerHandle):
            return (self._when == other._when and
                    self._callback == other._callback and
                    self._args == other._args and
                    self._cancelled == other._cancelled)
        return NotImplemented

    def __ne__(self, other):
        equal = self.__eq__(other)
        return NotImplemented if equal is NotImplemented else not equal

    def cancel(self):
        if not self._cancelled:
            self._loop._timer_handle_cancelled(self)
        super().cancel()


class AbstractServer:
    """Abstract server returned by create_server()."""

    def close(self):
        """Stop serving.  This leaves existing connections open."""
        return NotImplemented

    def wait_closed(self):
        """Coroutine to wait until service is closed."""
        return NotImplemented


class AbstractEventLoop:
    """Abstract event loop."""

    # Running and stopping the event loop.

    def run_forever(self):
        """Run the event loop until stop() is called."""
        raise NotImplementedError

    def run_until_complete(self, future):
        """Run the event loop until a Future is done.

        Return the Future's result, or raise its exception.
        """
        raise NotImplementedError

    def stop(self):
        """Stop the event loop as soon as reasonable.

        Exactly how soon that is may depend on the implementation, but
        no more I/O callbacks should be scheduled.
        """
        raise NotImplementedError

    def is_running(self):
        """Return whether the event loop is currently running."""
        raise NotImplementedError

    def is_closed(self):
        """Returns True if the event loop was closed."""
        raise NotImplementedError

    def close(self):
        """Close the loop.

        The loop should not be running.

        This is idempotent and irreversible.

        No other methods should be called after this one.
        """
        raise NotImplementedError

    def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        raise NotImplementedError

    # Methods scheduling callbacks.  All these return Handles.

    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        raise NotImplementedError

    def call_soon(self, callback, *args):
        return self.call_later(0, callback, *args)

    def call_later(self, delay, callback, *args):
        raise NotImplementedError

    def call_at(self, when, callback, *args):
        raise NotImplementedError

    def time(self):
        raise NotImplementedError

    def create_future(self):
        raise NotImplementedError

    # Method scheduling a coroutine object: create a task.

    def create_task(self, coro):
        raise NotImplementedError

    # Methods for interacting with threads.

    def call_soon_threadsafe(self, callback, *args):
        raise NotImplementedError

    def run_in_executor(self, executor, func, *args):
        raise NotImplementedError

    def set_default_executor(self, executor):
        raise NotImplementedError

    # Network I/O methods returning Futures.

    def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
        raise NotImplementedError

    def getnameinfo(self, sockaddr, flags=0):
        raise NotImplementedError

    def create_connection(self, protocol_factory, host=None, port=None, *,
                          ssl=None, family=0, proto=0, flags=0, sock=None,
                          local_addr=None, server_hostname=None):
        raise NotImplementedError

    def create_server(self, protocol_factory, host=None, port=None, *,
                      family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
                      sock=None, backlog=100, ssl=None, reuse_address=None,
                      reuse_port=None):
        """A coroutine which creates a TCP server bound to host and port.

        The return value is a Server object which can be used to stop
        the service.

        If host is an empty string or None all interfaces are assumed
        and a list of multiple sockets will be returned (most likely
        one for IPv4 and another one for IPv6). The host parameter can also be a
        sequence (e.g. list) of hosts to bind to.

        family can be set to either AF_INET or AF_INET6 to force the
        socket to use IPv4 or IPv6. If not set it will be determined
        from host (defaults to AF_UNSPEC).

        flags is a bitmask for getaddrinfo().

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified it will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows.
        """
        raise NotImplementedError

    def create_unix_connection(self, protocol_factory, path, *,
                               ssl=None, sock=None,
                               server_hostname=None):
        raise NotImplementedError

    def create_unix_server(self, protocol_factory, path, *,
                           sock=None, backlog=100, ssl=None):
        """A coroutine which creates a UNIX Domain Socket server.

        The return value is a Server object, which can be used to stop
        the service.

        path is a str, representing a file system path to bind the
        server socket to.

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.
        """
        raise NotImplementedError

    def create_datagram_endpoint(self, protocol_factory,
                                 local_addr=None, remote_addr=None, *,
                                 family=0, proto=0, flags=0,
                                 reuse_address=None, reuse_port=None,
                                 allow_broadcast=None, sock=None):
        """A coroutine which creates a datagram endpoint.

        This method will try to establish the endpoint in the background.
        When successful, the coroutine returns a (transport, protocol) pair.

        protocol_factory must be a callable returning a protocol instance.

        socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_DGRAM.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified it will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows and some UNIXes. If the
        :py:data:`~socket.SO_REUSEPORT` constant is not defined then this
        capability is unsupported.

        allow_broadcast tells the kernel to allow this endpoint to send
        messages to the broadcast address.

        sock can optionally be specified in order to use a preexisting
        socket object.
        """
        raise NotImplementedError

    # Pipes and subprocesses.

    def connect_read_pipe(self, protocol_factory, pipe):
        """Register read pipe in event loop. Set the pipe to non-blocking mode.

        protocol_factory should instantiate object with Protocol interface.
        pipe is a file-like object.
        Return pair (transport, protocol), where transport supports the
        ReadTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is: we need to own the pipe and close it when the
        # transport finishes. Complicated errors can occur if f.fileno() is
        # passed and the fd is closed in the pipe transport before f is
        # closed, and vice versa.
        raise NotImplementedError

    def connect_write_pipe(self, protocol_factory, pipe):
        """Register write pipe in event loop.

        protocol_factory should instantiate object with BaseProtocol interface.
        Pipe is file-like object already switched to nonblocking.
        Return pair (transport, protocol), where transport support
        WriteTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is: we need to own the pipe and close it when the
        # transport finishes. Complicated errors can occur if f.fileno() is
        # passed and the fd is closed in the pipe transport before f is
        # closed, and vice versa.
        raise NotImplementedError

    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         **kwargs):
        raise NotImplementedError

    def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                        **kwargs):
        raise NotImplementedError

    # Ready-based callback registration methods.
    # The add_*() methods return None.
    # The remove_*() methods return True if something was removed,
    # False if there was nothing to delete.

    def add_reader(self, fd, callback, *args):
        raise NotImplementedError

    def remove_reader(self, fd):
        raise NotImplementedError

    def add_writer(self, fd, callback, *args):
        raise NotImplementedError

    def remove_writer(self, fd):
        raise NotImplementedError

    # Completion based I/O methods returning Futures.

    def sock_recv(self, sock, nbytes):
        raise NotImplementedError

    def sock_sendall(self, sock, data):
        raise NotImplementedError

    def sock_connect(self, sock, address):
        raise NotImplementedError

    def sock_accept(self, sock):
        raise NotImplementedError

    # Signal handling.

    def add_signal_handler(self, sig, callback, *args):
        raise NotImplementedError

    def remove_signal_handler(self, sig):
        raise NotImplementedError

    # Task factory.

    def set_task_factory(self, factory):
        raise NotImplementedError

    def get_task_factory(self):
        raise NotImplementedError

    # Error handlers.

    def get_exception_handler(self):
        raise NotImplementedError

    def set_exception_handler(self, handler):
        raise NotImplementedError

    def default_exception_handler(self, context):
        raise NotImplementedError

    def call_exception_handler(self, context):
        raise NotImplementedError

    # Debug flag management.

    def get_debug(self):
        raise NotImplementedError

    def set_debug(self, enabled):
        raise NotImplementedError


class AbstractEventLoopPolicy:
    """Abstract policy for accessing the event loop."""

    def get_event_loop(self):
        """Get the event loop for the current context.

        Returns an event loop object implementing the BaseEventLoop interface,
        or raises an exception in case no event loop has been set for the
        current context and the current policy does not specify to create one.

        It should never return None."""
        raise NotImplementedError

    def set_event_loop(self, loop):
        """Set the event loop for the current context to loop."""
        raise NotImplementedError

    def new_event_loop(self):
        """Create and return a new event loop object according to this
        policy's rules. If there's need to set this loop as the event loop for
        the current context, set_event_loop must be called explicitly."""
        raise NotImplementedError

    # Child processes handling (Unix only).

    def get_child_watcher(self):
        """Get the watcher for child processes."""
        raise NotImplementedError

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""
        raise NotImplementedError


class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
    """Default policy implementation for accessing the event loop.

    In this policy, each thread has its own event loop.  However, we
    only automatically create an event loop by default for the main
    thread; other threads by default have no event loop.

    Other policies may have different rules (e.g. a single global
    event loop, or automatically creating an event loop per thread, or
    using some other notion of context to which an event loop is
    associated).
    """

    _loop_factory = None

    class _Local(threading.local):
        _loop = None
        _set_called = False

    def __init__(self):
        self._local = self._Local()

    def get_event_loop(self):
        """Get the event loop.

        This may be None or an instance of EventLoop.
        """
        if (self._local._loop is None and
                not self._local._set_called and
                isinstance(threading.current_thread(), threading._MainThread)):
            self.set_event_loop(self.new_event_loop())
        if self._local._loop is None:
            raise RuntimeError('There is no current event loop in thread %r.'
                               % threading.current_thread().name)
        return self._local._loop

    def set_event_loop(self, loop):
        """Set the event loop."""
        self._local._set_called = True
        assert loop is None or isinstance(loop, AbstractEventLoop)
        self._local._loop = loop

    def new_event_loop(self):
        """Create a new event loop.

        You must call set_event_loop() to make this the current event
        loop.
        """
        return self._loop_factory()


# Event loop policy.  The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context).  The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None

# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()


# A TLS for the running event loop, used by _get_running_loop.
class _RunningLoop(threading.local):
    loop_pid = (None, None)


_running_loop = _RunningLoop()


def _get_running_loop():
    """Return the running event loop or None.

    This is a low-level function intended to be used by event loops.
    This function is thread-specific.
    """
    running_loop, pid = _running_loop.loop_pid
    if running_loop is not None and pid == os.getpid():
        return running_loop


def _set_running_loop(loop):
    """Set the running event loop.

    This is a low-level function intended to be used by event loops.
    This function is thread-specific.
    """
    _running_loop.loop_pid = (loop, os.getpid())


def _init_event_loop_policy():
    global _event_loop_policy
    with _lock:
        if _event_loop_policy is None:  # pragma: no branch
            from . import DefaultEventLoopPolicy
            _event_loop_policy = DefaultEventLoopPolicy()


def get_event_loop_policy():
    """Get the current event loop policy."""
    if _event_loop_policy is None:
        _init_event_loop_policy()
    return _event_loop_policy


def set_event_loop_policy(policy):
    """Set the current event loop policy.

    If policy is None, the default policy is restored."""
    global _event_loop_policy
    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
    _event_loop_policy = policy


def get_event_loop():
    """Return an asyncio event loop.

    When called from a coroutine or a callback (e.g. scheduled with call_soon
    or similar API), this function will always return the running event loop.

    If there is no running event loop set, the function will return
    the result of `get_event_loop_policy().get_event_loop()` call.
    """
    current_loop = _get_running_loop()
    if current_loop is not None:
        return current_loop
    return get_event_loop_policy().get_event_loop()


def set_event_loop(loop):
    """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
    get_event_loop_policy().set_event_loop(loop)


def new_event_loop():
    """Equivalent to calling get_event_loop_policy().new_event_loop()."""
    return get_event_loop_policy().new_event_loop()


def get_child_watcher():
    """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
    return get_event_loop_policy().get_child_watcher()


def set_child_watcher(watcher):
    """Equivalent to calling
    get_event_loop_policy().set_child_watcher(watcher)."""
    return get_event_loop_policy().set_child_watcher(watcher)
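
The module-level helpers above delegate to the global policy, except that _get_running_loop() short-circuits get_event_loop() inside a running loop. A sketch, assuming the vendored base_events sets the running loop via _set_running_loop() as CPython 3.6 does:

import asyncio
from asyncio import events

loop = asyncio.new_event_loop()       # policy.new_event_loop()
asyncio.set_event_loop(loop)          # policy.set_event_loop(loop)
assert asyncio.get_event_loop() is loop

@asyncio.coroutine
def inside():
    # While the loop runs, _get_running_loop() returns it directly,
    # bypassing the policy lookup.
    assert events._get_running_loop() is loop
    return True

assert loop.run_until_complete(inside())
loop.close()
assert events._get_running_loop() is None  # nothing running now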

442  third_party/python/Lib/asyncio/futures.py  vendored  Normal file
@@ -0,0 +1,442 @@
|
|||
"""A Future class similar to the one in PEP 3148."""
|
||||
|
||||
__all__ = ['CancelledError', 'TimeoutError', 'InvalidStateError',
|
||||
'Future', 'wrap_future', 'isfuture']
|
||||
|
||||
import concurrent.futures
|
||||
import logging
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from . import base_futures
|
||||
from . import compat
|
||||
from . import events
|
||||
|
||||
|
||||
CancelledError = base_futures.CancelledError
|
||||
InvalidStateError = base_futures.InvalidStateError
|
||||
TimeoutError = base_futures.TimeoutError
|
||||
isfuture = base_futures.isfuture
|
||||
|
||||
|
||||
_PENDING = base_futures._PENDING
|
||||
_CANCELLED = base_futures._CANCELLED
|
||||
_FINISHED = base_futures._FINISHED
|
||||
|
||||
|
||||
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
|
||||
|
||||
|
||||
class _TracebackLogger:
|
||||
"""Helper to log a traceback upon destruction if not cleared.
|
||||
|
||||
This solves a nasty problem with Futures and Tasks that have an
|
||||
exception set: if nobody asks for the exception, the exception is
|
||||
never logged. This violates the Zen of Python: 'Errors should
|
||||
never pass silently. Unless explicitly silenced.'
|
||||
|
||||
However, we don't want to log the exception as soon as
|
||||
set_exception() is called: if the calling code is written
|
||||
properly, it will get the exception and handle it properly. But
|
||||
we *do* want to log it if result() or exception() was never called
|
||||
-- otherwise developers waste a lot of time wondering why their
|
||||
buggy code fails silently.
|
||||
|
||||
An earlier attempt added a __del__() method to the Future class
|
||||
itself, but this backfired because the presence of __del__()
|
||||
prevents garbage collection from breaking cycles. A way out of
|
||||
this catch-22 is to avoid having a __del__() method on the Future
|
||||
class itself, but instead to have a reference to a helper object
|
||||
with a __del__() method that logs the traceback, where we ensure
|
||||
that the helper object doesn't participate in cycles, and only the
|
||||
Future has a reference to it.
|
||||
|
||||
The helper object is added when set_exception() is called. When
|
||||
the Future is collected, and the helper is present, the helper
|
||||
object is also collected, and its __del__() method will log the
|
||||
traceback. When the Future's result() or exception() method is
|
||||
called (and a helper object is present), it removes the helper
|
||||
object, after calling its clear() method to prevent it from
|
||||
logging.
|
||||
|
||||
One downside is that we do a fair amount of work to extract the
|
||||
traceback from the exception, even when it is never logged. It
|
||||
would seem cheaper to just store the exception object, but that
|
||||
references the traceback, which references stack frames, which may
|
||||
reference the Future, which references the _TracebackLogger, and
|
||||
then the _TracebackLogger would be included in a cycle, which is
|
||||
what we're trying to avoid! As an optimization, we don't
|
||||
immediately format the exception; we only do the work when
|
||||
activate() is called, which call is delayed until after all the
|
||||
Future's callbacks have run. Since usually a Future has at least
|
||||
one callback (typically set by 'yield from') and usually that
|
||||
callback extracts the callback, thereby removing the need to
|
||||
format the exception.
|
||||
|
||||
PS. I don't claim credit for this solution. I first heard of it
|
||||
in a discussion about closing files when they are collected.
|
||||
"""
|
||||
|
||||
__slots__ = ('loop', 'source_traceback', 'exc', 'tb')
|
||||
|
||||
def __init__(self, future, exc):
|
||||
self.loop = future._loop
|
||||
self.source_traceback = future._source_traceback
|
||||
self.exc = exc
|
||||
self.tb = None
|
||||
|
||||
def activate(self):
|
||||
exc = self.exc
|
||||
if exc is not None:
|
||||
self.exc = None
|
||||
self.tb = traceback.format_exception(exc.__class__, exc,
|
||||
exc.__traceback__)
|
||||
|
||||
def clear(self):
|
||||
self.exc = None
|
||||
self.tb = None
|
||||
|
||||
def __del__(self):
|
||||
if self.tb:
|
||||
msg = 'Future/Task exception was never retrieved\n'
|
||||
if self.source_traceback:
|
||||
src = ''.join(traceback.format_list(self.source_traceback))
|
||||
msg += 'Future/Task created at (most recent call last):\n'
|
||||
msg += '%s\n' % src.rstrip()
|
||||
msg += ''.join(self.tb).rstrip()
|
||||
self.loop.call_exception_handler({'message': msg})
|
||||
|
||||
|
||||
class Future:
|
||||
"""This class is *almost* compatible with concurrent.futures.Future.
|
||||
|
||||
Differences:
|
||||
|
||||
- This class is not thread-safe.
|
||||
|
||||
- result() and exception() do not take a timeout argument and
|
||||
raise an exception when the future isn't done yet.
|
||||
|
||||
- Callbacks registered with add_done_callback() are always called
|
||||
via the event loop's call_soon().
|
||||
|
||||
- This class is not compatible with the wait() and as_completed()
|
||||
methods in the concurrent.futures package.
|
||||
|
||||
(In Python 3.4 or later we may be able to unify the implementations.)
|
||||
"""
|
||||
|
||||
# Class variables serving as defaults for instance variables.
|
||||
_state = _PENDING
|
||||
_result = None
|
||||
_exception = None
|
||||
_loop = None
|
||||
_source_traceback = None
|
||||
|
||||
# This field is used for a dual purpose:
|
||||
# - Its presence is a marker to declare that a class implements
|
||||
# the Future protocol (i.e. is intended to be duck-type compatible).
|
||||
# The value must also be not-None, to enable a subclass to declare
|
||||
# that it is not compatible by setting this to None.
|
||||
# - It is set by __iter__() below so that Task._step() can tell
|
||||
# the difference between `yield from Future()` (correct) vs.
|
||||
# `yield Future()` (incorrect).
|
||||
_asyncio_future_blocking = False
|
||||
|
||||
_log_traceback = False
|
||||
|
||||
def __init__(self, *, loop=None):
|
||||
"""Initialize the future.
|
||||
|
||||
The optional event_loop argument allows explicitly setting the event
|
||||
loop object used by the future. If it's not provided, the future uses
|
||||
the default event loop.
|
||||
"""
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
self._callbacks = []
|
||||
if self._loop.get_debug():
|
||||
self._source_traceback = events.extract_stack(sys._getframe(1))
|
||||
|
||||
_repr_info = base_futures._future_repr_info
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %s>' % (self.__class__.__name__, ' '.join(self._repr_info()))
|
||||
|
||||
# On Python 3.3 and older, objects with a destructor part of a reference
|
||||
# cycle are never destroyed. It's not more the case on Python 3.4 thanks
|
||||
# to the PEP 442.
|
||||
if compat.PY34:
|
||||
        def __del__(self):
            if not self._log_traceback:
                # set_exception() was not called, or result() or exception()
                # has consumed the exception
                return
            exc = self._exception
            context = {
                'message': ('%s exception was never retrieved'
                            % self.__class__.__name__),
                'exception': exc,
                'future': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)

    def cancel(self):
        """Cancel the future and schedule callbacks.

        If the future is already done or cancelled, return False.  Otherwise,
        change the future's state to cancelled, schedule the callbacks and
        return True.
        """
        self._log_traceback = False
        if self._state != _PENDING:
            return False
        self._state = _CANCELLED
        self._schedule_callbacks()
        return True

    def _schedule_callbacks(self):
        """Internal: Ask the event loop to call all callbacks.

        The callbacks are scheduled to be called as soon as possible.  Also
        clears the callback list.
        """
        callbacks = self._callbacks[:]
        if not callbacks:
            return

        self._callbacks[:] = []
        for callback in callbacks:
            self._loop.call_soon(callback, self)

    def cancelled(self):
        """Return True if the future was cancelled."""
        return self._state == _CANCELLED

    # Don't implement running(); see http://bugs.python.org/issue18699

    def done(self):
        """Return True if the future is done.

        Done means either that a result / exception are available, or that the
        future was cancelled.
        """
        return self._state != _PENDING

    def result(self):
        """Return the result this future represents.

        If the future has been cancelled, raises CancelledError.  If the
        future's result isn't yet available, raises InvalidStateError.  If
        the future is done and has an exception set, this exception is raised.
        """
        if self._state == _CANCELLED:
            raise CancelledError
        if self._state != _FINISHED:
            raise InvalidStateError('Result is not ready.')
        self._log_traceback = False
        if self._exception is not None:
            raise self._exception
        return self._result

    def exception(self):
        """Return the exception that was set on this future.

        The exception (or None if no exception was set) is returned only if
        the future is done.  If the future has been cancelled, raises
        CancelledError.  If the future isn't done yet, raises
        InvalidStateError.
        """
        if self._state == _CANCELLED:
            raise CancelledError
        if self._state != _FINISHED:
            raise InvalidStateError('Exception is not set.')
        self._log_traceback = False
        return self._exception

    def add_done_callback(self, fn):
        """Add a callback to be run when the future becomes done.

        The callback is called with a single argument - the future object.  If
        the future is already done when this is called, the callback is
        scheduled with call_soon.
        """
        if self._state != _PENDING:
            self._loop.call_soon(fn, self)
        else:
            self._callbacks.append(fn)

    # New method not in PEP 3148.

    def remove_done_callback(self, fn):
        """Remove all instances of a callback from the "call when done" list.

        Returns the number of callbacks removed.
        """
        filtered_callbacks = [f for f in self._callbacks if f != fn]
        removed_count = len(self._callbacks) - len(filtered_callbacks)
        if removed_count:
            self._callbacks[:] = filtered_callbacks
        return removed_count

    # So-called internal methods (note: no set_running_or_notify_cancel()).

    def set_result(self, result):
        """Mark the future done and set its result.

        If the future is already done when this method is called, raises
        InvalidStateError.
        """
        if self._state != _PENDING:
            raise InvalidStateError('{}: {!r}'.format(self._state, self))
        self._result = result
        self._state = _FINISHED
        self._schedule_callbacks()

    def set_exception(self, exception):
        """Mark the future done and set an exception.

        If the future is already done when this method is called, raises
        InvalidStateError.
        """
        if self._state != _PENDING:
            raise InvalidStateError('{}: {!r}'.format(self._state, self))
        if isinstance(exception, type):
            exception = exception()
        if type(exception) is StopIteration:
            raise TypeError("StopIteration interacts badly with generators "
                            "and cannot be raised into a Future")
        self._exception = exception
        self._state = _FINISHED
        self._schedule_callbacks()
        if compat.PY34:
            self._log_traceback = True
        else:
            self._tb_logger = _TracebackLogger(self, exception)
            # Arrange for the logger to be activated after all callbacks
            # have had a chance to call result() or exception().
            self._loop.call_soon(self._tb_logger.activate)

    def __iter__(self):
        if not self.done():
            self._asyncio_future_blocking = True
            yield self  # This tells Task to wait for completion.
        assert self.done(), "yield from wasn't used with future"
        return self.result()  # May raise too.

    if compat.PY35:
        __await__ = __iter__  # make compatible with 'await' expression


# Needed for testing purposes.
_PyFuture = Future


def _set_result_unless_cancelled(fut, result):
    """Helper setting the result only if the future was not cancelled."""
    if fut.cancelled():
        return
    fut.set_result(result)


def _set_concurrent_future_state(concurrent, source):
    """Copy state from a future to a concurrent.futures.Future."""
    assert source.done()
    if source.cancelled():
        concurrent.cancel()
    if not concurrent.set_running_or_notify_cancel():
        return
    exception = source.exception()
    if exception is not None:
        concurrent.set_exception(exception)
    else:
        result = source.result()
        concurrent.set_result(result)


def _copy_future_state(source, dest):
    """Internal helper to copy state from another Future.

    The other Future may be a concurrent.futures.Future.
    """
    assert source.done()
    if dest.cancelled():
        return
    assert not dest.done()
    if source.cancelled():
        dest.cancel()
    else:
        exception = source.exception()
        if exception is not None:
            dest.set_exception(exception)
        else:
            result = source.result()
            dest.set_result(result)


def _chain_future(source, destination):
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isfuture(source) and not isinstance(source,
                                               concurrent.futures.Future):
        raise TypeError('A future is required for source argument')
    if not isfuture(destination) and not isinstance(destination,
                                                    concurrent.futures.Future):
        raise TypeError('A future is required for destination argument')
    source_loop = source._loop if isfuture(source) else None
    dest_loop = destination._loop if isfuture(destination) else None

    def _set_state(future, other):
        if isfuture(future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(destination):
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(source):
        if (destination.cancelled() and
                dest_loop is not None and dest_loop.is_closed()):
            return
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)


def wrap_future(future, *, loop=None):
    """Wrap concurrent.futures.Future object."""
    if isfuture(future):
        return future
    assert isinstance(future, concurrent.futures.Future), \
        'concurrent.futures.Future is expected, got {!r}'.format(future)
    if loop is None:
        loop = events.get_event_loop()
    new_future = loop.create_future()
    _chain_future(future, new_future)
    return new_future


try:
    import _asyncio
except ImportError:
    pass
else:
    # _CFuture is needed for tests.
    Future = _CFuture = _asyncio.Future
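The chaining helpers above are what let coroutine code consume a thread-pool
result. A minimal sketch (editor's illustration, not part of this diff;
assumes Python 3.6, and `fetch` is a hypothetical blocking function):

    import asyncio, concurrent.futures

    def fetch():
        return 42  # stand-in for blocking work

    async def main(loop, pool):
        # Submit to the thread pool, then bridge the resulting
        # concurrent.futures.Future into an awaitable asyncio Future.
        cf = pool.submit(fetch)
        result = await asyncio.wrap_future(cf, loop=loop)
        print(result)

    loop = asyncio.get_event_loop()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        loop.run_until_complete(main(loop, pool))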
501
third_party/python/Lib/asyncio/locks.py
vendored
Normal file

@@ -0,0 +1,501 @@
"""Synchronization primitives."""
|
||||
|
||||
__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
|
||||
|
||||
import collections
|
||||
|
||||
from . import compat
|
||||
from . import events
|
||||
from . import futures
|
||||
from .coroutines import coroutine
|
||||
|
||||
|
||||
class _ContextManager:
|
||||
"""Context manager.
|
||||
|
||||
This enables the following idiom for acquiring and releasing a
|
||||
lock around a block:
|
||||
|
||||
with (yield from lock):
|
||||
<block>
|
||||
|
||||
while failing loudly when accidentally using:
|
||||
|
||||
with lock:
|
||||
<block>
|
||||
"""
|
||||
|
||||
def __init__(self, lock):
|
||||
self._lock = lock
|
||||
|
||||
def __enter__(self):
|
||||
# We have no use for the "as ..." clause in the with
|
||||
# statement for locks.
|
||||
return None
|
||||
|
||||
def __exit__(self, *args):
|
||||
try:
|
||||
self._lock.release()
|
||||
finally:
|
||||
self._lock = None # Crudely prevent reuse.
|
||||
|
||||
|
||||
class _ContextManagerMixin:
|
||||
def __enter__(self):
|
||||
raise RuntimeError(
|
||||
'"yield from" should be used as context manager expression')
|
||||
|
||||
def __exit__(self, *args):
|
||||
# This must exist because __enter__ exists, even though that
|
||||
# always raises; that's how the with-statement works.
|
||||
pass
|
||||
|
||||
@coroutine
|
||||
def __iter__(self):
|
||||
# This is not a coroutine. It is meant to enable the idiom:
|
||||
#
|
||||
# with (yield from lock):
|
||||
# <block>
|
||||
#
|
||||
# as an alternative to:
|
||||
#
|
||||
# yield from lock.acquire()
|
||||
# try:
|
||||
# <block>
|
||||
# finally:
|
||||
# lock.release()
|
||||
yield from self.acquire()
|
||||
return _ContextManager(self)
|
||||
|
||||
if compat.PY35:
|
||||
|
||||
def __await__(self):
|
||||
# To make "with await lock" work.
|
||||
yield from self.acquire()
|
||||
return _ContextManager(self)
|
||||
|
||||
@coroutine
|
||||
def __aenter__(self):
|
||||
yield from self.acquire()
|
||||
# We have no use for the "as ..." clause in the with
|
||||
# statement for locks.
|
||||
return None
|
||||
|
||||
@coroutine
|
||||
def __aexit__(self, exc_type, exc, tb):
|
||||
self.release()
|
||||
|
||||
|
||||
class Lock(_ContextManagerMixin):
    """Primitive lock objects.

    A primitive lock is a synchronization primitive that is not owned
    by a particular coroutine when locked.  A primitive lock is in one
    of two states, 'locked' or 'unlocked'.

    It is created in the unlocked state.  It has two basic methods,
    acquire() and release().  When the state is unlocked, acquire()
    changes the state to locked and returns immediately.  When the
    state is locked, acquire() blocks until a call to release() in
    another coroutine changes it to unlocked, then the acquire() call
    resets it to locked and returns.  The release() method should only
    be called in the locked state; it changes the state to unlocked
    and returns immediately.  If an attempt is made to release an
    unlocked lock, a RuntimeError will be raised.

    When more than one coroutine is blocked in acquire() waiting for
    the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked; the first coroutine
    that blocked in acquire() is the one that proceeds.

    acquire() is a coroutine and should be called with 'yield from'.

    Locks also support the context management protocol.  '(yield from lock)'
    should be used as the context manager expression.

    Usage:

        lock = Lock()
        ...
        yield from lock
        try:
            ...
        finally:
            lock.release()

    Context manager usage:

        lock = Lock()
        ...
        with (yield from lock):
            ...

    Lock objects can be tested for locking state:

        if not lock.locked():
            yield from lock
        else:
            # lock is acquired
            ...

    """
    def __init__(self, *, loop=None):
        self._waiters = collections.deque()
        self._locked = False
        if loop is not None:
            self._loop = loop
        else:
            self._loop = events.get_event_loop()

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self._locked else 'unlocked'
        if self._waiters:
            extra = '{},waiters:{}'.format(extra, len(self._waiters))
        return '<{} [{}]>'.format(res[1:-1], extra)

    def locked(self):
        """Return True if lock is acquired."""
        return self._locked

    @coroutine
    def acquire(self):
        """Acquire a lock.

        This method blocks until the lock is unlocked, then sets it to
        locked and returns True.
        """
        if not self._locked and all(w.cancelled() for w in self._waiters):
            self._locked = True
            return True

        fut = self._loop.create_future()
        self._waiters.append(fut)

        # Finally block should be called before the CancelledError
        # handling as we don't want CancelledError to call
        # _wake_up_first() and attempt to wake up itself.
        try:
            try:
                yield from fut
            finally:
                self._waiters.remove(fut)
        except futures.CancelledError:
            if not self._locked:
                self._wake_up_first()
            raise

        self._locked = True
        return True

    def release(self):
        """Release a lock.

        When the lock is locked, reset it to unlocked, and return.
        If any other coroutines are blocked waiting for the lock to become
        unlocked, allow exactly one of them to proceed.

        When invoked on an unlocked lock, a RuntimeError is raised.

        There is no return value.
        """
        if self._locked:
            self._locked = False
            self._wake_up_first()
        else:
            raise RuntimeError('Lock is not acquired.')

    def _wake_up_first(self):
        """Wake up the first waiter if it isn't done."""
        try:
            fut = next(iter(self._waiters))
        except StopIteration:
            return

        # .done() necessarily means that a waiter will wake up later on and
        # either take the lock, or, if it was cancelled and lock wasn't
        # taken already, will hit this again and wake up a new waiter.
        if not fut.done():
            fut.set_result(True)

class Event:
    """Asynchronous equivalent to threading.Event.

    Class implementing event objects.  An event manages a flag that can be set
    to true with the set() method and reset to false with the clear() method.
    The wait() method blocks until the flag is true.  The flag is initially
    false.
    """

    def __init__(self, *, loop=None):
        self._waiters = collections.deque()
        self._value = False
        if loop is not None:
            self._loop = loop
        else:
            self._loop = events.get_event_loop()

    def __repr__(self):
        res = super().__repr__()
        extra = 'set' if self._value else 'unset'
        if self._waiters:
            extra = '{},waiters:{}'.format(extra, len(self._waiters))
        return '<{} [{}]>'.format(res[1:-1], extra)

    def is_set(self):
        """Return True if and only if the internal flag is true."""
        return self._value

    def set(self):
        """Set the internal flag to true.  All coroutines waiting for it to
        become true are awakened.  Coroutines that call wait() once the flag
        is true will not block at all.
        """
        if not self._value:
            self._value = True

            for fut in self._waiters:
                if not fut.done():
                    fut.set_result(True)

    def clear(self):
        """Reset the internal flag to false.  Subsequently, coroutines calling
        wait() will block until set() is called to set the internal flag
        to true again."""
        self._value = False

    @coroutine
    def wait(self):
        """Block until the internal flag is true.

        If the internal flag is true on entry, return True
        immediately.  Otherwise, block until another coroutine calls
        set() to set the flag to true, then return True.
        """
        if self._value:
            return True

        fut = self._loop.create_future()
        self._waiters.append(fut)
        try:
            yield from fut
            return True
        finally:
            self._waiters.remove(fut)

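# Editor's sketch (not part of the original module): a minimal Event
# round-trip under Python 3.6; the helper names below are hypothetical.
def _event_demo():
    import asyncio

    async def waiter(event):
        await event.wait()           # parks on a future until set() fires
        return 'woken'

    loop = asyncio.new_event_loop()
    event = asyncio.Event(loop=loop)
    task = loop.create_task(waiter(event))
    loop.call_later(0.1, event.set)  # flip the flag shortly after start
    try:
        return loop.run_until_complete(task)   # -> 'woken'
    finally:
        loop.close()
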
class Condition(_ContextManagerMixin):
    """Asynchronous equivalent to threading.Condition.

    This class implements condition variable objects.  A condition variable
    allows one or more coroutines to wait until they are notified by another
    coroutine.

    A new Lock object is created and used as the underlying lock.
    """

    def __init__(self, lock=None, *, loop=None):
        if loop is not None:
            self._loop = loop
        else:
            self._loop = events.get_event_loop()

        if lock is None:
            lock = Lock(loop=self._loop)
        elif lock._loop is not self._loop:
            raise ValueError("loop argument must agree with lock")

        self._lock = lock
        # Export the lock's locked(), acquire() and release() methods.
        self.locked = lock.locked
        self.acquire = lock.acquire
        self.release = lock.release

        self._waiters = collections.deque()

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else 'unlocked'
        if self._waiters:
            extra = '{},waiters:{}'.format(extra, len(self._waiters))
        return '<{} [{}]>'.format(res[1:-1], extra)

    @coroutine
    def wait(self):
        """Wait until notified.

        If the calling coroutine has not acquired the lock when this
        method is called, a RuntimeError is raised.

        This method releases the underlying lock, and then blocks
        until it is awakened by a notify() or notify_all() call for
        the same condition variable in another coroutine.  Once
        awakened, it re-acquires the lock and returns True.
        """
        if not self.locked():
            raise RuntimeError('cannot wait on un-acquired lock')

        self.release()
        try:
            fut = self._loop.create_future()
            self._waiters.append(fut)
            try:
                yield from fut
                return True
            finally:
                self._waiters.remove(fut)

        finally:
            # Must reacquire lock even if wait is cancelled
            cancelled = False
            while True:
                try:
                    yield from self.acquire()
                    break
                except futures.CancelledError:
                    cancelled = True

            if cancelled:
                raise futures.CancelledError

    @coroutine
    def wait_for(self, predicate):
        """Wait until a predicate becomes true.

        The predicate should be a callable whose result will be
        interpreted as a boolean value.  The final predicate value is
        the return value.
        """
        result = predicate()
        while not result:
            yield from self.wait()
            result = predicate()
        return result

    def notify(self, n=1):
        """By default, wake up one coroutine waiting on this condition, if any.
        If the calling coroutine has not acquired the lock when this method
        is called, a RuntimeError is raised.

        This method wakes up at most n of the coroutines waiting for the
        condition variable; it is a no-op if no coroutines are waiting.

        Note: an awakened coroutine does not actually return from its
        wait() call until it can reacquire the lock.  Since notify() does
        not release the lock, its caller should.
        """
        if not self.locked():
            raise RuntimeError('cannot notify on un-acquired lock')

        idx = 0
        for fut in self._waiters:
            if idx >= n:
                break

            if not fut.done():
                idx += 1
                fut.set_result(False)

    def notify_all(self):
        """Wake up all coroutines waiting on this condition.  This method acts
        like notify(), but wakes up all waiting coroutines instead of one.  If
        the calling coroutine has not acquired the lock when this method is
        called, a RuntimeError is raised.
        """
        self.notify(len(self._waiters))

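# Editor's sketch (not part of the original module): one coroutine waits on
# a Condition until another appends data and notifies; Python 3.6 idiom,
# and the names below are hypothetical.
def _condition_demo():
    import asyncio

    loop = asyncio.new_event_loop()
    cond = asyncio.Condition(loop=loop)
    data = []

    async def consumer():
        async with cond:                       # acquires the underlying lock
            await cond.wait_for(lambda: data)  # releases it while waiting
            return data[0]

    async def producer():
        async with cond:
            data.append('payload')
            cond.notify()                      # wake one waiter

    task = loop.create_task(consumer())
    loop.create_task(producer())
    try:
        return loop.run_until_complete(task)   # -> 'payload'
    finally:
        loop.close()
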
class Semaphore(_ContextManagerMixin):
    """A Semaphore implementation.

    A semaphore manages an internal counter which is decremented by each
    acquire() call and incremented by each release() call.  The counter
    can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other coroutine calls release().

    Semaphores also support the context management protocol.

    The optional argument gives the initial value for the internal
    counter; it defaults to 1.  If the value given is less than 0,
    ValueError is raised.
    """

    def __init__(self, value=1, *, loop=None):
        if value < 0:
            raise ValueError("Semaphore initial value must be >= 0")
        self._value = value
        self._waiters = collections.deque()
        if loop is not None:
            self._loop = loop
        else:
            self._loop = events.get_event_loop()

    def __repr__(self):
        res = super().__repr__()
        extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
            self._value)
        if self._waiters:
            extra = '{},waiters:{}'.format(extra, len(self._waiters))
        return '<{} [{}]>'.format(res[1:-1], extra)

    def _wake_up_next(self):
        while self._waiters:
            waiter = self._waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                return

    def locked(self):
        """Returns True if semaphore can not be acquired immediately."""
        return self._value == 0

    @coroutine
    def acquire(self):
        """Acquire a semaphore.

        If the internal counter is larger than zero on entry,
        decrement it by one and return True immediately.  If it is
        zero on entry, block, waiting until some other coroutine has
        called release() to make it larger than 0, and then return
        True.
        """
        while self._value <= 0:
            fut = self._loop.create_future()
            self._waiters.append(fut)
            try:
                yield from fut
            except:
                # See the similar code in Queue.get.
                fut.cancel()
                if self._value > 0 and not fut.cancelled():
                    self._wake_up_next()
                raise
        self._value -= 1
        return True

    def release(self):
        """Release a semaphore, incrementing the internal counter by one.
        When it was zero on entry and another coroutine is waiting for it to
        become larger than zero again, wake up that coroutine.
        """
        self._value += 1
        self._wake_up_next()


class BoundedSemaphore(Semaphore):
    """A bounded semaphore implementation.

    This raises ValueError in release() if it would increase the value
    above the initial value.
    """

    def __init__(self, value=1, *, loop=None):
        self._bound_value = value
        super().__init__(value, loop=loop)

    def release(self):
        if self._value >= self._bound_value:
            raise ValueError('BoundedSemaphore released too many times')
        super().release()
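A Semaphore is the natural throttle for fan-out work. A minimal sketch
(editor's illustration, not part of this diff; assumes Python 3.6, and
`job` is a hypothetical coroutine):

    import asyncio

    async def job(sem, i):
        async with sem:              # at most 3 jobs run inside at once
            await asyncio.sleep(0.1)
            return i

    loop = asyncio.new_event_loop()
    sem = asyncio.BoundedSemaphore(3, loop=loop)
    tasks = [loop.create_task(job(sem, i)) for i in range(10)]
    print(loop.run_until_complete(asyncio.gather(*tasks, loop=loop)))
    loop.close()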
7
third_party/python/Lib/asyncio/log.py
vendored
Normal file

@@ -0,0 +1,7 @@
"""Logging configuration."""
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
# Name the logger after the package.
|
||||
logger = logging.getLogger(__package__)
|
556
third_party/python/Lib/asyncio/proactor_events.py
vendored
Normal file

@@ -0,0 +1,556 @@
"""Event loop using a proactor and related classes.
|
||||
|
||||
A proactor is a "notify-on-completion" multiplexer. Currently a
|
||||
proactor is only implemented on Windows with IOCP.
|
||||
"""
|
||||
|
||||
__all__ = ['BaseProactorEventLoop']
|
||||
|
||||
import socket
|
||||
import warnings
|
||||
|
||||
from . import base_events
|
||||
from . import compat
|
||||
from . import constants
|
||||
from . import futures
|
||||
from . import sslproto
|
||||
from . import transports
|
||||
from .log import logger
|
||||
|
||||
|
||||
class _ProactorBasePipeTransport(transports._FlowControlMixin,
|
||||
transports.BaseTransport):
|
||||
"""Base class for pipe and socket transports."""
|
||||
|
||||
def __init__(self, loop, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
super().__init__(extra, loop)
|
||||
self._set_extra(sock)
|
||||
self._sock = sock
|
||||
self._protocol = protocol
|
||||
self._server = server
|
||||
self._buffer = None # None or bytearray.
|
||||
self._read_fut = None
|
||||
self._write_fut = None
|
||||
self._pending_write = 0
|
||||
self._conn_lost = 0
|
||||
self._closing = False # Set when close() called.
|
||||
self._eof_written = False
|
||||
if self._server is not None:
|
||||
self._server._attach()
|
||||
self._loop.call_soon(self._protocol.connection_made, self)
|
||||
if waiter is not None:
|
||||
# only wake up the waiter when connection_made() has been called
|
||||
self._loop.call_soon(futures._set_result_unless_cancelled,
|
||||
waiter, None)
|
||||
|
||||
def __repr__(self):
|
||||
info = [self.__class__.__name__]
|
||||
if self._sock is None:
|
||||
info.append('closed')
|
||||
elif self._closing:
|
||||
info.append('closing')
|
||||
if self._sock is not None:
|
||||
info.append('fd=%s' % self._sock.fileno())
|
||||
if self._read_fut is not None:
|
||||
info.append('read=%s' % self._read_fut)
|
||||
if self._write_fut is not None:
|
||||
info.append("write=%r" % self._write_fut)
|
||||
if self._buffer:
|
||||
bufsize = len(self._buffer)
|
||||
info.append('write_bufsize=%s' % bufsize)
|
||||
if self._eof_written:
|
||||
info.append('EOF written')
|
||||
return '<%s>' % ' '.join(info)
|
||||
|
||||
def _set_extra(self, sock):
|
||||
self._extra['pipe'] = sock
|
||||
|
||||
def set_protocol(self, protocol):
|
||||
self._protocol = protocol
|
||||
|
||||
def get_protocol(self):
|
||||
return self._protocol
|
||||
|
||||
def is_closing(self):
|
||||
return self._closing
|
||||
|
||||
def close(self):
|
||||
if self._closing:
|
||||
return
|
||||
self._closing = True
|
||||
self._conn_lost += 1
|
||||
if not self._buffer and self._write_fut is None:
|
||||
self._loop.call_soon(self._call_connection_lost, None)
|
||||
if self._read_fut is not None:
|
||||
self._read_fut.cancel()
|
||||
self._read_fut = None
|
||||
|
||||
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed.  This is no longer the case on
    # Python 3.4, thanks to PEP 442.
    if compat.PY34:
        def __del__(self):
            if self._sock is not None:
                warnings.warn("unclosed transport %r" % self, ResourceWarning,
                              source=self)
                self.close()

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._force_close(exc)

    def _force_close(self, exc):
        if self._closing:
            return
        self._closing = True
        self._conn_lost += 1
        if self._write_fut:
            self._write_fut.cancel()
            self._write_fut = None
        if self._read_fut:
            self._read_fut.cancel()
            self._read_fut = None
        self._pending_write = 0
        self._buffer = None
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # XXX If there is a pending overlapped read on the other
            # end then it may fail with ERROR_NETNAME_DELETED if we
            # just close our end.  First calling shutdown() seems to
            # cure it, but maybe using DisconnectEx() would be better.
            if hasattr(self._sock, 'shutdown'):
                self._sock.shutdown(socket.SHUT_RDWR)
            self._sock.close()
            self._sock = None
            server = self._server
            if server is not None:
                server._detach()
                self._server = None

    def get_write_buffer_size(self):
        size = self._pending_write
        if self._buffer is not None:
            size += len(self._buffer)
        return size


class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
                                 transports.ReadTransport):
    """Transport for read pipes."""

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        super().__init__(loop, sock, protocol, waiter, extra, server)
        self._paused = False
        self._reschedule_on_resume = False
        self._loop.call_soon(self._loop_reading)

    def pause_reading(self):
        if self._closing or self._paused:
            return
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        if self._closing or not self._paused:
            return
        self._paused = False
        if self._reschedule_on_resume:
            self._loop.call_soon(self._loop_reading, self._read_fut)
            self._reschedule_on_resume = False
        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    def _loop_reading(self, fut=None):
        if self._paused:
            self._reschedule_on_resume = True
            return
        data = None

        try:
            if fut is not None:
                assert self._read_fut is fut or (self._read_fut is None and
                                                 self._closing)
                self._read_fut = None
                data = fut.result()  # deliver data later in "finally" clause

            if self._closing:
                # since close() has been called we ignore any read data
                data = None
                return

            if data == b'':
                # we got end-of-file so no need to reschedule a new read
                return

            # reschedule a new read
            self._read_fut = self._loop._proactor.recv(self._sock, 4096)
        except ConnectionAbortedError as exc:
            if not self._closing:
                self._fatal_error(exc, 'Fatal read error on pipe transport')
            elif self._loop.get_debug():
                logger.debug("Read error on pipe transport while closing",
                             exc_info=True)
        except ConnectionResetError as exc:
            self._force_close(exc)
        except OSError as exc:
            self._fatal_error(exc, 'Fatal read error on pipe transport')
        except futures.CancelledError:
            if not self._closing:
                raise
        else:
            self._read_fut.add_done_callback(self._loop_reading)
        finally:
            if data:
                self._protocol.data_received(data)
            elif data is not None:
                if self._loop.get_debug():
                    logger.debug("%r received EOF", self)
                keep_open = self._protocol.eof_received()
                if not keep_open:
                    self.close()


class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
                                      transports.WriteTransport):
    """Transport for write pipes."""

    def write(self, data):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            msg = ("data argument must be a bytes-like object, not '%s'" %
                   type(data).__name__)
            raise TypeError(msg)
        if self._eof_written:
            raise RuntimeError('write_eof() already called')

        if not data:
            return

        if self._conn_lost:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        # Observable states:
        # 1. IDLE: _write_fut and _buffer both None
        # 2. WRITING: _write_fut set; _buffer None
        # 3. BACKED UP: _write_fut set; _buffer a bytearray
        # We always copy the data, so the caller can't modify it
        # while we're still waiting for the I/O to happen.
        if self._write_fut is None:  # IDLE -> WRITING
            assert self._buffer is None
            # Pass a copy, except if it's already immutable.
            self._loop_writing(data=bytes(data))
        elif not self._buffer:  # WRITING -> BACKED UP
            # Make a mutable copy which we can extend.
            self._buffer = bytearray(data)
            self._maybe_pause_protocol()
        else:  # BACKED UP
            # Append to buffer (also copies).
            self._buffer.extend(data)
            self._maybe_pause_protocol()

    def _loop_writing(self, f=None, data=None):
        try:
            assert f is self._write_fut
            self._write_fut = None
            self._pending_write = 0
            if f:
                f.result()
            if data is None:
                data = self._buffer
                self._buffer = None
            if not data:
                if self._closing:
                    self._loop.call_soon(self._call_connection_lost, None)
                if self._eof_written:
                    self._sock.shutdown(socket.SHUT_WR)
                # Now that we've reduced the buffer size, tell the
                # protocol to resume writing if it was paused.  Note that
                # we do this last since the callback is called immediately
                # and it may add more data to the buffer (even causing the
                # protocol to be paused again).
                self._maybe_resume_protocol()
            else:
                self._write_fut = self._loop._proactor.send(self._sock, data)
                if not self._write_fut.done():
                    assert self._pending_write == 0
                    self._pending_write = len(data)
                    self._write_fut.add_done_callback(self._loop_writing)
                    self._maybe_pause_protocol()
                else:
                    self._write_fut.add_done_callback(self._loop_writing)
        except ConnectionResetError as exc:
            self._force_close(exc)
        except OSError as exc:
            self._fatal_error(exc, 'Fatal write error on pipe transport')

    def can_write_eof(self):
        return True

    def write_eof(self):
        self.close()

    def abort(self):
        self._force_close(None)


class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self._read_fut = self._loop._proactor.recv(self._sock, 16)
        self._read_fut.add_done_callback(self._pipe_closed)

    def _pipe_closed(self, fut):
        if fut.cancelled():
            # the transport has been closed
            return
        assert fut.result() == b''
        if self._closing:
            assert self._read_fut is None
            return
        assert fut is self._read_fut, (fut, self._read_fut)
        self._read_fut = None
        if self._write_fut is not None:
            self._force_close(BrokenPipeError())
        else:
            self.close()


class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
                                   _ProactorBaseWritePipeTransport,
                                   transports.Transport):
    """Transport for duplex pipes."""

    def can_write_eof(self):
        return False

    def write_eof(self):
        raise NotImplementedError


class _ProactorSocketTransport(_ProactorReadPipeTransport,
                               _ProactorBaseWritePipeTransport,
                               transports.Transport):
    """Transport for connected sockets."""

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        super().__init__(loop, sock, protocol, waiter, extra, server)
        base_events._set_nodelay(sock)

    def _set_extra(self, sock):
        self._extra['socket'] = sock
        try:
            self._extra['sockname'] = sock.getsockname()
        except (socket.error, AttributeError):
            if self._loop.get_debug():
                logger.warning("getsockname() failed on %r",
                               sock, exc_info=True)
        if 'peername' not in self._extra:
            try:
                self._extra['peername'] = sock.getpeername()
            except (socket.error, AttributeError):
                if self._loop.get_debug():
                    logger.warning("getpeername() failed on %r",
                                   sock, exc_info=True)

    def can_write_eof(self):
        return True

    def write_eof(self):
        if self._closing or self._eof_written:
            return
        self._eof_written = True
        if self._write_fut is None:
            self._sock.shutdown(socket.SHUT_WR)


class BaseProactorEventLoop(base_events.BaseEventLoop):

    def __init__(self, proactor):
        super().__init__()
        logger.debug('Using proactor: %s', proactor.__class__.__name__)
        self._proactor = proactor
        self._selector = proactor  # convenient alias
        self._self_reading_future = None
        self._accept_futures = {}  # socket file descriptor => Future
        proactor.set_loop(self)
        self._make_self_pipe()

    def _make_socket_transport(self, sock, protocol, waiter=None,
                               extra=None, server=None):
        return _ProactorSocketTransport(self, sock, protocol, waiter,
                                        extra, server)

    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
                            *, server_side=False, server_hostname=None,
                            extra=None, server=None):
        if not sslproto._is_sslproto_available():
            raise NotImplementedError("Proactor event loop requires Python 3.5"
                                      " or newer (ssl.MemoryBIO) to support "
                                      "SSL")

        ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
                                            server_side, server_hostname)
        _ProactorSocketTransport(self, rawsock, ssl_protocol,
                                 extra=extra, server=server)
        return ssl_protocol._app_transport

    def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
                                    extra=None):
        return _ProactorDuplexPipeTransport(self,
                                            sock, protocol, waiter, extra)

    def _make_read_pipe_transport(self, sock, protocol, waiter=None,
                                  extra=None):
        return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)

    def _make_write_pipe_transport(self, sock, protocol, waiter=None,
                                   extra=None):
        # We want connection_lost() to be called when other end closes
        return _ProactorWritePipeTransport(self,
                                           sock, protocol, waiter, extra)

    def close(self):
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self.is_closed():
            return

        # Call these methods before closing the event loop (before calling
        # BaseEventLoop.close), because they can schedule callbacks with
        # call_soon(), which is forbidden when the event loop is closed.
        self._stop_accept_futures()
        self._close_self_pipe()
        self._proactor.close()
        self._proactor = None
        self._selector = None

        # Close the event loop
        super().close()

    def sock_recv(self, sock, n):
        return self._proactor.recv(sock, n)

    def sock_sendall(self, sock, data):
        return self._proactor.send(sock, data)

    def sock_connect(self, sock, address):
        return self._proactor.connect(sock, address)

    def sock_accept(self, sock):
        return self._proactor.accept(sock)

    def _socketpair(self):
        raise NotImplementedError

    def _close_self_pipe(self):
        if self._self_reading_future is not None:
            self._self_reading_future.cancel()
            self._self_reading_future = None
        self._ssock.close()
        self._ssock = None
        self._csock.close()
        self._csock = None
        self._internal_fds -= 1

    def _make_self_pipe(self):
        # A self-socket, really. :-)
        self._ssock, self._csock = self._socketpair()
        self._ssock.setblocking(False)
        self._csock.setblocking(False)
        self._internal_fds += 1
        self.call_soon(self._loop_self_reading)

    def _loop_self_reading(self, f=None):
        try:
            if f is not None:
                f.result()  # may raise
            f = self._proactor.recv(self._ssock, 4096)
        except futures.CancelledError:
            # _close_self_pipe() has been called, stop waiting for data
            return
        except Exception as exc:
            self.call_exception_handler({
                'message': 'Error on reading from the event loop self pipe',
                'exception': exc,
                'loop': self,
            })
        else:
            self._self_reading_future = f
            f.add_done_callback(self._loop_self_reading)

    def _write_to_self(self):
        self._csock.send(b'\0')

    def _start_serving(self, protocol_factory, sock,
                       sslcontext=None, server=None, backlog=100):

        def loop(f=None):
            try:
                if f is not None:
                    conn, addr = f.result()
                    if self._debug:
                        logger.debug("%r got a new connection from %r: %r",
                                     server, addr, conn)
                    protocol = protocol_factory()
                    if sslcontext is not None:
                        self._make_ssl_transport(
                            conn, protocol, sslcontext, server_side=True,
                            extra={'peername': addr}, server=server)
                    else:
                        self._make_socket_transport(
                            conn, protocol,
                            extra={'peername': addr}, server=server)
                if self.is_closed():
                    return
                f = self._proactor.accept(sock)
            except OSError as exc:
                if sock.fileno() != -1:
                    self.call_exception_handler({
                        'message': 'Accept failed on a socket',
                        'exception': exc,
                        'socket': sock,
                    })
                    sock.close()
                elif self._debug:
                    logger.debug("Accept failed on socket %r",
                                 sock, exc_info=True)
            except futures.CancelledError:
                sock.close()
            else:
                self._accept_futures[sock.fileno()] = f
                f.add_done_callback(loop)

        self.call_soon(loop)

    def _process_events(self, event_list):
        # Events are processed in the IocpProactor._poll() method
        pass

    def _stop_accept_futures(self):
        for future in self._accept_futures.values():
            future.cancel()
        self._accept_futures.clear()

    def _stop_serving(self, sock):
        self._stop_accept_futures()
        self._proactor._stop_serving(sock)
        sock.close()
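In practice this base class is only instantiated through a platform
proactor. A minimal sketch (editor's illustration, not part of this diff;
Windows-only under Python 3.6):

    import sys, asyncio

    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()   # wraps an IOCP-based proactor
        asyncio.set_event_loop(loop)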
134
third_party/python/Lib/asyncio/protocols.py
vendored
Normal file

@@ -0,0 +1,134 @@
"""Abstract Protocol class."""
|
||||
|
||||
__all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
|
||||
'SubprocessProtocol']
|
||||
|
||||
|
||||
class BaseProtocol:
|
||||
"""Common base class for protocol interfaces.
|
||||
|
||||
Usually user implements protocols that derived from BaseProtocol
|
||||
like Protocol or ProcessProtocol.
|
||||
|
||||
The only case when BaseProtocol should be implemented directly is
|
||||
write-only transport like write pipe
|
||||
"""
|
||||
|
||||
    def connection_made(self, transport):
        """Called when a connection is made.

        The argument is the transport representing the pipe connection.
        To receive data, wait for data_received() calls.
        When the connection is closed, connection_lost() is called.
        """

    def connection_lost(self, exc):
        """Called when the connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """

    def pause_writing(self):
        """Called when the transport's buffer goes over the high-water mark.

        Pause and resume calls are paired -- pause_writing() is called
        once when the buffer goes strictly over the high-water mark
        (even if subsequent writes increase the buffer size even
        more), and eventually resume_writing() is called once when the
        buffer size reaches the low-water mark.

        Note that if the buffer size equals the high-water mark,
        pause_writing() is not called -- it must go strictly over.
        Conversely, resume_writing() is called when the buffer size is
        equal or lower than the low-water mark.  These end conditions
        are important to ensure that things go as expected when either
        mark is zero.

        NOTE: This is the only Protocol callback that is not called
        through EventLoop.call_soon() -- if it were, it would have no
        effect when it's most needed (when the app keeps writing
        without yielding until pause_writing() is called).
        """

    def resume_writing(self):
        """Called when the transport's buffer drains below the low-water mark.

        See pause_writing() for details.
        """

class Protocol(BaseProtocol):
    """Interface for stream protocol.

    The user should implement this interface.  They can inherit from
    this class but don't need to.  The implementations here do
    nothing (they don't raise exceptions).

    When the user wants to request a transport, they pass a protocol
    factory to a utility function (e.g., EventLoop.create_connection()).

    When the connection is made successfully, connection_made() is
    called with a suitable transport object.  Then data_received()
    will be called 0 or more times with data (bytes) received from the
    transport; finally, connection_lost() will be called exactly once
    with either an exception object or None as an argument.

    State machine of calls:

      start -> CM [-> DR*] [-> ER?] -> CL -> end

    * CM: connection_made()
    * DR: data_received()
    * ER: eof_received()
    * CL: connection_lost()
    """

    def data_received(self, data):
        """Called when some data is received.

        The argument is a bytes object.
        """

    def eof_received(self):
        """Called when the other end calls write_eof() or equivalent.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """


class DatagramProtocol(BaseProtocol):
    """Interface for datagram protocol."""

    def datagram_received(self, data, addr):
        """Called when some datagram is received."""

    def error_received(self, exc):
        """Called when a send or receive operation raises an OSError.

        (Other than BlockingIOError or InterruptedError.)
        """


class SubprocessProtocol(BaseProtocol):
    """Interface for protocol for subprocess calls."""

    def pipe_data_received(self, fd, data):
        """Called when the subprocess writes data into stdout/stderr pipe.

        fd is int file descriptor.
        data is bytes object.
        """

    def pipe_connection_lost(self, fd, exc):
        """Called when a file descriptor associated with the child process is
        closed.

        fd is the int file descriptor that was closed.
        """

    def process_exited(self):
        """Called when subprocess has exited."""
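The Protocol state machine above maps directly onto a tiny server. A
minimal sketch (editor's illustration, not part of this diff; assumes
Python 3.6 and that port 8888 is free):

    import asyncio

    class Echo(asyncio.Protocol):
        def connection_made(self, transport):    # CM
            self.transport = transport

        def data_received(self, data):           # DR*
            self.transport.write(data)           # echo it back

        def connection_lost(self, exc):          # CL
            pass

    loop = asyncio.new_event_loop()
    server = loop.run_until_complete(
        loop.create_server(Echo, '127.0.0.1', 8888))
    # ... serve until done, then:
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()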
259
third_party/python/Lib/asyncio/queues.py
vendored
Normal file

@@ -0,0 +1,259 @@
"""Queues"""
|
||||
|
||||
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
|
||||
|
||||
import collections
|
||||
import heapq
|
||||
|
||||
from . import compat
|
||||
from . import events
|
||||
from . import locks
|
||||
from .coroutines import coroutine
|
||||
|
||||
|
||||
class QueueEmpty(Exception):
|
||||
"""Exception raised when Queue.get_nowait() is called on a Queue object
|
||||
which is empty.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class QueueFull(Exception):
|
||||
"""Exception raised when the Queue.put_nowait() method is called on a Queue
|
||||
object which is full.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class Queue:
|
||||
"""A queue, useful for coordinating producer and consumer coroutines.
|
||||
|
||||
If maxsize is less than or equal to zero, the queue size is infinite. If it
|
||||
is an integer greater than 0, then "yield from put()" will block when the
|
||||
queue reaches maxsize, until an item is removed by get().
|
||||
|
||||
Unlike the standard library Queue, you can reliably know this Queue's size
|
||||
with qsize(), since your single-threaded asyncio application won't be
|
||||
interrupted between calling qsize() and doing an operation on the Queue.
|
||||
"""
|
||||
|
||||
def __init__(self, maxsize=0, *, loop=None):
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
self._maxsize = maxsize
|
||||
|
||||
# Futures.
|
||||
self._getters = collections.deque()
|
||||
# Futures.
|
||||
self._putters = collections.deque()
|
||||
self._unfinished_tasks = 0
|
||||
self._finished = locks.Event(loop=self._loop)
|
||||
self._finished.set()
|
||||
self._init(maxsize)
|
||||
|
||||
# These three are overridable in subclasses.
|
||||
|
||||
def _init(self, maxsize):
|
||||
self._queue = collections.deque()
|
||||
|
||||
def _get(self):
|
||||
return self._queue.popleft()
|
||||
|
||||
def _put(self, item):
|
||||
self._queue.append(item)
|
||||
|
||||
# End of the overridable methods.
|
||||
|
||||
def _wakeup_next(self, waiters):
|
||||
# Wake up the next waiter (if any) that isn't cancelled.
|
||||
while waiters:
|
||||
waiter = waiters.popleft()
|
||||
if not waiter.done():
|
||||
waiter.set_result(None)
|
||||
break
|
||||
|
||||
def __repr__(self):
|
||||
return '<{} at {:#x} {}>'.format(
|
||||
type(self).__name__, id(self), self._format())
|
||||
|
||||
def __str__(self):
|
||||
return '<{} {}>'.format(type(self).__name__, self._format())
|
||||
|
||||
def _format(self):
|
||||
result = 'maxsize={!r}'.format(self._maxsize)
|
||||
if getattr(self, '_queue', None):
|
||||
result += ' _queue={!r}'.format(list(self._queue))
|
||||
if self._getters:
|
||||
result += ' _getters[{}]'.format(len(self._getters))
|
||||
if self._putters:
|
||||
result += ' _putters[{}]'.format(len(self._putters))
|
||||
if self._unfinished_tasks:
|
||||
result += ' tasks={}'.format(self._unfinished_tasks)
|
||||
return result
|
||||
|
||||
def qsize(self):
|
||||
"""Number of items in the queue."""
|
||||
return len(self._queue)
|
||||
|
||||
@property
|
||||
def maxsize(self):
|
||||
"""Number of items allowed in the queue."""
|
||||
return self._maxsize
|
||||
|
||||
def empty(self):
|
||||
"""Return True if the queue is empty, False otherwise."""
|
||||
return not self._queue
|
||||
|
||||
def full(self):
|
||||
"""Return True if there are maxsize items in the queue.
|
||||
|
||||
Note: if the Queue was initialized with maxsize=0 (the default),
|
||||
then full() is never True.
|
||||
"""
|
||||
if self._maxsize <= 0:
|
||||
return False
|
||||
else:
|
||||
return self.qsize() >= self._maxsize
|
||||
|
||||
@coroutine
|
||||
def put(self, item):
|
||||
"""Put an item into the queue.
|
||||
|
||||
Put an item into the queue. If the queue is full, wait until a free
|
        slot is available before adding item.

        This method is a coroutine.
        """
        while self.full():
            putter = self._loop.create_future()
            self._putters.append(putter)
            try:
                yield from putter
            except:
                putter.cancel()  # Just in case putter is not done yet.
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.
        """
        if self.full():
            raise QueueFull
        self._put(item)
        self._unfinished_tasks += 1
        self._finished.clear()
        self._wakeup_next(self._getters)

    @coroutine
    def get(self):
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.

        This method is a coroutine.
        """
        while self.empty():
            getter = self._loop.create_future()
            self._getters.append(getter)
            try:
                yield from getter
            except:
                getter.cancel()  # Just in case getter is not done yet.

                try:
                    self._getters.remove(getter)
                except ValueError:
                    pass

                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()

    def get_nowait(self):
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.
        """
        if self.empty():
            raise QueueEmpty
        item = self._get()
        self._wakeup_next(self._putters)
        return item

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    @coroutine
    def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer calls task_done() to
        indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            yield from self._finished.wait()


class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item, heappush=heapq.heappush):
        heappush(self._queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self._queue)


class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        return self._queue.pop()


if not compat.PY35:
    JoinableQueue = Queue
    """Deprecated alias for Queue."""
    __all__.append('JoinableQueue')
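A minimal usage sketch of the queue primitives above (not part of the vendored file): a bounded producer/consumer pair in the same @coroutine/yield-from style the module itself uses. The queue size and item values are illustrative.

import asyncio
from asyncio import coroutine

@coroutine
def producer(queue):
    for i in range(5):
        yield from queue.put(i)    # suspends while the queue is full
    yield from queue.join()        # wait for one task_done() per item

@coroutine
def consumer(queue):
    while True:
        item = yield from queue.get()
        print('got', item)
        queue.task_done()

loop = asyncio.get_event_loop()
q = asyncio.Queue(maxsize=2, loop=loop)
worker = loop.create_task(consumer(q))
loop.run_until_complete(producer(q))
worker.cancel()
loop.run_until_complete(asyncio.gather(worker, return_exceptions=True))
loop.close()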
1132
third_party/python/Lib/asyncio/selector_events.py
vendored
Normal file
File diff suppressed because it is too large
701
third_party/python/Lib/asyncio/sslproto.py
vendored
Normal file
@@ -0,0 +1,701 @@
import collections
import warnings
try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None

from . import base_events
from . import compat
from . import protocols
from . import transports
from .log import logger


def _create_transport_context(server_side, server_hostname):
    if server_side:
        raise ValueError('Server side SSL needs a valid SSLContext')

    # Client side may pass ssl=True to use a default
    # context; in that case the sslcontext passed is None.
    # The default is secure for client connections.
    if hasattr(ssl, 'create_default_context'):
        # Python 3.4+: use up-to-date strong settings.
        sslcontext = ssl.create_default_context()
        if not server_hostname:
            sslcontext.check_hostname = False
    else:
        # Fallback for Python 3.3.
        sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        sslcontext.options |= ssl.OP_NO_SSLv2
        sslcontext.options |= ssl.OP_NO_SSLv3
        sslcontext.set_default_verify_paths()
        sslcontext.verify_mode = ssl.CERT_REQUIRED
    return sslcontext


def _is_sslproto_available():
    return hasattr(ssl, "MemoryBIO")


# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"
_DO_HANDSHAKE = "DO_HANDSHAKE"
_WRAPPED = "WRAPPED"
_SHUTDOWN = "SHUTDOWN"


class _SSLPipe(object):
    """An SSL "Pipe".

    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
    through memory buffers. It can be used to implement a security layer for an
    existing connection where you don't have access to the connection's file
    descriptor, or for some reason you don't want to use it.

    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
    data is passed through untransformed. In wrapped mode, application level
    data is encrypted to SSL record level data and vice versa. The SSL record
    level is the lowest level in the SSL protocol suite and is what travels
    as-is over the wire.

    An SslPipe initially is in "unwrapped" mode. To start SSL, call
    do_handshake(). To shutdown SSL again, call unwrap().
    """

    max_size = 256 * 1024   # Buffer size passed to read()

    def __init__(self, context, server_side, server_hostname=None):
        """
        The *context* argument specifies the ssl.SSLContext to use.

        The *server_side* argument indicates whether this is a server side or
        client side transport.

        The optional *server_hostname* argument can be used to specify the
        hostname you are connecting to. You may only specify this parameter if
        the _ssl module supports Server Name Indication (SNI).
        """
        self._context = context
        self._server_side = server_side
        self._server_hostname = server_hostname
        self._state = _UNWRAPPED
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._sslobj = None
        self._need_ssldata = False
        self._handshake_cb = None
        self._shutdown_cb = None

    @property
    def context(self):
        """The SSL context passed to the constructor."""
        return self._context

    @property
    def ssl_object(self):
        """The internal ssl.SSLObject instance.

        Return None if the pipe is not wrapped.
        """
        return self._sslobj

    @property
    def need_ssldata(self):
        """Whether more record level data is needed to complete a handshake
        that is currently in progress."""
        return self._need_ssldata

    @property
    def wrapped(self):
        """
        Whether a security layer is currently in effect.

        Return False during handshake.
        """
        return self._state == _WRAPPED

    def do_handshake(self, callback=None):
        """Start the SSL handshake.

        Return a list of ssldata. A ssldata element is a list of buffers.

        The optional *callback* argument can be used to install a callback that
        will be called when the handshake is complete. The callback will be
        called with None if successful, else an exception instance.
        """
        if self._state != _UNWRAPPED:
            raise RuntimeError('handshake in progress or completed')
        self._sslobj = self._context.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)
        self._state = _DO_HANDSHAKE
        self._handshake_cb = callback
        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
        assert len(appdata) == 0
        return ssldata

    def shutdown(self, callback=None):
        """Start the SSL shutdown sequence.

        Return a list of ssldata. A ssldata element is a list of buffers.

        The optional *callback* argument can be used to install a callback that
        will be called when the shutdown is complete. The callback will be
        called without arguments.
        """
        if self._state == _UNWRAPPED:
            raise RuntimeError('no security layer present')
        if self._state == _SHUTDOWN:
            raise RuntimeError('shutdown in progress')
        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
        self._state = _SHUTDOWN
        self._shutdown_cb = callback
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']
        return ssldata

    def feed_eof(self):
        """Send a potentially "ragged" EOF.

        This method will raise an SSL_ERROR_EOF exception if the EOF is
        unexpected.
        """
        self._incoming.write_eof()
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']

    def feed_ssldata(self, data, only_handshake=False):
        """Feed SSL record level data into the pipe.

        The data must be a bytes instance. It is OK to send an empty bytes
        instance. This can be used to get ssldata for a handshake initiated by
        this endpoint.

        Return a (ssldata, appdata) tuple. The ssldata element is a list of
        buffers containing SSL data that needs to be sent to the remote SSL.

        The appdata element is a list of buffers containing plaintext data that
        needs to be forwarded to the application. The appdata list may contain
        an empty buffer indicating an SSL "close_notify" alert. This alert must
        be acknowledged by calling shutdown().
        """
        if self._state == _UNWRAPPED:
            # If unwrapped, pass plaintext data straight through.
            if data:
                appdata = [data]
            else:
                appdata = []
            return ([], appdata)

        self._need_ssldata = False
        if data:
            self._incoming.write(data)

        ssldata = []
        appdata = []
        try:
            if self._state == _DO_HANDSHAKE:
                # Call do_handshake() until it doesn't raise anymore.
                self._sslobj.do_handshake()
                self._state = _WRAPPED
                if self._handshake_cb:
                    self._handshake_cb(None)
                if only_handshake:
                    return (ssldata, appdata)
                # Handshake done: execute the wrapped block

            if self._state == _WRAPPED:
                # Main state: read data from SSL until close_notify
                while True:
                    chunk = self._sslobj.read(self.max_size)
                    appdata.append(chunk)
                    if not chunk:  # close_notify
                        break

            elif self._state == _SHUTDOWN:
                # Call shutdown() until it doesn't raise anymore.
                self._sslobj.unwrap()
                self._sslobj = None
                self._state = _UNWRAPPED
                if self._shutdown_cb:
                    self._shutdown_cb()

            elif self._state == _UNWRAPPED:
                # Drain possible plaintext data after close_notify.
                appdata.append(self._incoming.read())
        except (ssl.SSLError, ssl.CertificateError) as exc:
            if getattr(exc, 'errno', None) not in (
                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
                    ssl.SSL_ERROR_SYSCALL):
                if self._state == _DO_HANDSHAKE and self._handshake_cb:
                    self._handshake_cb(exc)
                raise
            self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)

        # Check for record level data that needs to be sent back.
        # Happens for the initial handshake and renegotiations.
        if self._outgoing.pending:
            ssldata.append(self._outgoing.read())
        return (ssldata, appdata)

    def feed_appdata(self, data, offset=0):
        """Feed plaintext data into the pipe.

        Return an (ssldata, offset) tuple. The ssldata element is a list of
        buffers containing record level data that needs to be sent to the
        remote SSL instance. The offset is the number of plaintext bytes that
        were processed, which may be less than the length of data.

        NOTE: In case of short writes, this call MUST be retried with the SAME
        buffer passed into the *data* argument (i.e. the id() must be the
        same). This is an OpenSSL requirement. A further particularity is that
        a short write will always have offset == 0, because the _ssl module
        does not enable partial writes. And even though the offset is zero,
        there will still be encrypted data in ssldata.
        """
        assert 0 <= offset <= len(data)
        if self._state == _UNWRAPPED:
            # pass through data in unwrapped mode
            if offset < len(data):
                ssldata = [data[offset:]]
            else:
                ssldata = []
            return (ssldata, len(data))

        ssldata = []
        view = memoryview(data)
        while True:
            self._need_ssldata = False
            try:
                if offset < len(view):
                    offset += self._sslobj.write(view[offset:])
            except ssl.SSLError as exc:
                # It is not allowed to call write() after unwrap() until the
                # close_notify is acknowledged. We return the condition to the
                # caller as a short write.
                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
                    exc.errno = ssl.SSL_ERROR_WANT_READ
                if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
                                     ssl.SSL_ERROR_WANT_WRITE,
                                     ssl.SSL_ERROR_SYSCALL):
                    raise
                self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)

            # See if there's any record level data back for us.
            if self._outgoing.pending:
                ssldata.append(self._outgoing.read())
            if offset == len(view) or self._need_ssldata:
                break
        return (ssldata, offset)


class _SSLProtocolTransport(transports._FlowControlMixin,
                            transports.Transport):

    def __init__(self, loop, ssl_protocol):
        self._loop = loop
        # SSLProtocol instance
        self._ssl_protocol = ssl_protocol
        self._closed = False

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._ssl_protocol._get_extra_info(name, default)

    def set_protocol(self, protocol):
        self._ssl_protocol._app_protocol = protocol

    def get_protocol(self):
        return self._ssl_protocol._app_protocol

    def is_closing(self):
        return self._closed

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously.  No more data
        will be received.  After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
        with None as its argument.
        """
        self._closed = True
        self._ssl_protocol._start_shutdown()

    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That's no longer the case on
    # Python 3.4 thanks to PEP 442.
    if compat.PY34:
        def __del__(self):
            if not self._closed:
                warnings.warn("unclosed transport %r" % self, ResourceWarning,
                              source=self)
                self.close()

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        self._ssl_protocol._transport.pause_reading()

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        self._ssl_protocol._transport.resume_reading()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods.  If specified,
        the low-water limit must be less than or equal to the
        high-water limit.  Neither value can be negative.

        The defaults are implementation-specific.  If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit.  Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty.  Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._transport.set_write_buffer_limits(high, low)

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        return self._ssl_protocol._transport.get_write_buffer_size()

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError("data: expecting a bytes-like instance, got {!r}"
                            .format(type(data).__name__))
        if not data:
            return
        self._ssl_protocol._write_appdata(data)

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        return False

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        self._ssl_protocol._abort()


class SSLProtocol(protocols.Protocol):
    """SSL protocol.

    Implementation of SSL on top of a socket using incoming and outgoing
    buffers which are ssl.MemoryBIO objects.
    """

    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None,
                 call_connection_made=True):
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')

        if not sslcontext:
            sslcontext = _create_transport_context(server_side, server_hostname)

        self._server_side = server_side
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info is set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)

        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0

        self._waiter = waiter
        self._loop = loop
        self._app_protocol = app_protocol
        self._app_transport = _SSLProtocolTransport(self._loop, self)
        # _SSLPipe instance (None until the connection is made)
        self._sslpipe = None
        self._session_established = False
        self._in_handshake = False
        self._in_shutdown = False
        # transport, ex: SelectorSocketTransport
        self._transport = None
        self._call_connection_made = call_connection_made

    def _wakeup_waiter(self, exc=None):
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        self._waiter = None

    def connection_made(self, transport):
        """Called when the low-level connection is made.

        Start the SSL handshake.
        """
        self._transport = transport
        self._sslpipe = _SSLPipe(self._sslcontext,
                                 self._server_side,
                                 self._server_hostname)
        self._start_handshake()

    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        if self._session_established:
            self._session_established = False
            self._loop.call_soon(self._app_protocol.connection_lost, exc)
        self._transport = None
        self._app_transport = None
        self._wakeup_waiter(exc)

    def pause_writing(self):
        """Called when the low-level transport's buffer goes over
        the high-water mark.
        """
        self._app_protocol.pause_writing()

    def resume_writing(self):
        """Called when the low-level transport's buffer drains below
        the low-water mark.
        """
        self._app_protocol.resume_writing()

    def data_received(self, data):
        """Called when some SSL data is received.

        The argument is a bytes object.
        """
        if self._sslpipe is None:
            # transport closing, sslpipe is destroyed
            return

        try:
            ssldata, appdata = self._sslpipe.feed_ssldata(data)
        except ssl.SSLError as e:
            if self._loop.get_debug():
                logger.warning('%r: SSL error %s (reason %s)',
                               self, e.errno, e.reason)
            self._abort()
            return

        for chunk in ssldata:
            self._transport.write(chunk)

        for chunk in appdata:
            if chunk:
                self._app_protocol.data_received(chunk)
            else:
                self._start_shutdown()
                break

    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)

            self._wakeup_waiter(ConnectionResetError)

            if not self._in_handshake:
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    logger.warning('returning true from eof_received() '
                                   'has no effect when using ssl')
        finally:
            self._transport.close()

    def _get_extra_info(self, name, default=None):
        if name in self._extra:
            return self._extra[name]
        elif self._transport is not None:
            return self._transport.get_extra_info(name, default)
        else:
            return default

    def _start_shutdown(self):
        if self._in_shutdown:
            return
        if self._in_handshake:
            self._abort()
        else:
            self._in_shutdown = True
            self._write_appdata(b'')

    def _write_appdata(self, data):
        self._write_backlog.append((data, 0))
        self._write_buffer_size += len(data)
        self._process_write_backlog()

    def _start_handshake(self):
        if self._loop.get_debug():
            logger.debug("%r starts SSL handshake", self)
            self._handshake_start_time = self._loop.time()
        else:
            self._handshake_start_time = None
        self._in_handshake = True
        # (b'', 1) is a special value in _process_write_backlog() to do
        # the SSL handshake
        self._write_backlog.append((b'', 1))
        self._process_write_backlog()

    def _on_handshake_complete(self, handshake_exc):
        self._in_handshake = False

        sslobj = self._sslpipe.ssl_object
        try:
            if handshake_exc is not None:
                raise handshake_exc

            peercert = sslobj.getpeercert()
            if not hasattr(self._sslcontext, 'check_hostname'):
                # Verify hostname if requested, Python 3.4+ uses check_hostname
                # and checks the hostname in do_handshake()
                if (self._server_hostname
                        and self._sslcontext.verify_mode != ssl.CERT_NONE):
                    ssl.match_hostname(peercert, self._server_hostname)
        except BaseException as exc:
            if self._loop.get_debug():
                if isinstance(exc, ssl.CertificateError):
                    logger.warning("%r: SSL handshake failed "
                                   "on verifying the certificate",
                                   self, exc_info=True)
                else:
                    logger.warning("%r: SSL handshake failed",
                                   self, exc_info=True)
            self._transport.close()
            if isinstance(exc, Exception):
                self._wakeup_waiter(exc)
                return
            else:
                raise

        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)

        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=sslobj.cipher(),
                           compression=sslobj.compression(),
                           ssl_object=sslobj,
                           )
        if self._call_connection_made:
            self._app_protocol.connection_made(self._app_transport)
        self._wakeup_waiter()
        self._session_established = True
        # In case transport.write() was already called. Don't call
        # immediately _process_write_backlog(), but schedule it:
        # _on_handshake_complete() can be called indirectly from
        # _process_write_backlog(), and _process_write_backlog() is not
        # reentrant.
        self._loop.call_soon(self._process_write_backlog)

    def _process_write_backlog(self):
        # Try to make progress on the write backlog.
        if self._transport is None or self._sslpipe is None:
            return

        try:
            for i in range(len(self._write_backlog)):
                data, offset = self._write_backlog[0]
                if data:
                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
                elif offset:
                    ssldata = self._sslpipe.do_handshake(
                        self._on_handshake_complete)
                    offset = 1
                else:
                    ssldata = self._sslpipe.shutdown(self._finalize)
                    offset = 1

                for chunk in ssldata:
                    self._transport.write(chunk)

                if offset < len(data):
                    self._write_backlog[0] = (data, offset)
                    # A short write means that a write is blocked on a read
                    # We need to enable reading if it is paused!
                    assert self._sslpipe.need_ssldata
                    if self._transport._paused:
                        self._transport.resume_reading()
                    break

                # An entire chunk from the backlog was processed. We can
                # delete it and reduce the outstanding buffer size.
                del self._write_backlog[0]
                self._write_buffer_size -= len(data)
        except BaseException as exc:
            if self._in_handshake:
                # BaseExceptions will be re-raised in _on_handshake_complete.
                self._on_handshake_complete(exc)
            else:
                self._fatal_error(exc, 'Fatal error on SSL transport')
            if not isinstance(exc, Exception):
                # BaseException
                raise

    def _fatal_error(self, exc, message='Fatal error on transport'):
        # Should be called from exception handler only.
        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self._transport,
                'protocol': self,
            })
        if self._transport:
            self._transport._force_close(exc)

    def _finalize(self):
        self._sslpipe = None

        if self._transport is not None:
            self._transport.close()

    def _abort(self):
        try:
            if self._transport is not None:
                self._transport.abort()
        finally:
            self._finalize()
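For orientation (not part of the vendored file): SSLProtocol and _SSLPipe above are what the event loop stacks on top of a plain socket transport when create_connection() is called with ssl=True. A hedged sketch; example.com is a placeholder host, and running it needs network access and a usable CA bundle.

import asyncio

class Client(asyncio.Protocol):
    def connection_made(self, transport):
        # 'cipher' and 'peercert' are filled in by SSLProtocol._extra above.
        print('cipher:', transport.get_extra_info('cipher'))
        transport.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')

    def data_received(self, data):
        print(data[:60])

    def connection_lost(self, exc):
        loop.stop()

loop = asyncio.get_event_loop()
loop.run_until_complete(
    loop.create_connection(Client, 'example.com', 443, ssl=True))
loop.run_forever()
loop.close()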
701
third_party/python/Lib/asyncio/streams.py
vendored
Normal file
@@ -0,0 +1,701 @@
"""Stream-related things."""
|
||||
|
||||
__all__ = ['StreamReader', 'StreamWriter', 'StreamReaderProtocol',
|
||||
'open_connection', 'start_server',
|
||||
'IncompleteReadError',
|
||||
'LimitOverrunError',
|
||||
]
|
||||
|
||||
import socket
|
||||
|
||||
if hasattr(socket, 'AF_UNIX'):
|
||||
__all__.extend(['open_unix_connection', 'start_unix_server'])
|
||||
|
||||
from . import coroutines
|
||||
from . import compat
|
||||
from . import events
|
||||
from . import protocols
|
||||
from .coroutines import coroutine
|
||||
from .log import logger
|
||||
|
||||
|
||||
_DEFAULT_LIMIT = 2 ** 16
|
||||
|
||||
|
||||
class IncompleteReadError(EOFError):
|
||||
"""
|
||||
Incomplete read error. Attributes:
|
||||
|
||||
- partial: read bytes string before the end of stream was reached
|
||||
- expected: total number of expected bytes (or None if unknown)
|
||||
"""
|
||||
def __init__(self, partial, expected):
|
||||
super().__init__("%d bytes read on a total of %r expected bytes"
|
||||
% (len(partial), expected))
|
||||
self.partial = partial
|
||||
self.expected = expected
|
||||
|
||||
def __reduce__(self):
|
||||
return type(self), (self.partial, self.expected)
|
||||
|
||||
|
||||
class LimitOverrunError(Exception):
|
||||
"""Reached the buffer limit while looking for a separator.
|
||||
|
||||
Attributes:
|
||||
- consumed: total number of to be consumed bytes.
|
||||
"""
|
||||
def __init__(self, message, consumed):
|
||||
super().__init__(message)
|
||||
self.consumed = consumed
|
||||
|
||||
def __reduce__(self):
|
||||
return type(self), (self.args[0], self.consumed)
|
||||
|
||||
|
||||
@coroutine
|
||||
def open_connection(host=None, port=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""A wrapper for create_connection() returning a (reader, writer) pair.
|
||||
|
||||
The reader returned is a StreamReader instance; the writer is a
|
||||
StreamWriter instance.
|
||||
|
||||
The arguments are all the usual arguments to create_connection()
|
||||
except protocol_factory; most common are positional host and port,
|
||||
with various optional keyword arguments following.
|
||||
|
||||
Additional optional keyword arguments are loop (to set the event loop
|
||||
instance to use) and limit (to set the buffer limit passed to the
|
||||
StreamReader).
|
||||
|
||||
(If you want to customize the StreamReader and/or
|
||||
StreamReaderProtocol classes, just copy the code -- there's
|
||||
really nothing special here except some convenience.)
|
||||
"""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, loop=loop)
|
||||
transport, _ = yield from loop.create_connection(
|
||||
lambda: protocol, host, port, **kwds)
|
||||
writer = StreamWriter(transport, protocol, reader, loop)
|
||||
return reader, writer
|
||||
|
||||
|
||||
@coroutine
|
||||
def start_server(client_connected_cb, host=None, port=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""Start a socket server, call back for each client connected.
|
||||
|
||||
The first parameter, `client_connected_cb`, takes two parameters:
|
||||
client_reader, client_writer. client_reader is a StreamReader
|
||||
object, while client_writer is a StreamWriter object. This
|
||||
parameter can either be a plain callback function or a coroutine;
|
||||
if it is a coroutine, it will be automatically converted into a
|
||||
Task.
|
||||
|
||||
The rest of the arguments are all the usual arguments to
|
||||
loop.create_server() except protocol_factory; most common are
|
||||
positional host and port, with various optional keyword arguments
|
||||
following. The return value is the same as loop.create_server().
|
||||
|
||||
Additional optional keyword arguments are loop (to set the event loop
|
||||
instance to use) and limit (to set the buffer limit passed to the
|
||||
StreamReader).
|
||||
|
||||
The return value is the same as loop.create_server(), i.e. a
|
||||
Server object which can be used to stop the service.
|
||||
"""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
|
||||
def factory():
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, client_connected_cb,
|
||||
loop=loop)
|
||||
return protocol
|
||||
|
||||
return (yield from loop.create_server(factory, host, port, **kwds))
|
||||
|
||||
|
||||
if hasattr(socket, 'AF_UNIX'):
|
||||
# UNIX Domain Sockets are supported on this platform
|
||||
|
||||
@coroutine
|
||||
def open_unix_connection(path=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, loop=loop)
|
||||
transport, _ = yield from loop.create_unix_connection(
|
||||
lambda: protocol, path, **kwds)
|
||||
writer = StreamWriter(transport, protocol, reader, loop)
|
||||
return reader, writer
|
||||
|
||||
@coroutine
|
||||
def start_unix_server(client_connected_cb, path=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""Similar to `start_server` but works with UNIX Domain Sockets."""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
|
||||
def factory():
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, client_connected_cb,
|
||||
loop=loop)
|
||||
return protocol
|
||||
|
||||
return (yield from loop.create_unix_server(factory, path, **kwds))
|
||||
|
||||
|
||||
class FlowControlMixin(protocols.Protocol):
|
||||
"""Reusable flow control logic for StreamWriter.drain().
|
||||
|
||||
This implements the protocol methods pause_writing(),
|
||||
resume_reading() and connection_lost(). If the subclass overrides
|
||||
these it must call the super methods.
|
||||
|
||||
StreamWriter.drain() must wait for _drain_helper() coroutine.
|
||||
"""
|
||||
|
||||
def __init__(self, loop=None):
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
self._paused = False
|
||||
self._drain_waiter = None
|
||||
self._connection_lost = False
|
||||
|
||||
def pause_writing(self):
|
||||
assert not self._paused
|
||||
self._paused = True
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r pauses writing", self)
|
||||
|
||||
def resume_writing(self):
|
||||
assert self._paused
|
||||
self._paused = False
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r resumes writing", self)
|
||||
|
||||
waiter = self._drain_waiter
|
||||
if waiter is not None:
|
||||
self._drain_waiter = None
|
||||
if not waiter.done():
|
||||
waiter.set_result(None)
|
||||
|
||||
def connection_lost(self, exc):
|
||||
self._connection_lost = True
|
||||
# Wake up the writer if currently paused.
|
||||
if not self._paused:
|
||||
return
|
||||
waiter = self._drain_waiter
|
||||
if waiter is None:
|
||||
return
|
||||
self._drain_waiter = None
|
||||
if waiter.done():
|
||||
return
|
||||
if exc is None:
|
||||
waiter.set_result(None)
|
||||
else:
|
||||
waiter.set_exception(exc)
|
||||
|
||||
@coroutine
|
||||
def _drain_helper(self):
|
||||
if self._connection_lost:
|
||||
raise ConnectionResetError('Connection lost')
|
||||
if not self._paused:
|
||||
return
|
||||
waiter = self._drain_waiter
|
||||
assert waiter is None or waiter.cancelled()
|
||||
waiter = self._loop.create_future()
|
||||
self._drain_waiter = waiter
|
||||
yield from waiter
|
||||
|
||||
|
||||
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
|
||||
"""Helper class to adapt between Protocol and StreamReader.
|
||||
|
||||
(This is a helper class instead of making StreamReader itself a
|
||||
Protocol subclass, because the StreamReader has other potential
|
||||
uses, and to prevent the user of the StreamReader to accidentally
|
||||
call inappropriate methods of the protocol.)
|
||||
"""
|
||||
|
||||
def __init__(self, stream_reader, client_connected_cb=None, loop=None):
|
||||
super().__init__(loop=loop)
|
||||
self._stream_reader = stream_reader
|
||||
self._stream_writer = None
|
||||
self._client_connected_cb = client_connected_cb
|
||||
self._over_ssl = False
|
||||
|
||||
def connection_made(self, transport):
|
||||
self._stream_reader.set_transport(transport)
|
||||
self._over_ssl = transport.get_extra_info('sslcontext') is not None
|
||||
if self._client_connected_cb is not None:
|
||||
self._stream_writer = StreamWriter(transport, self,
|
||||
self._stream_reader,
|
||||
self._loop)
|
||||
res = self._client_connected_cb(self._stream_reader,
|
||||
self._stream_writer)
|
||||
if coroutines.iscoroutine(res):
|
||||
self._loop.create_task(res)
|
||||
|
||||
def connection_lost(self, exc):
|
||||
if self._stream_reader is not None:
|
||||
if exc is None:
|
||||
self._stream_reader.feed_eof()
|
||||
else:
|
||||
self._stream_reader.set_exception(exc)
|
||||
super().connection_lost(exc)
|
||||
self._stream_reader = None
|
||||
self._stream_writer = None
|
||||
|
||||
def data_received(self, data):
|
||||
self._stream_reader.feed_data(data)
|
||||
|
||||
def eof_received(self):
|
||||
self._stream_reader.feed_eof()
|
||||
if self._over_ssl:
|
||||
# Prevent a warning in SSLProtocol.eof_received:
|
||||
# "returning true from eof_received()
|
||||
# has no effect when using ssl"
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class StreamWriter:
|
||||
"""Wraps a Transport.
|
||||
|
||||
This exposes write(), writelines(), [can_]write_eof(),
|
||||
get_extra_info() and close(). It adds drain() which returns an
|
||||
optional Future on which you can wait for flow control. It also
|
||||
adds a transport property which references the Transport
|
||||
directly.
|
||||
"""
|
||||
|
||||
def __init__(self, transport, protocol, reader, loop):
|
||||
self._transport = transport
|
||||
self._protocol = protocol
|
||||
# drain() expects that the reader has an exception() method
|
||||
assert reader is None or isinstance(reader, StreamReader)
|
||||
self._reader = reader
|
||||
self._loop = loop
|
||||
|
||||
def __repr__(self):
|
||||
info = [self.__class__.__name__, 'transport=%r' % self._transport]
|
||||
if self._reader is not None:
|
||||
info.append('reader=%r' % self._reader)
|
||||
return '<%s>' % ' '.join(info)
|
||||
|
||||
@property
|
||||
def transport(self):
|
||||
return self._transport
|
||||
|
||||
def write(self, data):
|
||||
self._transport.write(data)
|
||||
|
||||
def writelines(self, data):
|
||||
self._transport.writelines(data)
|
||||
|
||||
def write_eof(self):
|
||||
return self._transport.write_eof()
|
||||
|
||||
def can_write_eof(self):
|
||||
return self._transport.can_write_eof()
|
||||
|
||||
def close(self):
|
||||
return self._transport.close()
|
||||
|
||||
def get_extra_info(self, name, default=None):
|
||||
return self._transport.get_extra_info(name, default)
|
||||
|
||||
@coroutine
|
||||
def drain(self):
|
||||
"""Flush the write buffer.
|
||||
|
||||
The intended use is to write
|
||||
|
||||
w.write(data)
|
||||
yield from w.drain()
|
||||
"""
|
||||
if self._reader is not None:
|
||||
exc = self._reader.exception()
|
||||
if exc is not None:
|
||||
raise exc
|
||||
if self._transport is not None:
|
||||
if self._transport.is_closing():
|
||||
# Yield to the event loop so connection_lost() may be
|
||||
# called. Without this, _drain_helper() would return
|
||||
# immediately, and code that calls
|
||||
# write(...); yield from drain()
|
||||
# in a loop would never call connection_lost(), so it
|
||||
# would not see an error when the socket is closed.
|
||||
yield
|
||||
yield from self._protocol._drain_helper()
|
||||
|
||||
|
||||
class StreamReader:
|
||||
|
||||
def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
|
||||
# The line length limit is a security feature;
|
||||
# it also doubles as half the buffer limit.
|
||||
|
||||
if limit <= 0:
|
||||
raise ValueError('Limit cannot be <= 0')
|
||||
|
||||
self._limit = limit
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
self._buffer = bytearray()
|
||||
self._eof = False # Whether we're done.
|
||||
self._waiter = None # A future used by _wait_for_data()
|
||||
self._exception = None
|
||||
self._transport = None
|
||||
self._paused = False
|
||||
|
||||
def __repr__(self):
|
||||
info = ['StreamReader']
|
||||
if self._buffer:
|
||||
info.append('%d bytes' % len(self._buffer))
|
||||
if self._eof:
|
||||
info.append('eof')
|
||||
if self._limit != _DEFAULT_LIMIT:
|
||||
info.append('l=%d' % self._limit)
|
||||
if self._waiter:
|
||||
info.append('w=%r' % self._waiter)
|
||||
if self._exception:
|
||||
info.append('e=%r' % self._exception)
|
||||
if self._transport:
|
||||
info.append('t=%r' % self._transport)
|
||||
if self._paused:
|
||||
info.append('paused')
|
||||
return '<%s>' % ' '.join(info)
|
||||
|
||||
def exception(self):
|
||||
return self._exception
|
||||
|
||||
def set_exception(self, exc):
|
||||
self._exception = exc
|
||||
|
||||
waiter = self._waiter
|
||||
if waiter is not None:
|
||||
self._waiter = None
|
||||
if not waiter.cancelled():
|
||||
waiter.set_exception(exc)
|
||||
|
||||
def _wakeup_waiter(self):
|
||||
"""Wakeup read*() functions waiting for data or EOF."""
|
||||
waiter = self._waiter
|
||||
if waiter is not None:
|
||||
self._waiter = None
|
||||
if not waiter.cancelled():
|
||||
waiter.set_result(None)
|
||||
|
||||
def set_transport(self, transport):
|
||||
assert self._transport is None, 'Transport already set'
|
||||
self._transport = transport
|
||||
|
||||
def _maybe_resume_transport(self):
|
||||
if self._paused and len(self._buffer) <= self._limit:
|
||||
self._paused = False
|
||||
self._transport.resume_reading()
|
||||
|
||||
def feed_eof(self):
|
||||
self._eof = True
|
||||
self._wakeup_waiter()
|
||||
|
||||
def at_eof(self):
|
||||
"""Return True if the buffer is empty and 'feed_eof' was called."""
|
||||
return self._eof and not self._buffer
|
||||
|
||||
def feed_data(self, data):
|
||||
assert not self._eof, 'feed_data after feed_eof'
|
||||
|
||||
if not data:
|
||||
return
|
||||
|
||||
self._buffer.extend(data)
|
||||
self._wakeup_waiter()
|
||||
|
||||
if (self._transport is not None and
|
||||
not self._paused and
|
||||
len(self._buffer) > 2 * self._limit):
|
||||
try:
|
||||
self._transport.pause_reading()
|
||||
except NotImplementedError:
|
||||
# The transport can't be paused.
|
||||
# We'll just have to buffer all data.
|
||||
# Forget the transport so we don't keep trying.
|
||||
self._transport = None
|
||||
else:
|
||||
self._paused = True
|
||||
|
||||
@coroutine
|
||||
def _wait_for_data(self, func_name):
|
||||
"""Wait until feed_data() or feed_eof() is called.
|
||||
|
||||
If stream was paused, automatically resume it.
|
||||
"""
|
||||
# StreamReader uses a future to link the protocol feed_data() method
|
||||
# to a read coroutine. Running two read coroutines at the same time
|
||||
# would have an unexpected behaviour. It would not possible to know
|
||||
# which coroutine would get the next data.
|
||||
if self._waiter is not None:
|
||||
raise RuntimeError('%s() called while another coroutine is '
|
||||
'already waiting for incoming data' % func_name)
|
||||
|
||||
assert not self._eof, '_wait_for_data after EOF'
|
||||
|
||||
# Waiting for data while paused will make deadlock, so prevent it.
|
||||
# This is essential for readexactly(n) for case when n > self._limit.
|
||||
if self._paused:
|
||||
self._paused = False
|
||||
self._transport.resume_reading()
|
||||
|
||||
self._waiter = self._loop.create_future()
|
||||
try:
|
||||
yield from self._waiter
|
||||
finally:
|
||||
self._waiter = None
|
||||
|
||||
@coroutine
|
||||
def readline(self):
|
||||
"""Read chunk of data from the stream until newline (b'\n') is found.
|
||||
|
||||
On success, return chunk that ends with newline. If only partial
|
||||
line can be read due to EOF, return incomplete line without
|
||||
terminating newline. When EOF was reached while no bytes read, empty
|
||||
bytes object is returned.
|
||||
|
||||
If limit is reached, ValueError will be raised. In that case, if
|
||||
newline was found, complete line including newline will be removed
|
||||
from internal buffer. Else, internal buffer will be cleared. Limit is
|
||||
compared against part of the line without newline.
|
||||
|
||||
If stream was paused, this function will automatically resume it if
|
||||
needed.
|
||||
"""
|
||||
sep = b'\n'
|
||||
seplen = len(sep)
|
||||
try:
|
||||
line = yield from self.readuntil(sep)
|
||||
except IncompleteReadError as e:
|
||||
return e.partial
|
||||
except LimitOverrunError as e:
|
||||
if self._buffer.startswith(sep, e.consumed):
|
||||
del self._buffer[:e.consumed + seplen]
|
||||
else:
|
||||
self._buffer.clear()
|
||||
self._maybe_resume_transport()
|
||||
raise ValueError(e.args[0])
|
||||
return line
|
||||
|
||||
@coroutine
|
||||
def readuntil(self, separator=b'\n'):
|
||||
"""Read data from the stream until ``separator`` is found.
|
||||
|
||||
On success, the data and separator will be removed from the
|
||||
internal buffer (consumed). Returned data will include the
|
||||
separator at the end.
|
||||
|
||||
Configured stream limit is used to check result. Limit sets the
|
||||
maximal length of data that can be returned, not counting the
|
||||
separator.
|
||||
|
||||
If an EOF occurs and the complete separator is still not found,
|
||||
an IncompleteReadError exception will be raised, and the internal
|
||||
buffer will be reset. The IncompleteReadError.partial attribute
|
||||
may contain the separator partially.
|
||||
|
||||
If the data cannot be read because of over limit, a
|
||||
LimitOverrunError exception will be raised, and the data
|
||||
will be left in the internal buffer, so it can be read again.
|
||||
"""
|
||||
seplen = len(separator)
|
||||
if seplen == 0:
|
||||
raise ValueError('Separator should be at least one-byte string')
|
||||
|
||||
if self._exception is not None:
|
||||
raise self._exception
|
||||
|
||||
# Consume whole buffer except last bytes, which length is
|
||||
# one less than seplen. Let's check corner cases with
|
||||
# separator='SEPARATOR':
|
||||
# * we have received almost complete separator (without last
|
||||
# byte). i.e buffer='some textSEPARATO'. In this case we
|
||||
# can safely consume len(separator) - 1 bytes.
|
||||
# * last byte of buffer is first byte of separator, i.e.
|
||||
# buffer='abcdefghijklmnopqrS'. We may safely consume
|
||||
# everything except that last byte, but this require to
|
||||
# analyze bytes of buffer that match partial separator.
|
||||
# This is slow and/or require FSM. For this case our
|
||||
# implementation is not optimal, since require rescanning
|
||||
# of data that is known to not belong to separator. In
|
||||
# real world, separator will not be so long to notice
|
||||
# performance problems. Even when reading MIME-encoded
|
||||
# messages :)
|
||||
|
||||
# `offset` is the number of bytes from the beginning of the buffer
|
||||
# where there is no occurrence of `separator`.
|
||||
offset = 0
|
||||
|
||||
# Loop until we find `separator` in the buffer, exceed the buffer size,
|
||||
# or an EOF has happened.
|
||||
while True:
|
||||
buflen = len(self._buffer)
|
||||
|
||||
# Check if we now have enough data in the buffer for `separator` to
|
||||
# fit.
|
||||
if buflen - offset >= seplen:
|
||||
isep = self._buffer.find(separator, offset)
|
||||
|
||||
if isep != -1:
|
||||
# `separator` is in the buffer. `isep` will be used later
|
||||
# to retrieve the data.
|
||||
break
|
||||
|
||||
# see upper comment for explanation.
|
||||
offset = buflen + 1 - seplen
|
||||
if offset > self._limit:
|
||||
raise LimitOverrunError(
|
||||
'Separator is not found, and chunk exceed the limit',
|
||||
offset)
|
||||
|
||||
# Complete message (with full separator) may be present in buffer
|
||||
# even when EOF flag is set. This may happen when the last chunk
|
||||
# adds data which makes separator be found. That's why we check for
|
||||
# EOF *ater* inspecting the buffer.
|
||||
if self._eof:
|
||||
chunk = bytes(self._buffer)
|
||||
self._buffer.clear()
|
||||
raise IncompleteReadError(chunk, None)
|
||||
|
||||
# _wait_for_data() will resume reading if stream was paused.
|
||||
yield from self._wait_for_data('readuntil')
|
||||
|
||||
if isep > self._limit:
|
||||
raise LimitOverrunError(
|
||||
'Separator is found, but chunk is longer than limit', isep)
|
||||
|
||||
chunk = self._buffer[:isep + seplen]
|
||||
del self._buffer[:isep + seplen]
|
||||
self._maybe_resume_transport()
|
||||
return bytes(chunk)
|
||||
|
||||
@coroutine
|
||||
def read(self, n=-1):
|
||||
"""Read up to `n` bytes from the stream.
|
||||
|
||||
If n is not provided, or set to -1, read until EOF and return all read
|
||||
bytes. If the EOF was received and the internal buffer is empty, return
|
||||
an empty bytes object.
|
||||
|
||||
If n is zero, return empty bytes object immediately.
|
||||
|
||||
If n is positive, this function try to read `n` bytes, and may return
|
||||
less or equal bytes than requested, but at least one byte. If EOF was
|
||||
received before any byte is read, this function returns empty byte
|
||||
object.
|
||||
|
||||
Returned value is not limited with limit, configured at stream
|
||||
creation.
|
||||
|
||||
If stream was paused, this function will automatically resume it if
|
||||
needed.
|
||||
"""
|
||||
|
||||
if self._exception is not None:
|
||||
raise self._exception
|
||||
|
||||
if n == 0:
|
||||
return b''
|
||||
|
||||
if n < 0:
|
||||
# This used to just loop creating a new waiter hoping to
|
||||
# collect everything in self._buffer, but that would
|
||||
# deadlock if the subprocess sends more than self.limit
|
||||
# bytes. So just call self.read(self._limit) until EOF.
|
||||
blocks = []
|
||||
while True:
|
||||
block = yield from self.read(self._limit)
|
||||
if not block:
|
||||
break
|
||||
blocks.append(block)
|
||||
return b''.join(blocks)
|
||||
|
||||
if not self._buffer and not self._eof:
|
||||
yield from self._wait_for_data('read')
|
||||
|
||||
# This will work right even if buffer is less than n bytes
|
||||
data = bytes(self._buffer[:n])
|
||||
del self._buffer[:n]
|
||||
|
||||
self._maybe_resume_transport()
|
||||
return data
|
||||
|
||||
@coroutine
|
||||
def readexactly(self, n):
|
||||
"""Read exactly `n` bytes.
|
||||
|
||||
Raise an IncompleteReadError if EOF is reached before `n` bytes can be
|
||||
read. The IncompleteReadError.partial attribute of the exception will
|
||||
contain the partial read bytes.
|
||||
|
||||
if n is zero, return empty bytes object.
|
||||
|
||||
Returned value is not limited with limit, configured at stream
|
||||
creation.
|
||||
|
||||
If stream was paused, this function will automatically resume it if
|
||||
needed.
|
||||
"""
|
||||
if n < 0:
|
||||
raise ValueError('readexactly size can not be less than zero')
|
||||
|
||||
if self._exception is not None:
|
||||
raise self._exception
|
||||
|
||||
if n == 0:
|
||||
return b''
|
||||
|
||||
while len(self._buffer) < n:
|
||||
if self._eof:
|
||||
incomplete = bytes(self._buffer)
|
||||
self._buffer.clear()
|
||||
raise IncompleteReadError(incomplete, n)
|
||||
|
||||
yield from self._wait_for_data('readexactly')
|
||||
|
||||
if len(self._buffer) == n:
|
||||
data = bytes(self._buffer)
|
||||
self._buffer.clear()
|
||||
else:
|
||||
data = bytes(self._buffer[:n])
|
||||
del self._buffer[:n]
|
||||
self._maybe_resume_transport()
|
||||
return data
|
||||
|
||||
if compat.PY35:
|
||||
@coroutine
|
||||
def __aiter__(self):
|
||||
return self
|
||||
|
||||
@coroutine
|
||||
def __anext__(self):
|
||||
val = yield from self.readline()
|
||||
if val == b'':
|
||||
raise StopAsyncIteration
|
||||
return val
|
||||
|
||||
if compat.PY352:
|
||||
# In Python 3.5.2 and greater, __aiter__ should return
|
||||
# the asynchronous iterator directly.
|
||||
def __aiter__(self):
|
||||
return self
|
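A loopback sketch of the stream API above (not part of the vendored file): start_server() hands each connection to a callback as a (StreamReader, StreamWriter) pair, and open_connection() gives the client side the same pair. Port 8888 is arbitrary.

import asyncio
from asyncio import coroutine

@coroutine
def handle(reader, writer):
    line = yield from reader.readline()   # StreamReader.readline() above
    writer.write(line.upper())
    yield from writer.drain()             # cooperates with FlowControlMixin
    writer.close()

@coroutine
def main(loop):
    server = yield from asyncio.start_server(handle, '127.0.0.1', 8888,
                                             loop=loop)
    reader, writer = yield from asyncio.open_connection('127.0.0.1', 8888,
                                                        loop=loop)
    writer.write(b'hello\n')
    print((yield from reader.readline()))  # b'HELLO\n'
    writer.close()
    server.close()
    yield from server.wait_closed()

loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
loop.close()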
226
third_party/python/Lib/asyncio/subprocess.py
vendored
Normal file
@@ -0,0 +1,226 @@
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
|
||||
|
||||
import subprocess
|
||||
|
||||
from . import events
|
||||
from . import protocols
|
||||
from . import streams
|
||||
from . import tasks
|
||||
from .coroutines import coroutine
|
||||
from .log import logger
|
||||
|
||||
|
||||
PIPE = subprocess.PIPE
|
||||
STDOUT = subprocess.STDOUT
|
||||
DEVNULL = subprocess.DEVNULL
|
||||
|
||||
|
||||
class SubprocessStreamProtocol(streams.FlowControlMixin,
|
||||
protocols.SubprocessProtocol):
|
||||
"""Like StreamReaderProtocol, but for a subprocess."""
|
||||
|
||||
def __init__(self, limit, loop):
|
||||
super().__init__(loop=loop)
|
||||
self._limit = limit
|
||||
self.stdin = self.stdout = self.stderr = None
|
||||
self._transport = None
|
||||
self._process_exited = False
|
||||
self._pipe_fds = []
|
||||
|
||||
def __repr__(self):
|
||||
info = [self.__class__.__name__]
|
||||
if self.stdin is not None:
|
||||
info.append('stdin=%r' % self.stdin)
|
||||
if self.stdout is not None:
|
||||
info.append('stdout=%r' % self.stdout)
|
||||
if self.stderr is not None:
|
||||
info.append('stderr=%r' % self.stderr)
|
||||
return '<%s>' % ' '.join(info)
|
||||
|
||||
def connection_made(self, transport):
|
||||
self._transport = transport
|
||||
|
||||
stdout_transport = transport.get_pipe_transport(1)
|
||||
if stdout_transport is not None:
|
||||
self.stdout = streams.StreamReader(limit=self._limit,
|
||||
loop=self._loop)
|
||||
self.stdout.set_transport(stdout_transport)
|
||||
self._pipe_fds.append(1)
|
||||
|
||||
stderr_transport = transport.get_pipe_transport(2)
|
||||
if stderr_transport is not None:
|
||||
self.stderr = streams.StreamReader(limit=self._limit,
|
||||
loop=self._loop)
|
||||
self.stderr.set_transport(stderr_transport)
|
||||
self._pipe_fds.append(2)
|
||||
|
||||
stdin_transport = transport.get_pipe_transport(0)
|
||||
if stdin_transport is not None:
|
||||
self.stdin = streams.StreamWriter(stdin_transport,
|
||||
protocol=self,
|
||||
reader=None,
|
||||
loop=self._loop)
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if fd == 1:
|
||||
reader = self.stdout
|
||||
elif fd == 2:
|
||||
reader = self.stderr
|
||||
else:
|
||||
reader = None
|
||||
if reader is not None:
|
||||
reader.feed_data(data)
|
||||
|
||||
def pipe_connection_lost(self, fd, exc):
|
||||
if fd == 0:
|
||||
pipe = self.stdin
|
||||
if pipe is not None:
|
||||
pipe.close()
|
||||
self.connection_lost(exc)
|
||||
return
|
||||
if fd == 1:
|
||||
reader = self.stdout
|
||||
elif fd == 2:
|
||||
reader = self.stderr
|
||||
else:
|
||||
reader = None
|
||||
if reader != None:
|
||||
if exc is None:
|
||||
reader.feed_eof()
|
||||
else:
|
||||
reader.set_exception(exc)
|
||||
|
||||
if fd in self._pipe_fds:
|
||||
self._pipe_fds.remove(fd)
|
||||
self._maybe_close_transport()
|
||||
|
||||
def process_exited(self):
|
||||
        self._process_exited = True
        self._maybe_close_transport()

    def _maybe_close_transport(self):
        if len(self._pipe_fds) == 0 and self._process_exited:
            self._transport.close()
            self._transport = None


class Process:
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.pid)

    @property
    def returncode(self):
        return self._transport.get_returncode()

    @coroutine
    def wait(self):
        """Wait until the process exits and return its return code.

        This method is a coroutine."""
        return (yield from self._transport._wait())

    def send_signal(self, signal):
        self._transport.send_signal(signal)

    def terminate(self):
        self._transport.terminate()

    def kill(self):
        self._transport.kill()

    @coroutine
    def _feed_stdin(self, input):
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug('%r communicate: feed stdin (%s bytes)',
                         self, len(input))
        try:
            yield from self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)

        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()

    @coroutine
    def _noop(self):
        return None

    @coroutine
    def _read_stream(self, fd):
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = yield from stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output

    @coroutine
    def communicate(self, input=None):
        if input is not None:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
                                                        loop=self._loop)
        yield from self.wait()
        return (stdout, stderr)


@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                            loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
    if loop is None:
        loop = events.get_event_loop()
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = yield from loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)


@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                           stderr=None, loop=None,
                           limit=streams._DEFAULT_LIMIT, **kwds):
    if loop is None:
        loop = events.get_event_loop()
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = yield from loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
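# Example (illustrative sketch, not part of the original module): typical
# use of the helpers above, relying on this module's PIPE constant and
# @coroutine import; the 'echo' command is a POSIX assumption.  Guarded so
# it only runs when this file is executed directly:
if __name__ == '__main__':
    import asyncio

    @coroutine
    def _demo():
        proc = yield from create_subprocess_exec('echo', 'hi', stdout=PIPE)
        stdout, _ = yield from proc.communicate()  # reads output, then waits
        print(proc.returncode, stdout)

    asyncio.get_event_loop().run_until_complete(_demo())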
726
third_party/python/Lib/asyncio/tasks.py
vendored
Normal file
@ -0,0 +1,726 @@
"""Support for tasks, coroutines and the scheduler."""

__all__ = ['Task',
           'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
           'wait', 'wait_for', 'as_completed', 'sleep', 'async',
           'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
           ]

import concurrent.futures
import functools
import inspect
import warnings
import weakref

from . import base_tasks
from . import compat
from . import coroutines
from . import events
from . import futures
from .coroutines import coroutine


class Task(futures.Future):
    """A coroutine wrapped in a Future."""

    # An important invariant maintained while a Task is not done:
    #
    # - Either _fut_waiter is None, and _step() is scheduled;
    # - or _fut_waiter is some Future, and _step() is *not* scheduled.
    #
    # The only transition from the latter to the former is through
    # _wakeup().  When _fut_waiter is not None, one of its callbacks
    # must be _wakeup().

    # Weak set containing all tasks alive.
    _all_tasks = weakref.WeakSet()

    # Dictionary containing tasks that are currently active in
    # all running event loops.  {EventLoop: Task}
    _current_tasks = {}

    # If False, don't log a message if the task is destroyed whereas its
    # status is still pending
    _log_destroy_pending = True

    @classmethod
    def current_task(cls, loop=None):
        """Return the currently running task in an event loop or None.

        By default the current task for the current event loop is returned.

        None is returned when not called in the context of a Task.
        """
        if loop is None:
            loop = events.get_event_loop()
        return cls._current_tasks.get(loop)

    @classmethod
    def all_tasks(cls, loop=None):
        """Return a set of all tasks for an event loop.

        By default all tasks for the current event loop are returned.
        """
        if loop is None:
            loop = events.get_event_loop()
        return {t for t in cls._all_tasks if t._loop is loop}

    def __init__(self, coro, *, loop=None):
        assert coroutines.iscoroutine(coro), repr(coro)
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._coro = coro
        self._fut_waiter = None
        self._must_cancel = False
        self._loop.call_soon(self._step)
        self.__class__._all_tasks.add(self)

    # On Python 3.3 or older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That's not the case any more on
    # Python 3.4 thanks to the PEP 442.
    if compat.PY34:
        def __del__(self):
            if self._state == futures._PENDING and self._log_destroy_pending:
                context = {
                    'task': self,
                    'message': 'Task was destroyed but it is pending!',
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
            futures.Future.__del__(self)

    def _repr_info(self):
        return base_tasks._task_repr_info(self)

    def get_stack(self, *, limit=None):
        """Return the list of stack frames for this task's coroutine.

        If the coroutine is not done, this returns the stack where it is
        suspended.  If the coroutine has completed successfully or was
        cancelled, this returns an empty list.  If the coroutine was
        terminated by an exception, this returns the list of traceback
        frames.

        The frames are always ordered from oldest to newest.

        The optional limit gives the maximum number of frames to
        return; by default all available frames are returned.  Its
        meaning differs depending on whether a stack or a traceback is
        returned: the newest frames of a stack are returned, but the
        oldest frames of a traceback are returned.  (This matches the
        behavior of the traceback module.)

        For reasons beyond our control, only one stack frame is
        returned for a suspended coroutine.
        """
        return base_tasks._task_get_stack(self, limit)

    def print_stack(self, *, limit=None, file=None):
        """Print the stack or traceback for this task's coroutine.

        This produces output similar to that of the traceback module,
        for the frames retrieved by get_stack().  The limit argument
        is passed to get_stack().  The file argument is an I/O stream
        to which the output is written; by default output is written
        to sys.stderr.
        """
        return base_tasks._task_print_stack(self, limit, file)

    def cancel(self):
        """Request that this task cancel itself.

        This arranges for a CancelledError to be thrown into the
        wrapped coroutine on the next cycle through the event loop.
        The coroutine then has a chance to clean up or even deny
        the request using try/except/finally.

        Unlike Future.cancel, this does not guarantee that the
        task will be cancelled: the exception might be caught and
        acted upon, delaying cancellation of the task or preventing
        cancellation completely.  The task may also return a value or
        raise a different exception.

        Immediately after this method is called, Task.cancelled() will
        not return True (unless the task was already cancelled).  A
        task will be marked as cancelled when the wrapped coroutine
        terminates with a CancelledError exception (even if cancel()
        was not called).
        """
        self._log_traceback = False
        if self.done():
            return False
        if self._fut_waiter is not None:
            if self._fut_waiter.cancel():
                # Leave self._fut_waiter; it may be a Task that
                # catches and ignores the cancellation so we may have
                # to cancel it again later.
                return True
        # It must be the case that self._step is already scheduled.
        self._must_cancel = True
        return True

    def _step(self, exc=None):
        assert not self.done(), \
            '_step(): already done: {!r}, {!r}'.format(self, exc)
        if self._must_cancel:
            if not isinstance(exc, futures.CancelledError):
                exc = futures.CancelledError()
            self._must_cancel = False
        coro = self._coro
        self._fut_waiter = None

        self.__class__._current_tasks[self._loop] = self
        # Call either coro.throw(exc) or coro.send(None).
        try:
            if exc is None:
                # We use the `send` method directly, because coroutines
                # don't have `__iter__` and `__next__` methods.
                result = coro.send(None)
            else:
                result = coro.throw(exc)
        except StopIteration as exc:
            if self._must_cancel:
                # Task is cancelled right before coro stops.
                self._must_cancel = False
                self.set_exception(futures.CancelledError())
            else:
                self.set_result(exc.value)
        except futures.CancelledError:
            super().cancel()  # I.e., Future.cancel(self).
        except Exception as exc:
            self.set_exception(exc)
        except BaseException as exc:
            self.set_exception(exc)
            raise
        else:
            blocking = getattr(result, '_asyncio_future_blocking', None)
            if blocking is not None:
                # Yielded Future must come from Future.__iter__().
                if result._loop is not self._loop:
                    self._loop.call_soon(
                        self._step,
                        RuntimeError(
                            'Task {!r} got Future {!r} attached to a '
                            'different loop'.format(self, result)))
                elif blocking:
                    if result is self:
                        self._loop.call_soon(
                            self._step,
                            RuntimeError(
                                'Task cannot await on itself: {!r}'.format(
                                    self)))
                    else:
                        result._asyncio_future_blocking = False
                        result.add_done_callback(self._wakeup)
                        self._fut_waiter = result
                        if self._must_cancel:
                            if self._fut_waiter.cancel():
                                self._must_cancel = False
                else:
                    self._loop.call_soon(
                        self._step,
                        RuntimeError(
                            'yield was used instead of yield from '
                            'in task {!r} with {!r}'.format(self, result)))
            elif result is None:
                # Bare yield relinquishes control for one event loop iteration.
                self._loop.call_soon(self._step)
            elif inspect.isgenerator(result):
                # Yielding a generator is just wrong.
                self._loop.call_soon(
                    self._step,
                    RuntimeError(
                        'yield was used instead of yield from for '
                        'generator in task {!r} with {!r}'.format(
                            self, result)))
            else:
                # Yielding something else is an error.
                self._loop.call_soon(
                    self._step,
                    RuntimeError(
                        'Task got bad yield: {!r}'.format(result)))
        finally:
            self.__class__._current_tasks.pop(self._loop)
            self = None  # Needed to break cycles when an exception occurs.

    def _wakeup(self, future):
        try:
            future.result()
        except Exception as exc:
            # This may also be a cancellation.
            self._step(exc)
        else:
            # Don't pass the value of `future.result()` explicitly,
            # as `Future.__iter__` and `Future.__await__` don't need it.
            # If we call `_step(value, None)` instead of `_step()`,
            # Python eval loop would use `.send(value)` method call,
            # instead of `__next__()`, which is slower for futures
            # that return non-generator iterators from their `__iter__`.
            self._step()
        self = None  # Needed to break cycles when an exception occurs.

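# Example (illustrative sketch, not part of the original module): the
# cancel() contract documented above.  cancelled() only becomes True after
# the wrapped coroutine lets CancelledError terminate it; the sleep and
# timings are assumptions.  Guarded so it only runs when executed directly:
if __name__ == '__main__':
    import asyncio

    @coroutine
    def _worker():
        try:
            yield from asyncio.sleep(10)
        except futures.CancelledError:
            print('cleaning up')
            raise  # let the cancellation through so the task is cancelled

    _loop = asyncio.get_event_loop()
    _task = _loop.create_task(_worker())
    _loop.call_later(0.01, _task.cancel)
    try:
        _loop.run_until_complete(_task)
    except futures.CancelledError:
        print(_task.cancelled())  # True
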
_PyTask = Task


try:
    import _asyncio
except ImportError:
    pass
else:
    # _CTask is needed for tests.
    Task = _CTask = _asyncio.Task


# wait() and as_completed() similar to those in PEP 3148.

FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED


@coroutine
def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the Futures and coroutines given by fs to complete.

    The sequence of futures must not be empty.

    Coroutines will be wrapped in Tasks.

    Returns two sets of Future: (done, pending).

    Usage:

        done, pending = yield from asyncio.wait(fs)

    Note: This does not raise TimeoutError!  Futures that aren't done
    when the timeout occurs are returned in the second set.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
    if not fs:
        raise ValueError('Set of coroutines/Futures is empty.')
    if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
        raise ValueError('Invalid return_when value: {}'.format(return_when))

    if loop is None:
        loop = events.get_event_loop()

    fs = {ensure_future(f, loop=loop) for f in set(fs)}

    return (yield from _wait(fs, timeout, return_when, loop))

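# Example (illustrative sketch, not part of the original module):
# FIRST_COMPLETED returns as soon as one future finishes; the rest come
# back in `pending`, not cancelled.  The delays are assumptions.  Guarded
# so it only runs when executed directly:
if __name__ == '__main__':
    import asyncio

    @coroutine
    def _demo():
        fs = [ensure_future(asyncio.sleep(d, result=d))
              for d in (0.01, 0.02, 0.03)]
        done, pending = yield from wait(fs, return_when=FIRST_COMPLETED)
        print(len(done), len(pending))  # typically: 1 2
        for f in pending:
            f.cancel()
        yield from wait(pending)  # let the cancellations be delivered

    asyncio.get_event_loop().run_until_complete(_demo())
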
def _release_waiter(waiter, *args):
    if not waiter.done():
        waiter.set_result(None)


@coroutine
def wait_for(fut, timeout, *, loop=None):
    """Wait for the single Future or coroutine to complete, with timeout.

    Coroutine will be wrapped in Task.

    Returns result of the Future or coroutine.  When a timeout occurs,
    it cancels the task and raises TimeoutError.  To avoid the task
    cancellation, wrap it in shield().

    If the wait is cancelled, the task is also cancelled.

    This function is a coroutine.
    """
    if loop is None:
        loop = events.get_event_loop()

    if timeout is None:
        return (yield from fut)

    waiter = loop.create_future()
    timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    cb = functools.partial(_release_waiter, waiter)

    fut = ensure_future(fut, loop=loop)
    fut.add_done_callback(cb)

    try:
        # wait until the future completes or the timeout
        try:
            yield from waiter
        except futures.CancelledError:
            fut.remove_done_callback(cb)
            fut.cancel()
            raise

        if fut.done():
            return fut.result()
        else:
            fut.remove_done_callback(cb)
            fut.cancel()
            raise futures.TimeoutError()
    finally:
        timeout_handle.cancel()

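# Example (illustrative sketch, not part of the original module): a timeout
# cancels the inner task and raises TimeoutError; the timings are
# assumptions.  Guarded so it only runs when executed directly:
if __name__ == '__main__':
    import asyncio

    @coroutine
    def _demo():
        try:
            yield from wait_for(asyncio.sleep(1), timeout=0.01)
        except futures.TimeoutError:
            print('timed out')

    asyncio.get_event_loop().run_until_complete(_demo())
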
@coroutine
def _wait(fs, timeout, return_when, loop):
    """Internal helper for wait() and wait_for().

    The fs argument must be a collection of Futures.
    """
    assert fs, 'Set of Futures is empty.'
    waiter = loop.create_future()
    timeout_handle = None
    if timeout is not None:
        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    counter = len(fs)

    def _on_completion(f):
        nonlocal counter
        counter -= 1
        if (counter <= 0 or
            return_when == FIRST_COMPLETED or
            return_when == FIRST_EXCEPTION and (not f.cancelled() and
                                                f.exception() is not None)):
            if timeout_handle is not None:
                timeout_handle.cancel()
            if not waiter.done():
                waiter.set_result(None)

    for f in fs:
        f.add_done_callback(_on_completion)

    try:
        yield from waiter
    finally:
        if timeout_handle is not None:
            timeout_handle.cancel()

    done, pending = set(), set()
    for f in fs:
        f.remove_done_callback(_on_completion)
        if f.done():
            done.add(f)
        else:
            pending.add(f)
    return done, pending


# This is *not* a @coroutine!  It is just an iterator (yielding Futures).
def as_completed(fs, *, loop=None, timeout=None):
    """Return an iterator whose values are coroutines.

    When waiting for the yielded coroutines you'll get the results (or
    exceptions!) of the original Futures (or coroutines), in the order
    in which and as soon as they complete.

    This differs from PEP 3148; the proper way to use this is:

        for f in as_completed(fs):
            result = yield from f  # The 'yield from' may raise.
            # Use result.

    If a timeout is specified, the 'yield from' will raise
    TimeoutError when the timeout occurs before all Futures are done.

    Note: The futures 'f' are not necessarily members of fs.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
    loop = loop if loop is not None else events.get_event_loop()
    todo = {ensure_future(f, loop=loop) for f in set(fs)}
    from .queues import Queue  # Import here to avoid circular import problem.
    done = Queue(loop=loop)
    timeout_handle = None

    def _on_timeout():
        for f in todo:
            f.remove_done_callback(_on_completion)
            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
        todo.clear()  # Can't do todo.remove(f) in the loop.

    def _on_completion(f):
        if not todo:
            return  # _on_timeout() was here first.
        todo.remove(f)
        done.put_nowait(f)
        if not todo and timeout_handle is not None:
            timeout_handle.cancel()

    @coroutine
    def _wait_for_one():
        f = yield from done.get()
        if f is None:
            # Dummy value from _on_timeout().
            raise futures.TimeoutError
        return f.result()  # May raise f.exception().

    for f in todo:
        f.add_done_callback(_on_completion)
    if todo and timeout is not None:
        timeout_handle = loop.call_later(timeout, _on_timeout)
    for _ in range(len(todo)):
        yield _wait_for_one()

@coroutine
def sleep(delay, result=None, *, loop=None):
    """Coroutine that completes after a given time (in seconds)."""
    if delay == 0:
        yield
        return result

    if loop is None:
        loop = events.get_event_loop()
    future = loop.create_future()
    h = future._loop.call_later(delay,
                                futures._set_result_unless_cancelled,
                                future, result)
    try:
        return (yield from future)
    finally:
        h.cancel()


def async_(coro_or_future, *, loop=None):
    """Wrap a coroutine in a future.

    If the argument is a Future, it is returned directly.

    This function is deprecated in 3.5. Use asyncio.ensure_future() instead.
    """

    warnings.warn("asyncio.async() function is deprecated, use ensure_future()",
                  DeprecationWarning,
                  stacklevel=2)

    return ensure_future(coro_or_future, loop=loop)

# Silence DeprecationWarning:
globals()['async'] = async_
async_.__name__ = 'async'
del async_


def ensure_future(coro_or_future, *, loop=None):
    """Wrap a coroutine or an awaitable in a future.

    If the argument is a Future, it is returned directly.
    """
    if futures.isfuture(coro_or_future):
        if loop is not None and loop is not coro_or_future._loop:
            raise ValueError('loop argument must agree with Future')
        return coro_or_future
    elif coroutines.iscoroutine(coro_or_future):
        if loop is None:
            loop = events.get_event_loop()
        task = loop.create_task(coro_or_future)
        if task._source_traceback:
            del task._source_traceback[-1]
        return task
    elif compat.PY35 and inspect.isawaitable(coro_or_future):
        return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
    else:
        raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
                        'required')


@coroutine
def _wrap_awaitable(awaitable):
    """Helper for asyncio.ensure_future().

    Wraps awaitable (an object with __await__) into a coroutine
    that will later be wrapped in a Task by ensure_future().
    """
    return (yield from awaitable.__await__())

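# Example (illustrative sketch, not part of the original module): coroutines
# become Tasks, Futures pass through unchanged.  Guarded so it only runs
# when executed directly:
if __name__ == '__main__':
    import asyncio

    @coroutine
    def _answer():
        return 42

    _loop = asyncio.get_event_loop()
    _t = ensure_future(_answer())
    assert ensure_future(_t) is _t  # a Future is returned as-is
    print(_loop.run_until_complete(_t))  # 42
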
class _GatheringFuture(futures.Future):
    """Helper for gather().

    This overrides cancel() to cancel all the children and act more
    like Task.cancel(), which doesn't immediately mark itself as
    cancelled.
    """

    def __init__(self, children, *, loop=None):
        super().__init__(loop=loop)
        self._children = children
        self._cancel_requested = False

    def cancel(self):
        if self.done():
            return False
        ret = False
        for child in self._children:
            if child.cancel():
                ret = True
        if ret:
            # If any child tasks were actually cancelled, we should
            # propagate the cancellation request regardless of
            # *return_exceptions* argument.  See issue 32684.
            self._cancel_requested = True
        return ret


def gather(*coros_or_futures, loop=None, return_exceptions=False):
    """Return a future aggregating results from the given coroutines
    or futures.

    Coroutines will be wrapped in a future and scheduled in the event
    loop.  They will not necessarily be scheduled in the same order as
    passed in.

    All futures must share the same event loop.  If all the tasks are
    done successfully, the returned future's result is the list of
    results (in the order of the original sequence, not necessarily
    the order of results arrival).  If *return_exceptions* is True,
    exceptions in the tasks are treated the same as successful
    results, and gathered in the result list; otherwise, the first
    raised exception will be immediately propagated to the returned
    future.

    Cancellation: if the outer Future is cancelled, all children (that
    have not completed yet) are also cancelled.  If any child is
    cancelled, this is treated as if it raised CancelledError --
    the outer Future is *not* cancelled in this case.  (This is to
    prevent the cancellation of one child from causing other children
    to be cancelled.)
    """
    if not coros_or_futures:
        if loop is None:
            loop = events.get_event_loop()
        outer = loop.create_future()
        outer.set_result([])
        return outer

    arg_to_fut = {}
    for arg in set(coros_or_futures):
        if not futures.isfuture(arg):
            fut = ensure_future(arg, loop=loop)
            if loop is None:
                loop = fut._loop
            # The caller cannot control this future, the "destroy pending task"
            # warning should not be emitted.
            fut._log_destroy_pending = False
        else:
            fut = arg
            if loop is None:
                loop = fut._loop
            elif fut._loop is not loop:
                raise ValueError("futures are tied to different event loops")
        arg_to_fut[arg] = fut

    children = [arg_to_fut[arg] for arg in coros_or_futures]
    nchildren = len(children)
    outer = _GatheringFuture(children, loop=loop)
    nfinished = 0
    results = [None] * nchildren

    def _done_callback(i, fut):
        nonlocal nfinished
        if outer.done():
            if not fut.cancelled():
                # Mark exception retrieved.
                fut.exception()
            return

        if fut.cancelled():
            res = futures.CancelledError()
            if not return_exceptions:
                outer.set_exception(res)
                return
        elif fut._exception is not None:
            res = fut.exception()  # Mark exception retrieved.
            if not return_exceptions:
                outer.set_exception(res)
                return
        else:
            res = fut._result
        results[i] = res
        nfinished += 1
        if nfinished == nchildren:
            if outer._cancel_requested:
                outer.set_exception(futures.CancelledError())
            else:
                outer.set_result(results)

    for i, fut in enumerate(children):
        fut.add_done_callback(functools.partial(_done_callback, i))
    return outer

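# Example (illustrative sketch, not part of the original module): with
# return_exceptions=True an exception becomes an item in the result list
# instead of propagating.  Guarded so it only runs when executed directly:
if __name__ == '__main__':
    import asyncio

    @coroutine
    def _boom():
        raise RuntimeError('boom')

    _loop = asyncio.get_event_loop()
    _results = _loop.run_until_complete(
        gather(asyncio.sleep(0, result='ok'), _boom(),
               return_exceptions=True))
    print(_results)  # ['ok', RuntimeError('boom')]
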
def shield(arg, *, loop=None):
    """Wait for a future, shielding it from cancellation.

    The statement

        res = yield from shield(something())

    is exactly equivalent to the statement

        res = yield from something()

    *except* that if the coroutine containing it is cancelled, the
    task running in something() is not cancelled.  From the POV of
    something(), the cancellation did not happen.  But its caller is
    still cancelled, so the yield-from expression still raises
    CancelledError.  Note: If something() is cancelled by other means
    this will still cancel shield().

    If you want to completely ignore cancellation (not recommended)
    you can combine shield() with a try/except clause, as follows:

        try:
            res = yield from shield(something())
        except CancelledError:
            res = None
    """
    inner = ensure_future(arg, loop=loop)
    if inner.done():
        # Shortcut.
        return inner
    loop = inner._loop
    outer = loop.create_future()

    def _done_callback(inner):
        if outer.cancelled():
            if not inner.cancelled():
                # Mark inner's result as retrieved.
                inner.exception()
            return

        if inner.cancelled():
            outer.cancel()
        else:
            exc = inner.exception()
            if exc is not None:
                outer.set_exception(exc)
            else:
                outer.set_result(inner.result())

    inner.add_done_callback(_done_callback)
    return outer


def run_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    Return a concurrent.futures.Future to access the result.
    """
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')
    future = concurrent.futures.Future()

    def callback():
        try:
            futures._chain_future(ensure_future(coro, loop=loop), future)
        except Exception as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(callback)
    return future
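# Example (illustrative sketch, not part of the original module): submitting
# a coroutine to a loop running in another thread; the daemon thread and
# timeout are assumptions.  Guarded so it only runs when executed directly:
if __name__ == '__main__':
    import asyncio
    import threading

    @coroutine
    def _add(a, b):
        return a + b

    _loop = asyncio.new_event_loop()
    threading.Thread(target=_loop.run_forever, daemon=True).start()
    _fut = run_coroutine_threadsafe(_add(1, 2), _loop)  # from this thread
    print(_fut.result(timeout=5))  # 3, via a concurrent.futures.Future
    _loop.call_soon_threadsafe(_loop.stop)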
537
third_party/python/Lib/asyncio/test_utils.py
vendored
Normal file
@ -0,0 +1,537 @@
"""Utilities shared by tests."""

import collections
import contextlib
import io
import logging
import os
import re
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref

from unittest import mock

from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer

try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None

from . import base_events
from . import compat
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger
from test import support


if sys.platform == 'win32':  # pragma: no cover
    from .windows_utils import socketpair
else:
    from socket import socketpair  # pragma: no cover


def data_file(filename):
    if hasattr(support, 'TEST_HOME_DIR'):
        fullname = os.path.join(support.TEST_HOME_DIR, filename)
        if os.path.isfile(fullname):
            return fullname
    fullname = os.path.join(os.path.dirname(os.__file__), 'test', filename)
    if os.path.isfile(fullname):
        return fullname
    raise FileNotFoundError(filename)


ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')


def dummy_ssl_context():
    if ssl is None:
        return None
    else:
        return ssl.SSLContext(ssl.PROTOCOL_SSLv23)


def run_briefly(loop):
    @coroutine
    def once():
        pass
    gen = once()
    t = loop.create_task(gen)
    # Don't log a warning if the task is not done after run_until_complete().
    # It occurs if the loop is stopped or if a task raises a BaseException.
    t._log_destroy_pending = False
    try:
        loop.run_until_complete(t)
    finally:
        gen.close()


def run_until(loop, pred, timeout=30):
    deadline = time.time() + timeout
    while not pred():
        if timeout is not None:
            timeout = deadline - time.time()
            if timeout <= 0:
                raise futures.TimeoutError()
        loop.run_until_complete(tasks.sleep(0.001, loop=loop))


def run_once(loop):
    """Legacy API to run once through the event loop.

    This is the recommended pattern for test code.  It will poll the
    selector once and run all callbacks scheduled in response to I/O
    events.
    """
    loop.call_soon(loop.stop)
    loop.run_forever()


class SilentWSGIRequestHandler(WSGIRequestHandler):

    def get_stderr(self):
        return io.StringIO()

    def log_message(self, format, *args):
        pass


class SilentWSGIServer(WSGIServer):

    request_timeout = 2

    def get_request(self):
        request, client_addr = super().get_request()
        request.settimeout(self.request_timeout)
        return request, client_addr

    def handle_error(self, request, client_address):
        pass


class SSLWSGIServerMixin:

    def finish_request(self, request, client_address):
        # The relative location of our test directory (which
        # contains the ssl key and certificate files) differs
        # between the stdlib and stand-alone asyncio.
        # Prefer our own if we can find it.
        keyfile = ONLYKEY
        certfile = ONLYCERT
        context = ssl.SSLContext()
        context.load_cert_chain(certfile, keyfile)

        ssock = context.wrap_socket(request, server_side=True)
        try:
            self.RequestHandlerClass(ssock, client_address, self)
            ssock.close()
        except OSError:
            # maybe socket has been closed by peer
            pass


class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
    pass


def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):

    def app(environ, start_response):
        status = '200 OK'
        headers = [('Content-type', 'text/plain')]
        start_response(status, headers)
        return [b'Test message']

    # Run the test WSGI server in a separate thread in order not to
    # interfere with event handling in the main thread
    server_class = server_ssl_cls if use_ssl else server_cls
    httpd = server_class(address, SilentWSGIRequestHandler)
    httpd.set_app(app)
    httpd.address = httpd.server_address
    server_thread = threading.Thread(
        target=lambda: httpd.serve_forever(poll_interval=0.05))
    server_thread.start()
    try:
        yield httpd
    finally:
        httpd.shutdown()
        httpd.server_close()
        server_thread.join()


if hasattr(socket, 'AF_UNIX'):

    class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):

        def server_bind(self):
            socketserver.UnixStreamServer.server_bind(self)
            self.server_name = '127.0.0.1'
            self.server_port = 80


    class UnixWSGIServer(UnixHTTPServer, WSGIServer):

        request_timeout = 2

        def server_bind(self):
            UnixHTTPServer.server_bind(self)
            self.setup_environ()

        def get_request(self):
            request, client_addr = super().get_request()
            request.settimeout(self.request_timeout)
            # Code in the stdlib expects that get_request
            # will return a socket and a tuple (host, port).
            # However, this isn't true for UNIX sockets,
            # as the second return value will be a path;
            # hence we return some fake data sufficient
            # to get the tests going
            return request, ('127.0.0.1', '')


    class SilentUnixWSGIServer(UnixWSGIServer):

        def handle_error(self, request, client_address):
            pass


    class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
        pass


    def gen_unix_socket_path():
        with tempfile.NamedTemporaryFile() as file:
            return file.name


    @contextlib.contextmanager
    def unix_socket_path():
        path = gen_unix_socket_path()
        try:
            yield path
        finally:
            try:
                os.unlink(path)
            except OSError:
                pass


    @contextlib.contextmanager
    def run_test_unix_server(*, use_ssl=False):
        with unix_socket_path() as path:
            yield from _run_test_server(address=path, use_ssl=use_ssl,
                                        server_cls=SilentUnixWSGIServer,
                                        server_ssl_cls=UnixSSLWSGIServer)


@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
    yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
                                server_cls=SilentWSGIServer,
                                server_ssl_cls=SSLWSGIServer)

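# Example (illustrative sketch, not part of the original module): driving
# the context manager above directly.  Note this module imports
# `test.support`, which is not present on every Python installation.
# Guarded so it only runs when executed directly:
if __name__ == '__main__':
    import urllib.request
    with run_test_server() as httpd:
        url = 'http://{}:{}/'.format(*httpd.address)
        with urllib.request.urlopen(url) as resp:
            print(resp.read())  # b'Test message'
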
def make_test_protocol(base):
    dct = {}
    for name in dir(base):
        if name.startswith('__') and name.endswith('__'):
            # skip magic names
            continue
        dct[name] = MockCallback(return_value=None)
    return type('TestProtocol', (base,) + base.__bases__, dct)()


class TestSelector(selectors.BaseSelector):

    def __init__(self):
        self.keys = {}

    def register(self, fileobj, events, data=None):
        key = selectors.SelectorKey(fileobj, 0, events, data)
        self.keys[fileobj] = key
        return key

    def unregister(self, fileobj):
        return self.keys.pop(fileobj)

    def select(self, timeout):
        return []

    def get_map(self):
        return self.keys


class TestLoop(base_events.BaseEventLoop):
    """Loop for unittests.

    It manages its own (fake) time directly.  If something is scheduled
    to be executed later, then on the next loop iteration, after all
    ready handlers have run, the generator passed to __init__ is called.

    The generator should look like this:

        def gen():
            ...
            when = yield ...
            ... = yield time_advance

    The value returned by each yield is the absolute time of the next
    scheduled handler.  The value passed into each yield is the time
    advance by which to move the loop's time forward.
    """

    def __init__(self, gen=None):
        super().__init__()

        if gen is None:
            def gen():
                yield
            self._check_on_close = False
        else:
            self._check_on_close = True

        self._gen = gen()
        next(self._gen)
        self._time = 0
        self._clock_resolution = 1e-9
        self._timers = []
        self._selector = TestSelector()

        self.readers = {}
        self.writers = {}
        self.reset_counters()

        self._transports = weakref.WeakValueDictionary()

    def time(self):
        return self._time

    def advance_time(self, advance):
        """Move test time forward."""
        if advance:
            self._time += advance

    def close(self):
        super().close()
        if self._check_on_close:
            try:
                self._gen.send(0)
            except StopIteration:
                pass
            else:  # pragma: no cover
                raise AssertionError("Time generator is not finished")

    def _add_reader(self, fd, callback, *args):
        self.readers[fd] = events.Handle(callback, args, self)

    def _remove_reader(self, fd):
        self.remove_reader_count[fd] += 1
        if fd in self.readers:
            del self.readers[fd]
            return True
        else:
            return False

    def assert_reader(self, fd, callback, *args):
        if fd not in self.readers:
            raise AssertionError(f'fd {fd} is not registered')
        handle = self.readers[fd]
        if handle._callback != callback:
            raise AssertionError(
                f'unexpected callback: {handle._callback} != {callback}')
        if handle._args != args:
            raise AssertionError(
                f'unexpected callback args: {handle._args} != {args}')

    def assert_no_reader(self, fd):
        if fd in self.readers:
            raise AssertionError(f'fd {fd} is registered')

    def _add_writer(self, fd, callback, *args):
        self.writers[fd] = events.Handle(callback, args, self)

    def _remove_writer(self, fd):
        self.remove_writer_count[fd] += 1
        if fd in self.writers:
            del self.writers[fd]
            return True
        else:
            return False

    def assert_writer(self, fd, callback, *args):
        assert fd in self.writers, 'fd {} is not registered'.format(fd)
        handle = self.writers[fd]
        assert handle._callback == callback, '{!r} != {!r}'.format(
            handle._callback, callback)
        assert handle._args == args, '{!r} != {!r}'.format(
            handle._args, args)

    def _ensure_fd_no_transport(self, fd):
        try:
            transport = self._transports[fd]
        except KeyError:
            pass
        else:
            raise RuntimeError(
                'File descriptor {!r} is used by transport {!r}'.format(
                    fd, transport))

    def add_reader(self, fd, callback, *args):
        """Add a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._add_reader(fd, callback, *args)

    def remove_reader(self, fd):
        """Remove a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_reader(fd)

    def add_writer(self, fd, callback, *args):
        """Add a writer callback."""
        self._ensure_fd_no_transport(fd)
        return self._add_writer(fd, callback, *args)

    def remove_writer(self, fd):
        """Remove a writer callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_writer(fd)

    def reset_counters(self):
        self.remove_reader_count = collections.defaultdict(int)
        self.remove_writer_count = collections.defaultdict(int)

    def _run_once(self):
        super()._run_once()
        for when in self._timers:
            advance = self._gen.send(when)
            self.advance_time(advance)
        self._timers = []

    def call_at(self, when, callback, *args):
        self._timers.append(when)
        return super().call_at(when, callback, *args)

    def _process_events(self, event_list):
        return

    def _write_to_self(self):
        pass

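# Example (illustrative sketch, not part of the original module): driving
# TestLoop with a time generator.  The generator below assumes exactly one
# timer scheduled at t=1 (from asyncio.sleep(1)).  Guarded so it only runs
# when executed directly:
if __name__ == '__main__':
    import asyncio

    def _demo_gen():
        when = yield          # control handed back from TestLoop.__init__
        assert when == 1      # sleep(1) registers a timer at t=1
        yield 1               # advance the fake clock by 1 second

    _loop = TestLoop(_demo_gen())
    _loop.run_until_complete(asyncio.sleep(1, loop=_loop))
    _loop.close()
    print(_loop.time())       # 1, without any real waiting
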
def MockCallback(**kwargs):
    return mock.Mock(spec=['__call__'], **kwargs)


class MockPattern(str):
    """A regex-based str with a fuzzy __eq__.

    Use this helper with 'mock.assert_called_with', or anywhere
    where a regex comparison between strings is needed.

    For instance:
        mock_call.assert_called_with(MockPattern('spam.*ham'))
    """
    def __eq__(self, other):
        return bool(re.search(str(self), other, re.S))


def get_function_source(func):
    source = events._get_function_source(func)
    if source is None:
        raise ValueError("unable to get the source of %r" % (func,))
    return source


class TestCase(unittest.TestCase):
    @staticmethod
    def close_loop(loop):
        executor = loop._default_executor
        if executor is not None:
            executor.shutdown(wait=True)
        loop.close()

    def set_event_loop(self, loop, *, cleanup=True):
        assert loop is not None
        # ensure that the event loop is passed explicitly in asyncio
        events.set_event_loop(None)
        if cleanup:
            self.addCleanup(self.close_loop, loop)

    def new_test_loop(self, gen=None):
        loop = TestLoop(gen)
        self.set_event_loop(loop)
        return loop

    def unpatch_get_running_loop(self):
        events._get_running_loop = self._get_running_loop

    def setUp(self):
        self._get_running_loop = events._get_running_loop
        events._get_running_loop = lambda: None
        self._thread_cleanup = support.threading_setup()

    def tearDown(self):
        self.unpatch_get_running_loop()

        events.set_event_loop(None)

        # Detect CPython bug #23353: ensure that yield/yield-from is not used
        # in an except block of a generator
        self.assertEqual(sys.exc_info(), (None, None, None))

        self.doCleanups()
        support.threading_cleanup(*self._thread_cleanup)
        support.reap_children()

    if not compat.PY34:
        # Python 3.3 compatibility
        def subTest(self, *args, **kwargs):
            class EmptyCM:
                def __enter__(self):
                    pass
                def __exit__(self, *exc):
                    pass
            return EmptyCM()


@contextlib.contextmanager
def disable_logger():
    """Context manager to disable asyncio logger.

    For example, it can be used to ignore warnings in debug mode.
    """
    old_level = logger.level
    try:
        logger.setLevel(logging.CRITICAL+1)
        yield
    finally:
        logger.setLevel(old_level)


def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
                            family=socket.AF_INET):
    """Create a mock of a non-blocking socket."""
    sock = mock.MagicMock(socket.socket)
    sock.proto = proto
    sock.type = type
    sock.family = family
    sock.gettimeout.return_value = 0.0
    return sock


def force_legacy_ssl_support():
    return mock.patch('asyncio.sslproto._is_sslproto_available',
                      return_value=False)
306
third_party/python/Lib/asyncio/transports.py
vendored
Normal file
@ -0,0 +1,306 @@
"""Abstract Transport class."""

from asyncio import compat

__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
           'Transport', 'DatagramTransport', 'SubprocessTransport',
           ]


class BaseTransport:
    """Base class for transports."""

    def __init__(self, extra=None):
        if extra is None:
            extra = {}
        self._extra = extra

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._extra.get(name, default)

    def is_closing(self):
        """Return True if the transport is closing or closed."""
        raise NotImplementedError

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously.  No more data
        will be received.  After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError

    def set_protocol(self, protocol):
        """Set a new protocol."""
        raise NotImplementedError

    def get_protocol(self):
        """Return the current protocol."""
        raise NotImplementedError


class ReadTransport(BaseTransport):
    """Interface for read-only transports."""

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError


class WriteTransport(BaseTransport):
    """Interface for write-only transports."""

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods.  If specified,
        the low-water limit must be less than or equal to the
        high-water limit.  Neither value can be negative.

        The defaults are implementation-specific.  If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit.  Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty.  Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.

        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        data = compat.flatten_list_bytes(list_of_data)
        self.write(data)

    def write_eof(self):
        """Close the write end after flushing buffered data.

        (This is like typing ^D into a UNIX program reading from stdin.)

        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError


class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.

    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.

    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol.  (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)

    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.

    The implementation here raises NotImplementedError for every method
    except writelines(), which calls write() in a loop.
    """


class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""

    def sendto(self, data, addr=None):
        """Send data to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        addr is the target socket address.
        If addr is None, the target address given at transport
        creation is used.
        """
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError


class SubprocessTransport(BaseTransport):

    def get_pid(self):
        """Get subprocess id."""
        raise NotImplementedError

    def get_returncode(self):
        """Get subprocess returncode.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Get transport for pipe with number fd."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Send signal to subprocess.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Stop the subprocess.

        Alias for close() method.

        On Posix OSs the method sends SIGTERM to the subprocess.
        On Windows the Win32 API function TerminateProcess()
        is called to stop the subprocess.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Kill the subprocess.

        On Posix OSs the function sends SIGKILL to the subprocess.
        On Windows kill() is an alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError


class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.

    The subclass must implement get_write_buffer_size().  It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases.  It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).

    The subclass constructor must call super().__init__(extra).  This
    will call set_write_buffer_limits().

    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        self._protocol_paused = False
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        size = self.get_write_buffer_size()
        if size <= self._high_water:
            return
        if not self._protocol_paused:
            self._protocol_paused = True
            try:
                self._protocol.pause_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def _maybe_resume_protocol(self):
        if (self._protocol_paused and
                self.get_write_buffer_size() <= self._low_water):
            self._protocol_paused = False
            try:
                self._protocol.resume_writing()
            except Exception as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def get_write_buffer_limits(self):
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        if high is None:
            if low is None:
                high = 64*1024
            else:
                high = 4*low
        if low is None:
            low = high // 4
        if not high >= low >= 0:
            raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
                             (high, low))
        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        raise NotImplementedError
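# Example (illustrative sketch, not a real transport and not part of the
# original module): how the mix-in's pause/resume contract behaves as the
# reported buffer size changes.  The default limits computed above are
# 64 KiB high-water / 16 KiB low-water.  Guarded so it only runs when
# executed directly:
if __name__ == '__main__':
    import asyncio

    class _DemoProtocol(asyncio.Protocol):
        def pause_writing(self):
            print('paused')

        def resume_writing(self):
            print('resumed')

    class _DemoTransport(_FlowControlMixin):
        def __init__(self, protocol, loop):
            self._protocol = protocol
            self._size = 0
            super().__init__(None, loop)

        def get_write_buffer_size(self):
            return self._size

        def feed(self, n):
            self._size += n
            self._maybe_pause_protocol()    # pauses once size > high-water

        def drain(self, n):
            self._size -= n
            self._maybe_resume_protocol()   # resumes once size <= low-water

    _loop = asyncio.get_event_loop()
    _tr = _DemoTransport(_DemoProtocol(), _loop)
    _tr.feed(100 * 1024)   # prints 'paused'
    _tr.drain(100 * 1024)  # prints 'resumed'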
1083
third_party/python/Lib/asyncio/unix_events.py
vendored
Normal file
File diff suppressed because it is too large
779
third_party/python/Lib/asyncio/windows_events.py
vendored
Normal file
@ -0,0 +1,779 @@
"""Selector and proactor event loops for Windows."""

import _winapi
import errno
import math
import socket
import struct
import weakref

from . import events
from . import base_subprocess
from . import futures
from . import proactor_events
from . import selector_events
from . import tasks
from . import windows_utils
from . import _overlapped
from .coroutines import coroutine
from .log import logger


__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
           'DefaultEventLoopPolicy',
           ]


NULL = 0
INFINITE = 0xffffffff
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236

# Initial delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_INIT_DELAY = 0.001

# Maximum delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_MAX_DELAY = 0.100

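# Example (illustrative sketch, not part of the original module): on Windows
# the default event loop is selector-based; subprocess support and IOCP-backed
# pipes need the proactor loop defined below (Python 3.6-era API).  Guarded
# so it only runs when executed directly:
if __name__ == '__main__':
    import sys
    import asyncio

    if sys.platform == 'win32':
        _loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(_loop)
        print(type(_loop).__name__)  # 'ProactorEventLoop'
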
class _OverlappedFuture(futures.Future):
    """Subclass of Future which represents an overlapped operation.

    Cancelling it will immediately cancel the overlapped operation.
    """

    def __init__(self, ov, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._ov = ov

    def _repr_info(self):
        info = super()._repr_info()
        if self._ov is not None:
            state = 'pending' if self._ov.pending else 'completed'
            info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
        return info

    def _cancel_overlapped(self):
        if self._ov is None:
            return
        try:
            self._ov.cancel()
        except OSError as exc:
            context = {
                'message': 'Cancelling an overlapped future failed',
                'exception': exc,
                'future': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self._ov = None

    def cancel(self):
        self._cancel_overlapped()
        return super().cancel()

    def set_exception(self, exception):
        super().set_exception(exception)
        self._cancel_overlapped()

    def set_result(self, result):
        super().set_result(result)
        self._ov = None


class _BaseWaitHandleFuture(futures.Future):
    """Subclass of Future which represents a wait handle."""

    def __init__(self, ov, handle, wait_handle, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        # Keep a reference to the Overlapped object to keep it alive until the
        # wait is unregistered
        self._ov = ov
        self._handle = handle
        self._wait_handle = wait_handle

        # Should we call UnregisterWaitEx() if the wait completes
        # or is cancelled?
        self._registered = True

    def _poll(self):
        # non-blocking wait: use a timeout of 0 milliseconds
        return (_winapi.WaitForSingleObject(self._handle, 0) ==
                _winapi.WAIT_OBJECT_0)

    def _repr_info(self):
        info = super()._repr_info()
        info.append('handle=%#x' % self._handle)
        if self._handle is not None:
            state = 'signaled' if self._poll() else 'waiting'
            info.append(state)
        if self._wait_handle is not None:
            info.append('wait_handle=%#x' % self._wait_handle)
        return info

    def _unregister_wait_cb(self, fut):
        # The wait was unregistered: it's not safe to destroy the Overlapped
        # object
        self._ov = None

    def _unregister_wait(self):
        if not self._registered:
            return
        self._registered = False

        wait_handle = self._wait_handle
        self._wait_handle = None
        try:
            _overlapped.UnregisterWait(wait_handle)
        except OSError as exc:
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
                return
            # ERROR_IO_PENDING means that the unregister is pending

        self._unregister_wait_cb(None)

    def cancel(self):
        self._unregister_wait()
        return super().cancel()

    def set_exception(self, exception):
        self._unregister_wait()
        super().set_exception(exception)

    def set_result(self, result):
        self._unregister_wait()
        super().set_result(result)


class _WaitCancelFuture(_BaseWaitHandleFuture):
    """Subclass of Future which represents a wait for the cancellation of a
    _WaitHandleFuture using an event.
    """

    def __init__(self, ov, event, wait_handle, *, loop=None):
        super().__init__(ov, event, wait_handle, loop=loop)

        self._done_callback = None

    def cancel(self):
        raise RuntimeError("_WaitCancelFuture must not be cancelled")

    def set_result(self, result):
        super().set_result(result)
        if self._done_callback is not None:
            self._done_callback(self)

    def set_exception(self, exception):
        super().set_exception(exception)
        if self._done_callback is not None:
            self._done_callback(self)


class _WaitHandleFuture(_BaseWaitHandleFuture):
    def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
        super().__init__(ov, handle, wait_handle, loop=loop)
        self._proactor = proactor
        self._unregister_proactor = True
        self._event = _overlapped.CreateEvent(None, True, False, None)
        self._event_fut = None

    def _unregister_wait_cb(self, fut):
        if self._event is not None:
            _winapi.CloseHandle(self._event)
            self._event = None
            self._event_fut = None

        # If the wait was cancelled, the wait may never be signalled, so
        # it's required to unregister it. Otherwise, IocpProactor.close() will
        # wait forever for an event which will never come.
        #
        # If the IocpProactor already received the event, it's safe to call
        # _unregister() because we kept a reference to the Overlapped object
        # which is used as a unique key.
        self._proactor._unregister(self._ov)
        self._proactor = None

        super()._unregister_wait_cb(fut)

    def _unregister_wait(self):
        if not self._registered:
            return
        self._registered = False

        wait_handle = self._wait_handle
        self._wait_handle = None
        try:
            _overlapped.UnregisterWaitEx(wait_handle, self._event)
        except OSError as exc:
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
                return
            # ERROR_IO_PENDING is not an error, the wait was unregistered

        self._event_fut = self._proactor._wait_cancel(self._event,
                                                      self._unregister_wait_cb)


class PipeServer(object):
    """Class representing a pipe server.

    This is much like a bound, listening socket.
    """
    def __init__(self, address):
        self._address = address
        self._free_instances = weakref.WeakSet()
        # initialize the pipe attribute before calling _server_pipe_handle()
|
||||
# because this function can raise an exception and the destructor calls
|
||||
# the close() method
|
||||
self._pipe = None
|
||||
self._accept_pipe_future = None
|
||||
self._pipe = self._server_pipe_handle(True)
|
||||
|
||||
def _get_unconnected_pipe(self):
|
||||
# Create a new instance and return the previous one. This ensures
|
||||
# that (until the server is closed) there is always at least
|
||||
# one pipe handle for the address. Therefore if a client attempts
|
||||
# to connect it will not fail with FileNotFoundError.
|
||||
tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
|
||||
return tmp
|
||||
|
||||
def _server_pipe_handle(self, first):
|
||||
# Return a wrapper for a new pipe handle.
|
||||
if self.closed():
|
||||
return None
|
||||
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
|
||||
if first:
|
||||
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
|
||||
h = _winapi.CreateNamedPipe(
|
||||
self._address, flags,
|
||||
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
|
||||
_winapi.PIPE_WAIT,
|
||||
_winapi.PIPE_UNLIMITED_INSTANCES,
|
||||
windows_utils.BUFSIZE, windows_utils.BUFSIZE,
|
||||
_winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
|
||||
pipe = windows_utils.PipeHandle(h)
|
||||
self._free_instances.add(pipe)
|
||||
return pipe
|
||||
|
||||
def closed(self):
|
||||
return (self._address is None)
|
||||
|
||||
def close(self):
|
||||
if self._accept_pipe_future is not None:
|
||||
self._accept_pipe_future.cancel()
|
||||
self._accept_pipe_future = None
|
||||
# Close all instances which have not been connected to by a client.
|
||||
if self._address is not None:
|
||||
for pipe in self._free_instances:
|
||||
pipe.close()
|
||||
self._pipe = None
|
||||
self._address = None
|
||||
self._free_instances.clear()
|
||||
|
||||
__del__ = close
|
||||
|
||||
|
||||
class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
|
||||
"""Windows version of selector event loop."""
|
||||
|
||||
def _socketpair(self):
|
||||
return windows_utils.socketpair()
|
||||
|
||||
|
||||
class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
|
||||
"""Windows version of proactor event loop using IOCP."""
|
||||
|
||||
def __init__(self, proactor=None):
|
||||
if proactor is None:
|
||||
proactor = IocpProactor()
|
||||
super().__init__(proactor)
|
||||
|
||||
def _socketpair(self):
|
||||
return windows_utils.socketpair()
|
||||
|
||||
@coroutine
|
||||
def create_pipe_connection(self, protocol_factory, address):
|
||||
f = self._proactor.connect_pipe(address)
|
||||
pipe = yield from f
|
||||
protocol = protocol_factory()
|
||||
trans = self._make_duplex_pipe_transport(pipe, protocol,
|
||||
extra={'addr': address})
|
||||
return trans, protocol
|
||||
|
||||
@coroutine
|
||||
def start_serving_pipe(self, protocol_factory, address):
|
||||
server = PipeServer(address)
|
||||
|
||||
def loop_accept_pipe(f=None):
|
||||
pipe = None
|
||||
try:
|
||||
if f:
|
||||
pipe = f.result()
|
||||
server._free_instances.discard(pipe)
|
||||
|
||||
if server.closed():
|
||||
# A client connected before the server was closed:
|
||||
# drop the client (close the pipe) and exit
|
||||
pipe.close()
|
||||
return
|
||||
|
||||
protocol = protocol_factory()
|
||||
self._make_duplex_pipe_transport(
|
||||
pipe, protocol, extra={'addr': address})
|
||||
|
||||
pipe = server._get_unconnected_pipe()
|
||||
if pipe is None:
|
||||
return
|
||||
|
||||
f = self._proactor.accept_pipe(pipe)
|
||||
except OSError as exc:
|
||||
if pipe and pipe.fileno() != -1:
|
||||
self.call_exception_handler({
|
||||
'message': 'Pipe accept failed',
|
||||
'exception': exc,
|
||||
'pipe': pipe,
|
||||
})
|
||||
pipe.close()
|
||||
elif self._debug:
|
||||
logger.warning("Accept pipe failed on pipe %r",
|
||||
pipe, exc_info=True)
|
||||
except futures.CancelledError:
|
||||
if pipe:
|
||||
pipe.close()
|
||||
else:
|
||||
server._accept_pipe_future = f
|
||||
f.add_done_callback(loop_accept_pipe)
|
||||
|
||||
self.call_soon(loop_accept_pipe)
|
||||
return [server]
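These two coroutines pair up: start_serving_pipe() listens on a named-pipe address while create_pipe_connection() dials one. A minimal sketch of using them together on Python 3.6 (the pipe name and the echo protocol are illustrative, not part of this module):

import asyncio

PIPE_ADDRESS = r'\\.\pipe\example-echo'   # hypothetical pipe name

class Echo(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        self.transport.write(data)        # echo the bytes straight back

async def demo(loop):
    # Both APIs below exist only on ProactorEventLoop.
    [server] = await loop.start_serving_pipe(Echo, PIPE_ADDRESS)
    transport, _ = await loop.create_pipe_connection(asyncio.Protocol,
                                                     PIPE_ADDRESS)
    transport.write(b'ping')
    await asyncio.sleep(0.1)              # give the echo time to round-trip
    transport.close()
    server.close()

loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
loop.run_until_complete(demo(loop))
loop.close()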
|
||||
|
||||
@coroutine
|
||||
def _make_subprocess_transport(self, protocol, args, shell,
|
||||
stdin, stdout, stderr, bufsize,
|
||||
extra=None, **kwargs):
|
||||
waiter = self.create_future()
|
||||
transp = _WindowsSubprocessTransport(self, protocol, args, shell,
|
||||
stdin, stdout, stderr, bufsize,
|
||||
waiter=waiter, extra=extra,
|
||||
**kwargs)
|
||||
try:
|
||||
yield from waiter
|
||||
except Exception as exc:
|
||||
# Workaround CPython bug #23353: using yield/yield-from in an
|
||||
# except block of a generator doesn't clear properly sys.exc_info()
|
||||
err = exc
|
||||
else:
|
||||
err = None
|
||||
|
||||
if err is not None:
|
||||
transp.close()
|
||||
yield from transp._wait()
|
||||
raise err
|
||||
|
||||
return transp
|
||||
|
||||
|
||||
class IocpProactor:
|
||||
"""Proactor implementation using IOCP."""
|
||||
|
||||
def __init__(self, concurrency=0xffffffff):
|
||||
self._loop = None
|
||||
self._results = []
|
||||
self._iocp = _overlapped.CreateIoCompletionPort(
|
||||
_overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
|
||||
self._cache = {}
|
||||
self._registered = weakref.WeakSet()
|
||||
self._unregistered = []
|
||||
self._stopped_serving = weakref.WeakSet()
|
||||
|
||||
def __repr__(self):
|
||||
return ('<%s overlapped#=%s result#=%s>'
|
||||
% (self.__class__.__name__, len(self._cache),
|
||||
len(self._results)))
|
||||
|
||||
def set_loop(self, loop):
|
||||
self._loop = loop
|
||||
|
||||
def select(self, timeout=None):
|
||||
if not self._results:
|
||||
self._poll(timeout)
|
||||
tmp = self._results
|
||||
self._results = []
|
||||
return tmp
|
||||
|
||||
def _result(self, value):
|
||||
fut = self._loop.create_future()
|
||||
fut.set_result(value)
|
||||
return fut
|
||||
|
||||
def recv(self, conn, nbytes, flags=0):
|
||||
self._register_with_iocp(conn)
|
||||
ov = _overlapped.Overlapped(NULL)
|
||||
try:
|
||||
if isinstance(conn, socket.socket):
|
||||
ov.WSARecv(conn.fileno(), nbytes, flags)
|
||||
else:
|
||||
ov.ReadFile(conn.fileno(), nbytes)
|
||||
except BrokenPipeError:
|
||||
return self._result(b'')
|
||||
|
||||
def finish_recv(trans, key, ov):
|
||||
try:
|
||||
return ov.getresult()
|
||||
except OSError as exc:
|
||||
if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
|
||||
raise ConnectionResetError(*exc.args)
|
||||
else:
|
||||
raise
|
||||
|
||||
return self._register(ov, conn, finish_recv)
|
||||
|
||||
def send(self, conn, buf, flags=0):
|
||||
self._register_with_iocp(conn)
|
||||
ov = _overlapped.Overlapped(NULL)
|
||||
if isinstance(conn, socket.socket):
|
||||
ov.WSASend(conn.fileno(), buf, flags)
|
||||
else:
|
||||
ov.WriteFile(conn.fileno(), buf)
|
||||
|
||||
def finish_send(trans, key, ov):
|
||||
try:
|
||||
return ov.getresult()
|
||||
except OSError as exc:
|
||||
if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
|
||||
raise ConnectionResetError(*exc.args)
|
||||
else:
|
||||
raise
|
||||
|
||||
return self._register(ov, conn, finish_send)
|
||||
|
||||
def accept(self, listener):
|
||||
self._register_with_iocp(listener)
|
||||
conn = self._get_accept_socket(listener.family)
|
||||
ov = _overlapped.Overlapped(NULL)
|
||||
ov.AcceptEx(listener.fileno(), conn.fileno())
|
||||
|
||||
def finish_accept(trans, key, ov):
|
||||
ov.getresult()
|
||||
# Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
|
||||
buf = struct.pack('@P', listener.fileno())
|
||||
conn.setsockopt(socket.SOL_SOCKET,
|
||||
_overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
|
||||
conn.settimeout(listener.gettimeout())
|
||||
return conn, conn.getpeername()
|
||||
|
||||
@coroutine
|
||||
def accept_coro(future, conn):
|
||||
# Coroutine closing the accept socket if the future is cancelled
|
||||
try:
|
||||
yield from future
|
||||
except futures.CancelledError:
|
||||
conn.close()
|
||||
raise
|
||||
|
||||
future = self._register(ov, listener, finish_accept)
|
||||
coro = accept_coro(future, conn)
|
||||
tasks.ensure_future(coro, loop=self._loop)
|
||||
return future
|
||||
|
||||
def connect(self, conn, address):
|
||||
self._register_with_iocp(conn)
|
||||
# The socket needs to be locally bound before we call ConnectEx().
|
||||
try:
|
||||
_overlapped.BindLocal(conn.fileno(), conn.family)
|
||||
except OSError as e:
|
||||
if e.winerror != errno.WSAEINVAL:
|
||||
raise
|
||||
# Probably already locally bound; check using getsockname().
|
||||
if conn.getsockname()[1] == 0:
|
||||
raise
|
||||
ov = _overlapped.Overlapped(NULL)
|
||||
ov.ConnectEx(conn.fileno(), address)
|
||||
|
||||
def finish_connect(trans, key, ov):
|
||||
ov.getresult()
|
||||
# Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
|
||||
conn.setsockopt(socket.SOL_SOCKET,
|
||||
_overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
|
||||
return conn
|
||||
|
||||
return self._register(ov, conn, finish_connect)
|
||||
|
||||
def accept_pipe(self, pipe):
|
||||
self._register_with_iocp(pipe)
|
||||
ov = _overlapped.Overlapped(NULL)
|
||||
connected = ov.ConnectNamedPipe(pipe.fileno())
|
||||
|
||||
if connected:
|
||||
# ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED which means
|
||||
# that the pipe is connected. There is no need to wait for the
|
||||
# completion of the connection.
|
||||
return self._result(pipe)
|
||||
|
||||
def finish_accept_pipe(trans, key, ov):
|
||||
ov.getresult()
|
||||
return pipe
|
||||
|
||||
return self._register(ov, pipe, finish_accept_pipe)
|
||||
|
||||
@coroutine
|
||||
def connect_pipe(self, address):
|
||||
delay = CONNECT_PIPE_INIT_DELAY
|
||||
while True:
|
||||
# Unfortunately there is no way to do an overlapped connect to a pipe.
|
||||
# Call CreateFile() in a loop until it doesn't fail with
|
||||
# ERROR_PIPE_BUSY
|
||||
try:
|
||||
handle = _overlapped.ConnectPipe(address)
|
||||
break
|
||||
except OSError as exc:
|
||||
if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
|
||||
raise
|
||||
|
||||
# ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
|
||||
delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
|
||||
yield from tasks.sleep(delay, loop=self._loop)
|
||||
|
||||
return windows_utils.PipeHandle(handle)
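The loop above is plain exponential backoff: start at CONNECT_PIPE_INIT_DELAY, double after every ERROR_PIPE_BUSY, clamp at CONNECT_PIPE_MAX_DELAY. The same pattern isolated as a synchronous sketch, with a hypothetical try_connect() standing in for _overlapped.ConnectPipe() and BlockingIOError standing in for the busy error:

import time

def connect_with_backoff(try_connect, init_delay=0.001, max_delay=0.100):
    """Retry try_connect() until it stops signalling 'busy'."""
    delay = init_delay
    while True:
        try:
            return try_connect()
        except BlockingIOError:            # stand-in for ERROR_PIPE_BUSY
            time.sleep(delay)              # the asyncio version awaits here
            delay = min(delay * 2, max_delay)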
|
||||
|
||||
def wait_for_handle(self, handle, timeout=None):
|
||||
"""Wait for a handle.
|
||||
|
||||
Return a Future object. The result of the future is True if the wait
|
||||
completed, or False if the wait did not complete (on timeout).
|
||||
"""
|
||||
return self._wait_for_handle(handle, timeout, False)
|
||||
|
||||
def _wait_cancel(self, event, done_callback):
|
||||
fut = self._wait_for_handle(event, None, True)
|
||||
# add_done_callback() cannot be used because the wait may only complete
|
||||
# in IocpProactor.close(), while the event loop is not running.
|
||||
fut._done_callback = done_callback
|
||||
return fut
|
||||
|
||||
def _wait_for_handle(self, handle, timeout, _is_cancel):
|
||||
if timeout is None:
|
||||
ms = _winapi.INFINITE
|
||||
else:
|
||||
# RegisterWaitForSingleObject() has a resolution of 1 millisecond,
|
||||
# round away from zero to wait *at least* timeout seconds.
|
||||
ms = math.ceil(timeout * 1e3)
|
||||
|
||||
# We only create ov so we can use ov.address as a key for the cache.
|
||||
ov = _overlapped.Overlapped(NULL)
|
||||
wait_handle = _overlapped.RegisterWaitWithQueue(
|
||||
handle, self._iocp, ov.address, ms)
|
||||
if _is_cancel:
|
||||
f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
|
||||
else:
|
||||
f = _WaitHandleFuture(ov, handle, wait_handle, self,
|
||||
loop=self._loop)
|
||||
if f._source_traceback:
|
||||
del f._source_traceback[-1]
|
||||
|
||||
def finish_wait_for_handle(trans, key, ov):
|
||||
# Note that this second wait means that we should only use
|
||||
# this with handle types where a successful wait has no
|
||||
# effect. So events or processes are all right, but locks
|
||||
# or semaphores are not. Also note if the handle is
|
||||
# signalled and then quickly reset, we may return
|
||||
# False even though we have not timed out.
|
||||
return f._poll()
|
||||
|
||||
self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
|
||||
return f
|
||||
|
||||
def _register_with_iocp(self, obj):
|
||||
# To get notifications of finished ops on this object sent to the
|
||||
# completion port, we must register the handle.
|
||||
if obj not in self._registered:
|
||||
self._registered.add(obj)
|
||||
_overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
|
||||
# XXX We could also use SetFileCompletionNotificationModes()
|
||||
# to avoid sending notifications to completion port of ops
|
||||
# that succeed immediately.
|
||||
|
||||
def _register(self, ov, obj, callback):
|
||||
# Return a future which will be set with the result of the
|
||||
# operation when it completes. The future's value is actually
|
||||
# the value returned by callback().
|
||||
f = _OverlappedFuture(ov, loop=self._loop)
|
||||
if f._source_traceback:
|
||||
del f._source_traceback[-1]
|
||||
if not ov.pending:
|
||||
# The operation has completed, so no need to postpone the
|
||||
# work. We cannot take this short cut if we need the
|
||||
# NumberOfBytes, CompletionKey values returned by
|
||||
# PostQueuedCompletionStatus().
|
||||
try:
|
||||
value = callback(None, None, ov)
|
||||
except OSError as e:
|
||||
f.set_exception(e)
|
||||
else:
|
||||
f.set_result(value)
|
||||
# Even if GetOverlappedResult() was called, we have to wait for the
|
||||
# notification of the completion in GetQueuedCompletionStatus().
|
||||
# Register the overlapped operation to keep a reference to the
|
||||
# OVERLAPPED object, otherwise the memory is freed and Windows may
|
||||
# read uninitialized memory.
|
||||
|
||||
# Register the overlapped operation for later. Note that
|
||||
# we only store obj to prevent it from being garbage
|
||||
# collected too early.
|
||||
self._cache[ov.address] = (f, ov, obj, callback)
|
||||
return f
|
||||
|
||||
def _unregister(self, ov):
|
||||
"""Unregister an overlapped object.
|
||||
|
||||
Call this method when its future has been cancelled. The event can
|
||||
already be signalled (pending in the proactor event queue). It is also
|
||||
safe if the event is never signalled (because it was cancelled).
|
||||
"""
|
||||
self._unregistered.append(ov)
|
||||
|
||||
def _get_accept_socket(self, family):
|
||||
s = socket.socket(family)
|
||||
s.settimeout(0)
|
||||
return s
|
||||
|
||||
def _poll(self, timeout=None):
|
||||
if timeout is None:
|
||||
ms = INFINITE
|
||||
elif timeout < 0:
|
||||
raise ValueError("negative timeout")
|
||||
else:
|
||||
# GetQueuedCompletionStatus() has a resolution of 1 millisecond,
|
||||
# round away from zero to wait *at least* timeout seconds.
|
||||
ms = math.ceil(timeout * 1e3)
|
||||
if ms >= INFINITE:
|
||||
raise ValueError("timeout too big")
|
||||
|
||||
while True:
|
||||
status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
|
||||
if status is None:
|
||||
break
|
||||
ms = 0
|
||||
|
||||
err, transferred, key, address = status
|
||||
try:
|
||||
f, ov, obj, callback = self._cache.pop(address)
|
||||
except KeyError:
|
||||
if self._loop.get_debug():
|
||||
self._loop.call_exception_handler({
|
||||
'message': ('GetQueuedCompletionStatus() returned an '
|
||||
'unexpected event'),
|
||||
'status': ('err=%s transferred=%s key=%#x address=%#x'
|
||||
% (err, transferred, key, address)),
|
||||
})
|
||||
|
||||
# key is either zero, or it is used to return a pipe
|
||||
# handle which should be closed to avoid a leak.
|
||||
if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
|
||||
_winapi.CloseHandle(key)
|
||||
continue
|
||||
|
||||
if obj in self._stopped_serving:
|
||||
f.cancel()
|
||||
# Don't call the callback if _register() already read the result or
|
||||
# if the overlapped has been cancelled
|
||||
elif not f.done():
|
||||
try:
|
||||
value = callback(transferred, key, ov)
|
||||
except OSError as e:
|
||||
f.set_exception(e)
|
||||
self._results.append(f)
|
||||
else:
|
||||
f.set_result(value)
|
||||
self._results.append(f)
|
||||
|
||||
# Remove unregistered futures
|
||||
for ov in self._unregistered:
|
||||
self._cache.pop(ov.address, None)
|
||||
self._unregistered.clear()
|
||||
|
||||
def _stop_serving(self, obj):
|
||||
# obj is a socket or pipe handle. It will be closed in
|
||||
# BaseProactorEventLoop._stop_serving() which will make any
|
||||
# pending operations fail quickly.
|
||||
self._stopped_serving.add(obj)
|
||||
|
||||
def close(self):
|
||||
# Cancel remaining registered operations.
|
||||
for address, (fut, ov, obj, callback) in list(self._cache.items()):
|
||||
if fut.cancelled():
|
||||
# Nothing to do with cancelled futures
|
||||
pass
|
||||
elif isinstance(fut, _WaitCancelFuture):
|
||||
# _WaitCancelFuture must not be cancelled
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
fut.cancel()
|
||||
except OSError as exc:
|
||||
if self._loop is not None:
|
||||
context = {
|
||||
'message': 'Cancelling a future failed',
|
||||
'exception': exc,
|
||||
'future': fut,
|
||||
}
|
||||
if fut._source_traceback:
|
||||
context['source_traceback'] = fut._source_traceback
|
||||
self._loop.call_exception_handler(context)
|
||||
|
||||
while self._cache:
|
||||
if not self._poll(1):
|
||||
logger.debug('taking long time to close proactor')
|
||||
|
||||
self._results = []
|
||||
if self._iocp is not None:
|
||||
_winapi.CloseHandle(self._iocp)
|
||||
self._iocp = None
|
||||
|
||||
def __del__(self):
|
||||
self.close()
|
||||
|
||||
|
||||
class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):
|
||||
|
||||
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
|
||||
self._proc = windows_utils.Popen(
|
||||
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
|
||||
bufsize=bufsize, **kwargs)
|
||||
|
||||
def callback(f):
|
||||
returncode = self._proc.poll()
|
||||
self._process_exited(returncode)
|
||||
|
||||
f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
|
||||
f.add_done_callback(callback)
|
||||
|
||||
|
||||
SelectorEventLoop = _WindowsSelectorEventLoop
|
||||
|
||||
|
||||
class _WindowsDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
|
||||
_loop_factory = SelectorEventLoop
|
||||
|
||||
|
||||
DefaultEventLoopPolicy = _WindowsDefaultEventLoopPolicy
|
224
third_party/python/Lib/asyncio/windows_utils.py
vendored
Normal file
|
@ -0,0 +1,224 @@
|
|||
"""
|
||||
Various Windows specific bits and pieces
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
if sys.platform != 'win32': # pragma: no cover
|
||||
raise ImportError('win32 only')
|
||||
|
||||
import _winapi
|
||||
import itertools
|
||||
import msvcrt
|
||||
import os
|
||||
import socket
|
||||
import subprocess
|
||||
import tempfile
|
||||
import warnings
|
||||
|
||||
|
||||
__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']
|
||||
|
||||
|
||||
# Constants/globals
|
||||
|
||||
|
||||
BUFSIZE = 8192
|
||||
PIPE = subprocess.PIPE
|
||||
STDOUT = subprocess.STDOUT
|
||||
_mmap_counter = itertools.count()
|
||||
|
||||
|
||||
if hasattr(socket, 'socketpair'):
|
||||
# Since Python 3.5, socket.socketpair() is now also available on Windows
|
||||
socketpair = socket.socketpair
|
||||
else:
|
||||
# Replacement for socket.socketpair()
|
||||
def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
|
||||
"""A socket pair usable as a self-pipe, for Windows.
|
||||
|
||||
Origin: https://gist.github.com/4325783, by Geert Jansen.
|
||||
Public domain.
|
||||
"""
|
||||
if family == socket.AF_INET:
|
||||
host = '127.0.0.1'
|
||||
elif family == socket.AF_INET6:
|
||||
host = '::1'
|
||||
else:
|
||||
raise ValueError("Only AF_INET and AF_INET6 socket address "
|
||||
"families are supported")
|
||||
if type != socket.SOCK_STREAM:
|
||||
raise ValueError("Only SOCK_STREAM socket type is supported")
|
||||
if proto != 0:
|
||||
raise ValueError("Only protocol zero is supported")
|
||||
|
||||
# We create a connected TCP socket. Note the trick with setblocking(0)
|
||||
# that prevents us from having to create a thread.
|
||||
lsock = socket.socket(family, type, proto)
|
||||
try:
|
||||
lsock.bind((host, 0))
|
||||
lsock.listen(1)
|
||||
# On IPv6, ignore flow_info and scope_id
|
||||
addr, port = lsock.getsockname()[:2]
|
||||
csock = socket.socket(family, type, proto)
|
||||
try:
|
||||
csock.setblocking(False)
|
||||
try:
|
||||
csock.connect((addr, port))
|
||||
except (BlockingIOError, InterruptedError):
|
||||
pass
|
||||
csock.setblocking(True)
|
||||
ssock, _ = lsock.accept()
|
||||
except:
|
||||
csock.close()
|
||||
raise
|
||||
finally:
|
||||
lsock.close()
|
||||
return (ssock, csock)
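Either way the caller gets a connected pair, which asyncio uses as a self-pipe to wake up a blocked select(). A usage sketch (the import path is the Windows-only module this file defines; on POSIX socket.socketpair() is native):

from asyncio.windows_utils import socketpair

a, b = socketpair()              # two connected AF_INET stream sockets
a.sendall(b'wake up')
assert b.recv(7) == b'wake up'
a.close()
b.close()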
|
||||
|
||||
|
||||
# Replacement for os.pipe() using handles instead of fds
|
||||
|
||||
|
||||
def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
|
||||
"""Like os.pipe() but with overlapped support and using handles not fds."""
|
||||
address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
|
||||
(os.getpid(), next(_mmap_counter)))
|
||||
|
||||
if duplex:
|
||||
openmode = _winapi.PIPE_ACCESS_DUPLEX
|
||||
access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
|
||||
obsize, ibsize = bufsize, bufsize
|
||||
else:
|
||||
openmode = _winapi.PIPE_ACCESS_INBOUND
|
||||
access = _winapi.GENERIC_WRITE
|
||||
obsize, ibsize = 0, bufsize
|
||||
|
||||
openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
|
||||
|
||||
if overlapped[0]:
|
||||
openmode |= _winapi.FILE_FLAG_OVERLAPPED
|
||||
|
||||
if overlapped[1]:
|
||||
flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
|
||||
else:
|
||||
flags_and_attribs = 0
|
||||
|
||||
h1 = h2 = None
|
||||
try:
|
||||
h1 = _winapi.CreateNamedPipe(
|
||||
address, openmode, _winapi.PIPE_WAIT,
|
||||
1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
|
||||
|
||||
h2 = _winapi.CreateFile(
|
||||
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
|
||||
flags_and_attribs, _winapi.NULL)
|
||||
|
||||
ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
|
||||
ov.GetOverlappedResult(True)
|
||||
return h1, h2
|
||||
except:
|
||||
if h1 is not None:
|
||||
_winapi.CloseHandle(h1)
|
||||
if h2 is not None:
|
||||
_winapi.CloseHandle(h2)
|
||||
raise
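pipe() returns raw Windows HANDLEs rather than file descriptors; callers wrap them in PipeHandle (defined just below) for overlapped I/O, or convert them with msvcrt.open_osfhandle() for ordinary blocking use, as Popen does further down. A blocking-mode sketch:

import msvcrt
import os
from asyncio.windows_utils import pipe

rh, wh = pipe(overlapped=(False, False))      # plain blocking handles
rfd = msvcrt.open_osfhandle(rh, os.O_RDONLY)  # the fd now owns the handle
wfd = msvcrt.open_osfhandle(wh, 0)
os.write(wfd, b'hi')
os.close(wfd)
print(os.read(rfd, 2))                        # b'hi'
os.close(rfd)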
|
||||
|
||||
|
||||
# Wrapper for a pipe handle
|
||||
|
||||
|
||||
class PipeHandle:
|
||||
"""Wrapper for an overlapped pipe handle which is vaguely file-object like.
|
||||
|
||||
The IOCP event loop can use these instead of socket objects.
|
||||
"""
|
||||
def __init__(self, handle):
|
||||
self._handle = handle
|
||||
|
||||
def __repr__(self):
|
||||
if self._handle is not None:
|
||||
handle = 'handle=%r' % self._handle
|
||||
else:
|
||||
handle = 'closed'
|
||||
return '<%s %s>' % (self.__class__.__name__, handle)
|
||||
|
||||
@property
|
||||
def handle(self):
|
||||
return self._handle
|
||||
|
||||
def fileno(self):
|
||||
if self._handle is None:
|
||||
raise ValueError("I/O operatioon on closed pipe")
|
||||
return self._handle
|
||||
|
||||
def close(self, *, CloseHandle=_winapi.CloseHandle):
|
||||
if self._handle is not None:
|
||||
CloseHandle(self._handle)
|
||||
self._handle = None
|
||||
|
||||
def __del__(self):
|
||||
if self._handle is not None:
|
||||
warnings.warn("unclosed %r" % self, ResourceWarning,
|
||||
source=self)
|
||||
self.close()
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, t, v, tb):
|
||||
self.close()
|
||||
|
||||
|
||||
# Replacement for subprocess.Popen using overlapped pipe handles
|
||||
|
||||
|
||||
class Popen(subprocess.Popen):
|
||||
"""Replacement for subprocess.Popen using overlapped pipe handles.
|
||||
|
||||
The stdin, stdout, stderr are None or instances of PipeHandle.
|
||||
"""
|
||||
def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
|
||||
assert not kwds.get('universal_newlines')
|
||||
assert kwds.get('bufsize', 0) == 0
|
||||
stdin_rfd = stdout_wfd = stderr_wfd = None
|
||||
stdin_wh = stdout_rh = stderr_rh = None
|
||||
if stdin == PIPE:
|
||||
stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
|
||||
stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
|
||||
else:
|
||||
stdin_rfd = stdin
|
||||
if stdout == PIPE:
|
||||
stdout_rh, stdout_wh = pipe(overlapped=(True, False))
|
||||
stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
|
||||
else:
|
||||
stdout_wfd = stdout
|
||||
if stderr == PIPE:
|
||||
stderr_rh, stderr_wh = pipe(overlapped=(True, False))
|
||||
stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
|
||||
elif stderr == STDOUT:
|
||||
stderr_wfd = stdout_wfd
|
||||
else:
|
||||
stderr_wfd = stderr
|
||||
try:
|
||||
super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
|
||||
stderr=stderr_wfd, **kwds)
|
||||
except:
|
||||
for h in (stdin_wh, stdout_rh, stderr_rh):
|
||||
if h is not None:
|
||||
_winapi.CloseHandle(h)
|
||||
raise
|
||||
else:
|
||||
if stdin_wh is not None:
|
||||
self.stdin = PipeHandle(stdin_wh)
|
||||
if stdout_rh is not None:
|
||||
self.stdout = PipeHandle(stdout_rh)
|
||||
if stderr_rh is not None:
|
||||
self.stderr = PipeHandle(stderr_rh)
|
||||
finally:
|
||||
if stdin == PIPE:
|
||||
os.close(stdin_rfd)
|
||||
if stdout == PIPE:
|
||||
os.close(stdout_wfd)
|
||||
if stderr == PIPE:
|
||||
os.close(stderr_wfd)
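The net effect: this Popen looks like subprocess.Popen, but the pipe attributes are PipeHandle objects whose overlapped handles the IOCP proactor can drive. A small Windows-only sketch showing the types:

from asyncio import windows_utils

p = windows_utils.Popen(['python', '-c', 'print("hi")'],
                        stdout=windows_utils.PIPE)
# p.stdout wraps an overlapped pipe handle, not a file object; it is
# meant to be handed to the proactor, not read directly.
print(type(p.stdout))        # <class 'asyncio.windows_utils.PipeHandle'>
p.stdout.close()
p.wait()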
|
645
third_party/python/Lib/asyncore.py
vendored
Normal file
|
@ -0,0 +1,645 @@
|
|||
# -*- Mode: Python -*-
|
||||
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
|
||||
# Author: Sam Rushing <rushing@nightmare.com>
|
||||
|
||||
# ======================================================================
|
||||
# Copyright 1996 by Sam Rushing
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and
|
||||
# its documentation for any purpose and without fee is hereby
|
||||
# granted, provided that the above copyright notice appear in all
|
||||
# copies and that both that copyright notice and this permission
|
||||
# notice appear in supporting documentation, and that the name of Sam
|
||||
# Rushing not be used in advertising or publicity pertaining to
|
||||
# distribution of the software without specific, written prior
|
||||
# permission.
|
||||
#
|
||||
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
|
||||
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
||||
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
||||
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
# ======================================================================
|
||||
|
||||
"""Basic infrastructure for asynchronous socket service clients and servers.
|
||||
|
||||
There are only two ways to have a program on a single processor do "more
|
||||
than one thing at a time". Multi-threaded programming is the simplest and
|
||||
most popular way to do it, but there is another very different technique,
|
||||
that lets you have nearly all the advantages of multi-threading, without
|
||||
actually using multiple threads. It's really only practical if your program
|
||||
is largely I/O bound. If your program is CPU bound, then pre-emptive
|
||||
scheduled threads are probably what you really need. Network servers are
|
||||
rarely CPU-bound, however.
|
||||
|
||||
If your operating system supports the select() system call in its I/O
|
||||
library (and nearly all do), then you can use it to juggle multiple
|
||||
communication channels at once; doing other work while your I/O is taking
|
||||
place in the "background." Although this strategy can seem strange and
|
||||
complex, especially at first, it is in many ways easier to understand and
|
||||
control than multi-threaded programming. The module documented here solves
|
||||
many of the difficult problems for you, making the task of building
|
||||
sophisticated high-performance network servers and clients a snap.
|
||||
"""
|
||||
|
||||
import select
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
import os
|
||||
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
|
||||
ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
|
||||
errorcode
|
||||
|
||||
_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
|
||||
EBADF})
|
||||
|
||||
try:
|
||||
socket_map
|
||||
except NameError:
|
||||
socket_map = {}
|
||||
|
||||
def _strerror(err):
|
||||
try:
|
||||
return os.strerror(err)
|
||||
except (ValueError, OverflowError, NameError):
|
||||
if err in errorcode:
|
||||
return errorcode[err]
|
||||
return "Unknown error %s" %err
|
||||
|
||||
class ExitNow(Exception):
|
||||
pass
|
||||
|
||||
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
|
||||
|
||||
def read(obj):
|
||||
try:
|
||||
obj.handle_read_event()
|
||||
except _reraised_exceptions:
|
||||
raise
|
||||
except:
|
||||
obj.handle_error()
|
||||
|
||||
def write(obj):
|
||||
try:
|
||||
obj.handle_write_event()
|
||||
except _reraised_exceptions:
|
||||
raise
|
||||
except:
|
||||
obj.handle_error()
|
||||
|
||||
def _exception(obj):
|
||||
try:
|
||||
obj.handle_expt_event()
|
||||
except _reraised_exceptions:
|
||||
raise
|
||||
except:
|
||||
obj.handle_error()
|
||||
|
||||
def readwrite(obj, flags):
|
||||
try:
|
||||
if flags & select.POLLIN:
|
||||
obj.handle_read_event()
|
||||
if flags & select.POLLOUT:
|
||||
obj.handle_write_event()
|
||||
if flags & select.POLLPRI:
|
||||
obj.handle_expt_event()
|
||||
if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
|
||||
obj.handle_close()
|
||||
except OSError as e:
|
||||
if e.args[0] not in _DISCONNECTED:
|
||||
obj.handle_error()
|
||||
else:
|
||||
obj.handle_close()
|
||||
except _reraised_exceptions:
|
||||
raise
|
||||
except:
|
||||
obj.handle_error()
|
||||
|
||||
def poll(timeout=0.0, map=None):
|
||||
if map is None:
|
||||
map = socket_map
|
||||
if map:
|
||||
r = []; w = []; e = []
|
||||
for fd, obj in list(map.items()):
|
||||
is_r = obj.readable()
|
||||
is_w = obj.writable()
|
||||
if is_r:
|
||||
r.append(fd)
|
||||
# accepting sockets should not be writable
|
||||
if is_w and not obj.accepting:
|
||||
w.append(fd)
|
||||
if is_r or is_w:
|
||||
e.append(fd)
|
||||
if [] == r == w == e:
|
||||
time.sleep(timeout)
|
||||
return
|
||||
|
||||
r, w, e = select.select(r, w, e, timeout)
|
||||
|
||||
for fd in r:
|
||||
obj = map.get(fd)
|
||||
if obj is None:
|
||||
continue
|
||||
read(obj)
|
||||
|
||||
for fd in w:
|
||||
obj = map.get(fd)
|
||||
if obj is None:
|
||||
continue
|
||||
write(obj)
|
||||
|
||||
for fd in e:
|
||||
obj = map.get(fd)
|
||||
if obj is None:
|
||||
continue
|
||||
_exception(obj)
|
||||
|
||||
def poll2(timeout=0.0, map=None):
|
||||
# Use the poll() support added to the select module in Python 2.0
|
||||
if map is None:
|
||||
map = socket_map
|
||||
if timeout is not None:
|
||||
# timeout is in milliseconds
|
||||
timeout = int(timeout*1000)
|
||||
pollster = select.poll()
|
||||
if map:
|
||||
for fd, obj in list(map.items()):
|
||||
flags = 0
|
||||
if obj.readable():
|
||||
flags |= select.POLLIN | select.POLLPRI
|
||||
# accepting sockets should not be writable
|
||||
if obj.writable() and not obj.accepting:
|
||||
flags |= select.POLLOUT
|
||||
if flags:
|
||||
pollster.register(fd, flags)
|
||||
|
||||
r = pollster.poll(timeout)
|
||||
for fd, flags in r:
|
||||
obj = map.get(fd)
|
||||
if obj is None:
|
||||
continue
|
||||
readwrite(obj, flags)
|
||||
|
||||
poll3 = poll2 # Alias for backward compatibility
|
||||
|
||||
def loop(timeout=30.0, use_poll=False, map=None, count=None):
|
||||
if map is None:
|
||||
map = socket_map
|
||||
|
||||
if use_poll and hasattr(select, 'poll'):
|
||||
poll_fun = poll2
|
||||
else:
|
||||
poll_fun = poll
|
||||
|
||||
if count is None:
|
||||
while map:
|
||||
poll_fun(timeout, map)
|
||||
|
||||
else:
|
||||
while map and count > 0:
|
||||
poll_fun(timeout, map)
|
||||
count = count - 1
|
||||
|
||||
class dispatcher:
|
||||
|
||||
debug = False
|
||||
connected = False
|
||||
accepting = False
|
||||
connecting = False
|
||||
closing = False
|
||||
addr = None
|
||||
ignore_log_types = frozenset({'warning'})
|
||||
|
||||
def __init__(self, sock=None, map=None):
|
||||
if map is None:
|
||||
self._map = socket_map
|
||||
else:
|
||||
self._map = map
|
||||
|
||||
self._fileno = None
|
||||
|
||||
if sock:
|
||||
# Set to nonblocking just to make sure for cases where we
|
||||
# get a socket from a blocking source.
|
||||
sock.setblocking(0)
|
||||
self.set_socket(sock, map)
|
||||
self.connected = True
|
||||
# The constructor no longer requires that the socket
|
||||
# passed be connected.
|
||||
try:
|
||||
self.addr = sock.getpeername()
|
||||
except OSError as err:
|
||||
if err.args[0] in (ENOTCONN, EINVAL):
|
||||
# To handle the case where we got an unconnected
|
||||
# socket.
|
||||
self.connected = False
|
||||
else:
|
||||
# The socket is broken in some unknown way, alert
|
||||
# the user and remove it from the map (to prevent
|
||||
# polling of broken sockets).
|
||||
self.del_channel(map)
|
||||
raise
|
||||
else:
|
||||
self.socket = None
|
||||
|
||||
def __repr__(self):
|
||||
status = [self.__class__.__module__+"."+self.__class__.__qualname__]
|
||||
if self.accepting and self.addr:
|
||||
status.append('listening')
|
||||
elif self.connected:
|
||||
status.append('connected')
|
||||
if self.addr is not None:
|
||||
try:
|
||||
status.append('%s:%d' % self.addr)
|
||||
except TypeError:
|
||||
status.append(repr(self.addr))
|
||||
return '<%s at %#x>' % (' '.join(status), id(self))
|
||||
|
||||
__str__ = __repr__
|
||||
|
||||
def add_channel(self, map=None):
|
||||
#self.log_info('adding channel %s' % self)
|
||||
if map is None:
|
||||
map = self._map
|
||||
map[self._fileno] = self
|
||||
|
||||
def del_channel(self, map=None):
|
||||
fd = self._fileno
|
||||
if map is None:
|
||||
map = self._map
|
||||
if fd in map:
|
||||
#self.log_info('closing channel %d:%s' % (fd, self))
|
||||
del map[fd]
|
||||
self._fileno = None
|
||||
|
||||
def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM):
|
||||
self.family_and_type = family, type
|
||||
sock = socket.socket(family, type)
|
||||
sock.setblocking(0)
|
||||
self.set_socket(sock)
|
||||
|
||||
def set_socket(self, sock, map=None):
|
||||
self.socket = sock
|
||||
## self.__dict__['socket'] = sock
|
||||
self._fileno = sock.fileno()
|
||||
self.add_channel(map)
|
||||
|
||||
def set_reuse_addr(self):
|
||||
# try to re-use a server port if possible
|
||||
try:
|
||||
self.socket.setsockopt(
|
||||
socket.SOL_SOCKET, socket.SO_REUSEADDR,
|
||||
self.socket.getsockopt(socket.SOL_SOCKET,
|
||||
socket.SO_REUSEADDR) | 1
|
||||
)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
# ==================================================
|
||||
# predicates for select()
|
||||
# these are used as filters for the lists of sockets
|
||||
# to pass to select().
|
||||
# ==================================================
|
||||
|
||||
def readable(self):
|
||||
return True
|
||||
|
||||
def writable(self):
|
||||
return True
|
||||
|
||||
# ==================================================
|
||||
# socket object methods.
|
||||
# ==================================================
|
||||
|
||||
def listen(self, num):
|
||||
self.accepting = True
|
||||
if os.name == 'nt' and num > 5:
|
||||
num = 5
|
||||
return self.socket.listen(num)
|
||||
|
||||
def bind(self, addr):
|
||||
self.addr = addr
|
||||
return self.socket.bind(addr)
|
||||
|
||||
def connect(self, address):
|
||||
self.connected = False
|
||||
self.connecting = True
|
||||
err = self.socket.connect_ex(address)
|
||||
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
|
||||
or err == EINVAL and os.name == 'nt':
|
||||
self.addr = address
|
||||
return
|
||||
if err in (0, EISCONN):
|
||||
self.addr = address
|
||||
self.handle_connect_event()
|
||||
else:
|
||||
raise OSError(err, errorcode[err])
|
||||
|
||||
def accept(self):
|
||||
# XXX can return either an address pair or None
|
||||
try:
|
||||
conn, addr = self.socket.accept()
|
||||
except TypeError:
|
||||
return None
|
||||
except OSError as why:
|
||||
if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
|
||||
return None
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
return conn, addr
|
||||
|
||||
def send(self, data):
|
||||
try:
|
||||
result = self.socket.send(data)
|
||||
return result
|
||||
except OSError as why:
|
||||
if why.args[0] == EWOULDBLOCK:
|
||||
return 0
|
||||
elif why.args[0] in _DISCONNECTED:
|
||||
self.handle_close()
|
||||
return 0
|
||||
else:
|
||||
raise
|
||||
|
||||
def recv(self, buffer_size):
|
||||
try:
|
||||
data = self.socket.recv(buffer_size)
|
||||
if not data:
|
||||
# a closed connection is indicated by signaling
|
||||
# a read condition, and having recv() return 0.
|
||||
self.handle_close()
|
||||
return b''
|
||||
else:
|
||||
return data
|
||||
except OSError as why:
|
||||
# winsock sometimes raises ENOTCONN
|
||||
if why.args[0] in _DISCONNECTED:
|
||||
self.handle_close()
|
||||
return b''
|
||||
else:
|
||||
raise
|
||||
|
||||
def close(self):
|
||||
self.connected = False
|
||||
self.accepting = False
|
||||
self.connecting = False
|
||||
self.del_channel()
|
||||
if self.socket is not None:
|
||||
try:
|
||||
self.socket.close()
|
||||
except OSError as why:
|
||||
if why.args[0] not in (ENOTCONN, EBADF):
|
||||
raise
|
||||
|
||||
# log and log_info may be overridden to provide more sophisticated
|
||||
# logging and warning methods. In general, log is for 'hit' logging
|
||||
# and 'log_info' is for informational, warning and error logging.
|
||||
|
||||
def log(self, message):
|
||||
sys.stderr.write('log: %s\n' % str(message))
|
||||
|
||||
def log_info(self, message, type='info'):
|
||||
if type not in self.ignore_log_types:
|
||||
print('%s: %s' % (type, message))
|
||||
|
||||
def handle_read_event(self):
|
||||
if self.accepting:
|
||||
# accepting sockets are never connected, they "spawn" new
|
||||
# sockets that are connected
|
||||
self.handle_accept()
|
||||
elif not self.connected:
|
||||
if self.connecting:
|
||||
self.handle_connect_event()
|
||||
self.handle_read()
|
||||
else:
|
||||
self.handle_read()
|
||||
|
||||
def handle_connect_event(self):
|
||||
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
|
||||
if err != 0:
|
||||
raise OSError(err, _strerror(err))
|
||||
self.handle_connect()
|
||||
self.connected = True
|
||||
self.connecting = False
|
||||
|
||||
def handle_write_event(self):
|
||||
if self.accepting:
|
||||
# Accepting sockets shouldn't get a write event.
|
||||
# We will pretend it didn't happen.
|
||||
return
|
||||
|
||||
if not self.connected:
|
||||
if self.connecting:
|
||||
self.handle_connect_event()
|
||||
self.handle_write()
|
||||
|
||||
def handle_expt_event(self):
|
||||
# handle_expt_event() is called if there might be an error on the
|
||||
# socket, or if there is OOB data
|
||||
# check for the error condition first
|
||||
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
|
||||
if err != 0:
|
||||
# we can get here when select.select() says that there is an
|
||||
# exceptional condition on the socket
|
||||
# since there is an error, we'll go ahead and close the socket
|
||||
# like we would in a subclassed handle_read() that received no
|
||||
# data
|
||||
self.handle_close()
|
||||
else:
|
||||
self.handle_expt()
|
||||
|
||||
def handle_error(self):
|
||||
nil, t, v, tbinfo = compact_traceback()
|
||||
|
||||
# sometimes a user repr method will crash.
|
||||
try:
|
||||
self_repr = repr(self)
|
||||
except:
|
||||
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
|
||||
|
||||
self.log_info(
|
||||
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
|
||||
self_repr,
|
||||
t,
|
||||
v,
|
||||
tbinfo
|
||||
),
|
||||
'error'
|
||||
)
|
||||
self.handle_close()
|
||||
|
||||
def handle_expt(self):
|
||||
self.log_info('unhandled incoming priority event', 'warning')
|
||||
|
||||
def handle_read(self):
|
||||
self.log_info('unhandled read event', 'warning')
|
||||
|
||||
def handle_write(self):
|
||||
self.log_info('unhandled write event', 'warning')
|
||||
|
||||
def handle_connect(self):
|
||||
self.log_info('unhandled connect event', 'warning')
|
||||
|
||||
def handle_accept(self):
|
||||
pair = self.accept()
|
||||
if pair is not None:
|
||||
self.handle_accepted(*pair)
|
||||
|
||||
def handle_accepted(self, sock, addr):
|
||||
sock.close()
|
||||
self.log_info('unhandled accepted event', 'warning')
|
||||
|
||||
def handle_close(self):
|
||||
self.log_info('unhandled close event', 'warning')
|
||||
self.close()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# adds simple buffered output capability, useful for simple clients.
|
||||
# [for more sophisticated usage use asynchat.async_chat]
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class dispatcher_with_send(dispatcher):
|
||||
|
||||
def __init__(self, sock=None, map=None):
|
||||
dispatcher.__init__(self, sock, map)
|
||||
self.out_buffer = b''
|
||||
|
||||
def initiate_send(self):
|
||||
num_sent = dispatcher.send(self, self.out_buffer[:65536])
|
||||
self.out_buffer = self.out_buffer[num_sent:]
|
||||
|
||||
def handle_write(self):
|
||||
self.initiate_send()
|
||||
|
||||
def writable(self):
|
||||
return (not self.connected) or len(self.out_buffer)
|
||||
|
||||
def send(self, data):
|
||||
if self.debug:
|
||||
self.log_info('sending %s' % repr(data))
|
||||
self.out_buffer = self.out_buffer + data
|
||||
self.initiate_send()
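A subclass only has to override the handle_*() hooks; registration in socket_map and the select() plumbing are inherited. A minimal echo server sketch built on the two classes above:

import asyncore
import socket

class EchoHandler(asyncore.dispatcher_with_send):
    def handle_read(self):
        data = self.recv(8192)
        if data:
            self.send(data)               # buffered echo

class EchoServer(asyncore.dispatcher):
    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)

    def handle_accepted(self, sock, addr):
        EchoHandler(sock)                 # one handler per connection

server = EchoServer('localhost', 8007)
asyncore.loop()                           # runs until socket_map empties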
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# used for debugging.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def compact_traceback():
|
||||
t, v, tb = sys.exc_info()
|
||||
tbinfo = []
|
||||
if not tb: # Must have a traceback
|
||||
raise AssertionError("traceback does not exist")
|
||||
while tb:
|
||||
tbinfo.append((
|
||||
tb.tb_frame.f_code.co_filename,
|
||||
tb.tb_frame.f_code.co_name,
|
||||
str(tb.tb_lineno)
|
||||
))
|
||||
tb = tb.tb_next
|
||||
|
||||
# just to be safe
|
||||
del tb
|
||||
|
||||
file, function, line = tbinfo[-1]
|
||||
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
|
||||
return (file, function, line), t, v, info
|
||||
|
||||
def close_all(map=None, ignore_all=False):
|
||||
if map is None:
|
||||
map = socket_map
|
||||
for x in list(map.values()):
|
||||
try:
|
||||
x.close()
|
||||
except OSError as x:
|
||||
if x.args[0] == EBADF:
|
||||
pass
|
||||
elif not ignore_all:
|
||||
raise
|
||||
except _reraised_exceptions:
|
||||
raise
|
||||
except:
|
||||
if not ignore_all:
|
||||
raise
|
||||
map.clear()
|
||||
|
||||
# Asynchronous File I/O:
|
||||
#
|
||||
# After a little research (reading man pages on various unixen, and
|
||||
# digging through the linux kernel), I've determined that select()
|
||||
# isn't meant for doing asynchronous file i/o.
|
||||
# Heartening, though - reading linux/mm/filemap.c shows that linux
|
||||
# supports asynchronous read-ahead. So _MOST_ of the time, the data
|
||||
# will be sitting in memory for us already when we go to read it.
|
||||
#
|
||||
# What other OS's (besides NT) support async file i/o? [VMS?]
|
||||
#
|
||||
# Regardless, this is useful for pipes, and stdin/stdout...
|
||||
|
||||
if os.name == 'posix':
|
||||
class file_wrapper:
|
||||
# Here we override just enough to make a file
|
||||
# look like a socket for the purposes of asyncore.
|
||||
# The passed fd is automatically os.dup()'d
|
||||
|
||||
def __init__(self, fd):
|
||||
self.fd = os.dup(fd)
|
||||
|
||||
def __del__(self):
|
||||
if self.fd >= 0:
|
||||
warnings.warn("unclosed file %r" % self, ResourceWarning,
|
||||
source=self)
|
||||
self.close()
|
||||
|
||||
def recv(self, *args):
|
||||
return os.read(self.fd, *args)
|
||||
|
||||
def send(self, *args):
|
||||
return os.write(self.fd, *args)
|
||||
|
||||
def getsockopt(self, level, optname, buflen=None):
|
||||
if (level == socket.SOL_SOCKET and
|
||||
optname == socket.SO_ERROR and
|
||||
not buflen):
|
||||
return 0
|
||||
raise NotImplementedError("Only asyncore specific behaviour "
|
||||
"implemented.")
|
||||
|
||||
read = recv
|
||||
write = send
|
||||
|
||||
def close(self):
|
||||
if self.fd < 0:
|
||||
return
|
||||
fd = self.fd
|
||||
self.fd = -1
|
||||
os.close(fd)
|
||||
|
||||
def fileno(self):
|
||||
return self.fd
|
||||
|
||||
class file_dispatcher(dispatcher):
|
||||
|
||||
def __init__(self, fd, map=None):
|
||||
dispatcher.__init__(self, None, map)
|
||||
self.connected = True
|
||||
try:
|
||||
fd = fd.fileno()
|
||||
except AttributeError:
|
||||
pass
|
||||
self.set_file(fd)
|
||||
# set it to non-blocking mode
|
||||
os.set_blocking(fd, False)
|
||||
|
||||
def set_file(self, fd):
|
||||
self.socket = file_wrapper(fd)
|
||||
self._fileno = self.socket.fileno()
|
||||
self.add_channel()
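file_wrapper and file_dispatcher let the same loop watch non-socket fds on POSIX, such as a pipe or stdin. A sketch that echoes whatever arrives on stdin:

import asyncore
import sys

class StdinWatcher(asyncore.file_dispatcher):
    def writable(self):
        return False                      # read-only; skip write polling

    def handle_read(self):
        data = self.recv(4096)
        if data:
            sys.stdout.write('got: ' + data.decode('utf-8', 'replace'))
            sys.stdout.flush()

if sys.platform != 'win32':               # file_dispatcher is POSIX-only
    StdinWatcher(sys.stdin)
    asyncore.loop()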
|
595
third_party/python/Lib/base64.py
vendored
Executable file
|
@ -0,0 +1,595 @@
|
|||
#! /usr/bin/env python3
|
||||
|
||||
"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings"""
|
||||
|
||||
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
|
||||
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
|
||||
# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
|
||||
|
||||
import re
|
||||
import struct
|
||||
import binascii
|
||||
|
||||
|
||||
__all__ = [
|
||||
# Legacy interface exports traditional RFC 2045 Base64 encodings
|
||||
'encode', 'decode', 'encodebytes', 'decodebytes',
|
||||
# Generalized interface for other encodings
|
||||
'b64encode', 'b64decode', 'b32encode', 'b32decode',
|
||||
'b16encode', 'b16decode',
|
||||
# Base85 and Ascii85 encodings
|
||||
'b85encode', 'b85decode', 'a85encode', 'a85decode',
|
||||
# Standard Base64 encoding
|
||||
'standard_b64encode', 'standard_b64decode',
|
||||
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
|
||||
# starting at:
|
||||
#
|
||||
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
|
||||
'urlsafe_b64encode', 'urlsafe_b64decode',
|
||||
]
|
||||
|
||||
|
||||
bytes_types = (bytes, bytearray) # Types acceptable as binary data
|
||||
|
||||
def _bytes_from_decode_data(s):
|
||||
if isinstance(s, str):
|
||||
try:
|
||||
return s.encode('ascii')
|
||||
except UnicodeEncodeError:
|
||||
raise ValueError('string argument should contain only ASCII characters')
|
||||
if isinstance(s, bytes_types):
|
||||
return s
|
||||
try:
|
||||
return memoryview(s).tobytes()
|
||||
except TypeError:
|
||||
raise TypeError("argument should be a bytes-like object or ASCII "
|
||||
"string, not %r" % s.__class__.__name__) from None
|
||||
|
||||
|
||||
# Base64 encoding/decoding uses binascii
|
||||
|
||||
def b64encode(s, altchars=None):
|
||||
"""Encode the bytes-like object s using Base64 and return a bytes object.
|
||||
|
||||
Optional altchars should be a byte string of length 2 which specifies an
|
||||
alternative alphabet for the '+' and '/' characters. This allows an
|
||||
application to e.g. generate url or filesystem safe Base64 strings.
|
||||
"""
|
||||
encoded = binascii.b2a_base64(s, newline=False)
|
||||
if altchars is not None:
|
||||
assert len(altchars) == 2, repr(altchars)
|
||||
return encoded.translate(bytes.maketrans(b'+/', altchars))
|
||||
return encoded
|
||||
|
||||
|
||||
def b64decode(s, altchars=None, validate=False):
|
||||
"""Decode the Base64 encoded bytes-like object or ASCII string s.
|
||||
|
||||
Optional altchars must be a bytes-like object or ASCII string of length 2
|
||||
which specifies the alternative alphabet used instead of the '+' and '/'
|
||||
characters.
|
||||
|
||||
The result is returned as a bytes object. A binascii.Error is raised if
|
||||
s is incorrectly padded.
|
||||
|
||||
If validate is False (the default), characters that are neither in the
|
||||
normal base-64 alphabet nor the alternative alphabet are discarded prior
|
||||
to the padding check. If validate is True, these non-alphabet characters
|
||||
in the input result in a binascii.Error.
|
||||
"""
|
||||
s = _bytes_from_decode_data(s)
|
||||
if altchars is not None:
|
||||
altchars = _bytes_from_decode_data(altchars)
|
||||
assert len(altchars) == 2, repr(altchars)
|
||||
s = s.translate(bytes.maketrans(altchars, b'+/'))
|
||||
if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
|
||||
raise binascii.Error('Non-base64 digit found')
|
||||
return binascii.a2b_base64(s)
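A round-trip sketch of the options described above (the byte values are chosen so every output character comes from the swappable part of the alphabet):

import base64
import binascii

raw = b'\xfb\xef\xbe'                        # encodes to four '+' signs
assert base64.b64encode(raw) == b'++++'
alt = base64.b64encode(raw, altchars=b'-_')  # URL/filesystem-safe style
assert alt == b'----'
assert base64.b64decode(alt, altchars=b'-_') == raw
try:
    base64.b64decode(b'ab\ncd', validate=True)   # stray newline
except binascii.Error:
    pass    # rejected; without validate=True it would just be discarded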
|
||||
|
||||
|
||||
def standard_b64encode(s):
|
||||
"""Encode bytes-like object s using the standard Base64 alphabet.
|
||||
|
||||
The result is returned as a bytes object.
|
||||
"""
|
||||
return b64encode(s)
|
||||
|
||||
def standard_b64decode(s):
|
||||
"""Decode bytes encoded with the standard Base64 alphabet.
|
||||
|
||||
Argument s is a bytes-like object or ASCII string to decode. The result
|
||||
is returned as a bytes object. A binascii.Error is raised if the input
|
||||
is incorrectly padded. Characters that are not in the standard alphabet
|
||||
are discarded prior to the padding check.
|
||||
"""
|
||||
return b64decode(s)
|
||||
|
||||
|
||||
_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
|
||||
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
|
||||
|
||||
def urlsafe_b64encode(s):
|
||||
"""Encode bytes using the URL- and filesystem-safe Base64 alphabet.
|
||||
|
||||
Argument s is a bytes-like object to encode. The result is returned as a
|
||||
bytes object. The alphabet uses '-' instead of '+' and '_' instead of
|
||||
'/'.
|
||||
"""
|
||||
return b64encode(s).translate(_urlsafe_encode_translation)
|
||||
|
||||
def urlsafe_b64decode(s):
|
||||
"""Decode bytes using the URL- and filesystem-safe Base64 alphabet.
|
||||
|
||||
Argument s is a bytes-like object or ASCII string to decode. The result
|
||||
is returned as a bytes object. A binascii.Error is raised if the input
|
||||
is incorrectly padded. Characters that are not in the URL-safe base-64
|
||||
alphabet, and are not a plus '+' or slash '/', are discarded prior to the
|
||||
padding check.
|
||||
|
||||
The alphabet uses '-' instead of '+' and '_' instead of '/'.
|
||||
"""
|
||||
s = _bytes_from_decode_data(s)
|
||||
s = s.translate(_urlsafe_decode_translation)
|
||||
return b64decode(s)
|
||||
|
||||
|
||||
|
||||
# Base32 encoding/decoding must be done in Python
|
||||
_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
|
||||
_b32tab2 = None
|
||||
_b32rev = None
|
||||
|
||||
def b32encode(s):
|
||||
"""Encode the bytes-like object s using Base32 and return a bytes object.
|
||||
"""
|
||||
global _b32tab2
|
||||
# Delay the initialization of the table to not waste memory
|
||||
# if the function is never called
|
||||
if _b32tab2 is None:
|
||||
b32tab = [bytes((i,)) for i in _b32alphabet]
|
||||
_b32tab2 = [a + b for a in b32tab for b in b32tab]
|
||||
b32tab = None
|
||||
|
||||
if not isinstance(s, bytes_types):
|
||||
s = memoryview(s).tobytes()
|
||||
leftover = len(s) % 5
|
||||
# Pad the last quantum with zero bits if necessary
|
||||
if leftover:
|
||||
s = s + b'\0' * (5 - leftover) # Don't use += !
|
||||
encoded = bytearray()
|
||||
from_bytes = int.from_bytes
|
||||
b32tab2 = _b32tab2
|
||||
for i in range(0, len(s), 5):
|
||||
c = from_bytes(s[i: i + 5], 'big')
|
||||
encoded += (b32tab2[c >> 30] + # bits 1 - 10
|
||||
b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20
|
||||
b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30
|
||||
b32tab2[c & 0x3ff] # bits 31 - 40
|
||||
)
|
||||
# Adjust for any leftover partial quanta
|
||||
if leftover == 1:
|
||||
encoded[-6:] = b'======'
|
||||
elif leftover == 2:
|
||||
encoded[-4:] = b'===='
|
||||
elif leftover == 3:
|
||||
encoded[-3:] = b'==='
|
||||
elif leftover == 4:
|
||||
encoded[-1:] = b'='
|
||||
return bytes(encoded)
|
||||
|
||||
def b32decode(s, casefold=False, map01=None):
|
||||
"""Decode the Base32 encoded bytes-like object or ASCII string s.
|
||||
|
||||
Optional casefold is a flag specifying whether a lowercase alphabet is
|
||||
acceptable as input. For security purposes, the default is False.
|
||||
|
||||
RFC 3548 allows for optional mapping of the digit 0 (zero) to the
|
||||
letter O (oh), and for optional mapping of the digit 1 (one) to
|
||||
either the letter I (eye) or letter L (el). The optional argument
|
||||
map01 when not None, specifies which letter the digit 1 should be
|
||||
mapped to (when map01 is not None, the digit 0 is always mapped to
|
||||
the letter O). For security purposes the default is None, so that
|
||||
0 and 1 are not allowed in the input.
|
||||
|
||||
The result is returned as a bytes object. A binascii.Error is raised if
|
||||
the input is incorrectly padded or if there are non-alphabet
|
||||
characters present in the input.
|
||||
"""
|
||||
global _b32rev
|
||||
# Delay the initialization of the table to not waste memory
|
||||
# if the function is never called
|
||||
if _b32rev is None:
|
||||
_b32rev = {v: k for k, v in enumerate(_b32alphabet)}
|
||||
s = _bytes_from_decode_data(s)
|
||||
if len(s) % 8:
|
||||
raise binascii.Error('Incorrect padding')
|
||||
# Handle section 2.4 zero and one mapping. The argument map01 will be either
|
||||
# None, or the character to map the digit 1 (one) to. It should be
|
||||
# either L (el) or I (eye).
|
||||
if map01 is not None:
|
||||
map01 = _bytes_from_decode_data(map01)
|
||||
assert len(map01) == 1, repr(map01)
|
||||
s = s.translate(bytes.maketrans(b'01', b'O' + map01))
|
||||
if casefold:
|
||||
s = s.upper()
|
||||
# Strip off pad characters from the right. We need to count the pad
|
||||
# characters because this will tell us how many null bytes to remove from
|
||||
# the end of the decoded string.
|
||||
l = len(s)
|
||||
s = s.rstrip(b'=')
|
||||
padchars = l - len(s)
|
||||
# Now decode the full quanta
|
||||
decoded = bytearray()
|
||||
b32rev = _b32rev
|
||||
for i in range(0, len(s), 8):
|
||||
quanta = s[i: i + 8]
|
||||
acc = 0
|
||||
try:
|
||||
for c in quanta:
|
||||
acc = (acc << 5) + b32rev[c]
|
||||
except KeyError:
|
||||
raise binascii.Error('Non-base32 digit found') from None
|
||||
decoded += acc.to_bytes(5, 'big')
|
||||
# Process the last, partial quanta
|
||||
if l % 8 or padchars not in {0, 1, 3, 4, 6}:
|
||||
raise binascii.Error('Incorrect padding')
|
||||
if padchars and decoded:
|
||||
acc <<= 5 * padchars
|
||||
last = acc.to_bytes(5, 'big')
|
||||
leftover = (43 - 5 * padchars) // 8 # 1: 4, 3: 3, 4: 2, 6: 1
|
||||
decoded[-5:] = last[:leftover]
|
||||
return bytes(decoded)
|
||||
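# Illustrative doctest-style example (not part of the original module):
#
#     >>> b32decode(b'NBUQ====')
#     b'hi'
#     >>> b32decode(b'nbuq====', casefold=True)
#     b'hi'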
|
||||
|
||||
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
|
||||
# lowercase. The RFC also recommends against accepting input case
|
||||
# insensitively.
|
||||
def b16encode(s):
|
||||
"""Encode the bytes-like object s using Base16 and return a bytes object.
|
||||
"""
|
||||
return binascii.hexlify(s).upper()
|
||||
|
||||
|
||||
def b16decode(s, casefold=False):
|
||||
"""Decode the Base16 encoded bytes-like object or ASCII string s.
|
||||
|
||||
Optional casefold is a flag specifying whether a lowercase alphabet is
|
||||
acceptable as input. For security purposes, the default is False.
|
||||
|
||||
The result is returned as a bytes object. A binascii.Error is raised if
|
||||
s is incorrectly padded or if there are non-alphabet characters present
|
||||
in the input.
|
||||
"""
|
||||
s = _bytes_from_decode_data(s)
|
||||
if casefold:
|
||||
s = s.upper()
|
||||
if re.search(b'[^0-9A-F]', s):
|
||||
raise binascii.Error('Non-base16 digit found')
|
||||
return binascii.unhexlify(s)
|
||||
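# Illustrative doctest-style example (not part of the original module):
#
#     >>> b16encode(b'\xfa\xce')
#     b'FACE'
#     >>> b16decode(b'face', casefold=True)
#     b'\xfa\xce'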
|
||||
#
|
||||
# Ascii85 encoding/decoding
|
||||
#
|
||||
|
||||
_a85chars = None
|
||||
_a85chars2 = None
|
||||
_A85START = b"<~"
|
||||
_A85END = b"~>"
|
||||
|
||||
def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False):
|
||||
# Helper function for a85encode and b85encode
|
||||
if not isinstance(b, bytes_types):
|
||||
b = memoryview(b).tobytes()
|
||||
|
||||
padding = (-len(b)) % 4
|
||||
if padding:
|
||||
b = b + b'\0' * padding
|
||||
words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b)
|
||||
|
||||
chunks = [b'z' if foldnuls and not word else
|
||||
b'y' if foldspaces and word == 0x20202020 else
|
||||
(chars2[word // 614125] +
|
||||
chars2[word // 85 % 7225] +
|
||||
chars[word % 85])
|
||||
for word in words]
|
||||
|
||||
if padding and not pad:
|
||||
if chunks[-1] == b'z':
|
||||
chunks[-1] = chars[0] * 5
|
||||
chunks[-1] = chunks[-1][:-padding]
|
||||
|
||||
return b''.join(chunks)
|
||||
|
||||
def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False):
|
||||
"""Encode bytes-like object b using Ascii85 and return a bytes object.
|
||||
|
||||
foldspaces is an optional flag that uses the special short sequence 'y'
|
||||
instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This
|
||||
feature is not supported by the "standard" Adobe encoding.
|
||||
|
||||
wrapcol controls whether the output should have newline (b'\\n') characters
|
||||
added to it. If this is non-zero, each output line will be at most this
|
||||
many characters long.
|
||||
|
||||
pad controls whether the input is padded to a multiple of 4 before
|
||||
encoding. Note that the btoa implementation always pads.
|
||||
|
||||
adobe controls whether the encoded byte sequence is framed with <~ and ~>,
|
||||
which is used by the Adobe implementation.
|
||||
"""
|
||||
global _a85chars, _a85chars2
|
||||
# Delay the initialization of tables to not waste memory
|
||||
# if the function is never called
|
||||
if _a85chars is None:
|
||||
_a85chars = [bytes((i,)) for i in range(33, 118)]
|
||||
_a85chars2 = [(a + b) for a in _a85chars for b in _a85chars]
|
||||
|
||||
result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces)
|
||||
|
||||
if adobe:
|
||||
result = _A85START + result
|
||||
if wrapcol:
|
||||
wrapcol = max(2 if adobe else 1, wrapcol)
|
||||
chunks = [result[i: i + wrapcol]
|
||||
for i in range(0, len(result), wrapcol)]
|
||||
if adobe:
|
||||
if len(chunks[-1]) + 2 > wrapcol:
|
||||
chunks.append(b'')
|
||||
result = b'\n'.join(chunks)
|
||||
if adobe:
|
||||
result += _A85END
|
||||
|
||||
return result
|
||||
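# Illustrative doctest-style example (not part of the original module);
# the expected values below were worked out by hand from the algorithm
# above, so treat them as a sketch rather than authoritative test data.
#
#     >>> a85encode(b'hello')
#     b'BOu!rDZ'
#     >>> a85encode(b'hello', adobe=True)
#     b'<~BOu!rDZ~>'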
|
||||
def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'):
|
||||
"""Decode the Ascii85 encoded bytes-like object or ASCII string b.
|
||||
|
||||
foldspaces is a flag that specifies whether the 'y' short sequence should be
|
||||
accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is
|
||||
not supported by the "standard" Adobe encoding.
|
||||
|
||||
adobe controls whether the input sequence is in Adobe Ascii85 format (i.e.
|
||||
is framed with <~ and ~>).
|
||||
|
||||
ignorechars should be a byte string containing characters to ignore from the
|
||||
input. This should only contain whitespace characters, and by default
|
||||
contains all whitespace characters in ASCII.
|
||||
|
||||
The result is returned as a bytes object.
|
||||
"""
|
||||
b = _bytes_from_decode_data(b)
|
||||
if adobe:
|
||||
if not b.endswith(_A85END):
|
||||
raise ValueError(
|
||||
"Ascii85 encoded byte sequences must end "
|
||||
"with {!r}".format(_A85END)
|
||||
)
|
||||
if b.startswith(_A85START):
|
||||
b = b[2:-2] # Strip off start/end markers
|
||||
else:
|
||||
b = b[:-2]
|
||||
#
|
||||
# We have to go through this stepwise, so as to ignore spaces and handle
|
||||
# special short sequences
|
||||
#
|
||||
packI = struct.Struct('!I').pack
|
||||
decoded = []
|
||||
decoded_append = decoded.append
|
||||
curr = []
|
||||
curr_append = curr.append
|
||||
curr_clear = curr.clear
|
||||
for x in b + b'u' * 4:
|
||||
if b'!'[0] <= x <= b'u'[0]:
|
||||
curr_append(x)
|
||||
if len(curr) == 5:
|
||||
acc = 0
|
||||
for x in curr:
|
||||
acc = 85 * acc + (x - 33)
|
||||
try:
|
||||
decoded_append(packI(acc))
|
||||
except struct.error:
|
||||
raise ValueError('Ascii85 overflow') from None
|
||||
curr_clear()
|
||||
elif x == b'z'[0]:
|
||||
if curr:
|
||||
raise ValueError('z inside Ascii85 5-tuple')
|
||||
decoded_append(b'\0\0\0\0')
|
||||
elif foldspaces and x == b'y'[0]:
|
||||
if curr:
|
||||
raise ValueError('y inside Ascii85 5-tuple')
|
||||
decoded_append(b'\x20\x20\x20\x20')
|
||||
elif x in ignorechars:
|
||||
# Skip whitespace
|
||||
continue
|
||||
else:
|
||||
raise ValueError('Non-Ascii85 digit found: %c' % x)
|
||||
|
||||
result = b''.join(decoded)
|
||||
padding = 4 - len(curr)
|
||||
if padding:
|
||||
# Throw away the extra padding
|
||||
result = result[:-padding]
|
||||
return result
|
||||
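# Illustrative doctest-style example (not part of the original module),
# matching the a85encode sketch above:
#
#     >>> a85decode(b'BOu!rDZ')
#     b'hello'
#     >>> a85decode(b'<~BOu!rDZ~>', adobe=True)
#     b'hello'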
|
||||
# The following code is originally taken (with permission) from Mercurial
|
||||
|
||||
_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
|
||||
_b85chars = None
|
||||
_b85chars2 = None
|
||||
_b85dec = None
|
||||
|
||||
def b85encode(b, pad=False):
|
||||
"""Encode bytes-like object b in base85 format and return a bytes object.
|
||||
|
||||
If pad is true, the input is padded with b'\\0' so its length is a multiple of
|
||||
4 bytes before encoding.
|
||||
"""
|
||||
global _b85chars, _b85chars2
|
||||
# Delay the initialization of tables to not waste memory
|
||||
# if the function is never called
|
||||
if _b85chars is None:
|
||||
_b85chars = [bytes((i,)) for i in _b85alphabet]
|
||||
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
|
||||
return _85encode(b, _b85chars, _b85chars2, pad)
|
||||
|
||||
def b85decode(b):
|
||||
"""Decode the base85-encoded bytes-like object or ASCII string b
|
||||
|
||||
The result is returned as a bytes object.
|
||||
"""
|
||||
global _b85dec
|
||||
# Delay the initialization of tables to not waste memory
|
||||
# if the function is never called
|
||||
if _b85dec is None:
|
||||
_b85dec = [None] * 256
|
||||
for i, c in enumerate(_b85alphabet):
|
||||
_b85dec[c] = i
|
||||
|
||||
b = _bytes_from_decode_data(b)
|
||||
padding = (-len(b)) % 5
|
||||
b = b + b'~' * padding
|
||||
out = []
|
||||
packI = struct.Struct('!I').pack
|
||||
for i in range(0, len(b), 5):
|
||||
chunk = b[i:i + 5]
|
||||
acc = 0
|
||||
try:
|
||||
for c in chunk:
|
||||
acc = acc * 85 + _b85dec[c]
|
||||
except TypeError:
|
||||
for j, c in enumerate(chunk):
|
||||
if _b85dec[c] is None:
|
||||
raise ValueError('bad base85 character at position %d'
|
||||
% (i + j)) from None
|
||||
raise
|
||||
try:
|
||||
out.append(packI(acc))
|
||||
except struct.error:
|
||||
raise ValueError('base85 overflow in hunk starting at byte %d'
|
||||
% i) from None
|
||||
|
||||
result = b''.join(out)
|
||||
if padding:
|
||||
result = result[:-padding]
|
||||
return result
|
||||
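# Illustrative round-trip (not part of the original module); b85 uses a
# different 85-character alphabet than Ascii85 and has no 'z'/'y' folding.
#
#     >>> b85decode(b85encode(b'hello')) == b'hello'
#     True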
|
||||
# Legacy interface. This code could be cleaned up since I don't believe
|
||||
# binascii has any line length limitations. It just doesn't seem worth it
|
||||
# though. The files should be opened in binary mode.
|
||||
|
||||
MAXLINESIZE = 76 # Excluding the CRLF
|
||||
MAXBINSIZE = (MAXLINESIZE//4)*3
|
||||
|
||||
def encode(input, output):
|
||||
"""Encode a file; input and output are binary files."""
|
||||
while True:
|
||||
s = input.read(MAXBINSIZE)
|
||||
if not s:
|
||||
break
|
||||
while len(s) < MAXBINSIZE:
|
||||
ns = input.read(MAXBINSIZE-len(s))
|
||||
if not ns:
|
||||
break
|
||||
s += ns
|
||||
line = binascii.b2a_base64(s)
|
||||
output.write(line)
|
||||
|
||||
|
||||
def decode(input, output):
|
||||
"""Decode a file; input and output are binary files."""
|
||||
while True:
|
||||
line = input.readline()
|
||||
if not line:
|
||||
break
|
||||
s = binascii.a2b_base64(line)
|
||||
output.write(s)
|
||||
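# Illustrative usage of the legacy file interface (not part of the
# original module), using in-memory streams:
#
#     >>> import io
#     >>> src, dst = io.BytesIO(b'hello world'), io.BytesIO()
#     >>> encode(src, dst)
#     >>> decodebytes(dst.getvalue())
#     b'hello world'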
|
||||
def _input_type_check(s):
|
||||
try:
|
||||
m = memoryview(s)
|
||||
except TypeError as err:
|
||||
msg = "expected bytes-like object, not %s" % s.__class__.__name__
|
||||
raise TypeError(msg) from err
|
||||
if m.format not in ('c', 'b', 'B'):
|
||||
msg = ("expected single byte elements, not %r from %s" %
|
||||
(m.format, s.__class__.__name__))
|
||||
raise TypeError(msg)
|
||||
if m.ndim != 1:
|
||||
msg = ("expected 1-D data, not %d-D data from %s" %
|
||||
(m.ndim, s.__class__.__name__))
|
||||
raise TypeError(msg)
|
||||
|
||||
|
||||
def encodebytes(s):
|
||||
"""Encode a bytestring into a bytes object containing multiple lines
|
||||
of base-64 data."""
|
||||
_input_type_check(s)
|
||||
pieces = []
|
||||
for i in range(0, len(s), MAXBINSIZE):
|
||||
chunk = s[i : i + MAXBINSIZE]
|
||||
pieces.append(binascii.b2a_base64(chunk))
|
||||
return b"".join(pieces)
|
||||
|
||||
def encodestring(s):
|
||||
"""Legacy alias of encodebytes()."""
|
||||
import warnings
|
||||
warnings.warn("encodestring() is a deprecated alias since 3.1, "
|
||||
"use encodebytes()",
|
||||
DeprecationWarning, 2)
|
||||
return encodebytes(s)
|
||||
|
||||
|
||||
def decodebytes(s):
|
||||
"""Decode a bytestring of base-64 data into a bytes object."""
|
||||
_input_type_check(s)
|
||||
return binascii.a2b_base64(s)
|
||||
|
||||
def decodestring(s):
|
||||
"""Legacy alias of decodebytes()."""
|
||||
import warnings
|
||||
warnings.warn("decodestring() is a deprecated alias since Python 3.1, "
|
||||
"use decodebytes()",
|
||||
DeprecationWarning, 2)
|
||||
return decodebytes(s)
|
||||
|
||||
|
||||
# Usable as a script...
|
||||
def main():
|
||||
"""Small main program"""
|
||||
import sys, getopt
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], 'deut')
|
||||
except getopt.error as msg:
|
||||
sys.stdout = sys.stderr
|
||||
print(msg)
|
||||
print("""usage: %s [-d|-e|-u|-t] [file|-]
|
||||
-d, -u: decode
|
||||
-e: encode (default)
|
||||
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
|
||||
sys.exit(2)
|
||||
func = encode
|
||||
for o, a in opts:
|
||||
if o == '-e': func = encode
|
||||
if o == '-d': func = decode
|
||||
if o == '-u': func = decode
|
||||
if o == '-t': test(); return
|
||||
if args and args[0] != '-':
|
||||
with open(args[0], 'rb') as f:
|
||||
func(f, sys.stdout.buffer)
|
||||
else:
|
||||
func(sys.stdin.buffer, sys.stdout.buffer)
|
||||
|
||||
|
||||
def test():
|
||||
s0 = b"Aladdin:open sesame"
|
||||
print(repr(s0))
|
||||
s1 = encodebytes(s0)
|
||||
print(repr(s1))
|
||||
s2 = decodebytes(s1)
|
||||
print(repr(s2))
|
||||
assert s0 == s2
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
676
third_party/python/Lib/bdb.py
vendored
Normal file
|
@@ -0,0 +1,676 @@
|
|||
"""Debugger basics"""
|
||||
|
||||
import fnmatch
|
||||
import sys
|
||||
import os
|
||||
from inspect import CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR
|
||||
|
||||
__all__ = ["BdbQuit", "Bdb", "Breakpoint"]
|
||||
|
||||
GENERATOR_AND_COROUTINE_FLAGS = CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR
|
||||
|
||||
|
||||
class BdbQuit(Exception):
|
||||
"""Exception to give up completely."""
|
||||
|
||||
|
||||
class Bdb:
|
||||
"""Generic Python debugger base class.
|
||||
|
||||
This class takes care of details of the trace facility;
|
||||
a derived class should implement user interaction.
|
||||
The standard debugger class (pdb.Pdb) is an example.
|
||||
"""
|
||||
|
||||
def __init__(self, skip=None):
|
||||
self.skip = set(skip) if skip else None
|
||||
self.breaks = {}
|
||||
self.fncache = {}
|
||||
self.frame_returning = None
|
||||
|
||||
def canonic(self, filename):
|
||||
if filename == "<" + filename[1:-1] + ">":
|
||||
return filename
|
||||
canonic = self.fncache.get(filename)
|
||||
if not canonic:
|
||||
canonic = os.path.abspath(filename)
|
||||
canonic = os.path.normcase(canonic)
|
||||
self.fncache[filename] = canonic
|
||||
return canonic
|
||||
|
||||
def reset(self):
|
||||
import linecache
|
||||
linecache.checkcache()
|
||||
self.botframe = None
|
||||
self._set_stopinfo(None, None)
|
||||
|
||||
def trace_dispatch(self, frame, event, arg):
|
||||
if self.quitting:
|
||||
return # None
|
||||
if event == 'line':
|
||||
return self.dispatch_line(frame)
|
||||
if event == 'call':
|
||||
return self.dispatch_call(frame, arg)
|
||||
if event == 'return':
|
||||
return self.dispatch_return(frame, arg)
|
||||
if event == 'exception':
|
||||
return self.dispatch_exception(frame, arg)
|
||||
if event == 'c_call':
|
||||
return self.trace_dispatch
|
||||
if event == 'c_exception':
|
||||
return self.trace_dispatch
|
||||
if event == 'c_return':
|
||||
return self.trace_dispatch
|
||||
print('bdb.Bdb.dispatch: unknown debugging event:', repr(event))
|
||||
return self.trace_dispatch
|
||||
|
||||
def dispatch_line(self, frame):
|
||||
if self.stop_here(frame) or self.break_here(frame):
|
||||
self.user_line(frame)
|
||||
if self.quitting: raise BdbQuit
|
||||
return self.trace_dispatch
|
||||
|
||||
def dispatch_call(self, frame, arg):
|
||||
# XXX 'arg' is no longer used
|
||||
if self.botframe is None:
|
||||
# First call of dispatch since reset()
|
||||
self.botframe = frame.f_back # (CT) Note that this may also be None!
|
||||
return self.trace_dispatch
|
||||
if not (self.stop_here(frame) or self.break_anywhere(frame)):
|
||||
# No need to trace this function
|
||||
return # None
|
||||
# Ignore call events in generator except when stepping.
|
||||
if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
|
||||
return self.trace_dispatch
|
||||
self.user_call(frame, arg)
|
||||
if self.quitting: raise BdbQuit
|
||||
return self.trace_dispatch
|
||||
|
||||
def dispatch_return(self, frame, arg):
|
||||
if self.stop_here(frame) or frame == self.returnframe:
|
||||
# Ignore return events in generator except when stepping.
|
||||
if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
|
||||
return self.trace_dispatch
|
||||
try:
|
||||
self.frame_returning = frame
|
||||
self.user_return(frame, arg)
|
||||
finally:
|
||||
self.frame_returning = None
|
||||
if self.quitting: raise BdbQuit
|
||||
# The user issued a 'next' or 'until' command.
|
||||
if self.stopframe is frame and self.stoplineno != -1:
|
||||
self._set_stopinfo(None, None)
|
||||
return self.trace_dispatch
|
||||
|
||||
def dispatch_exception(self, frame, arg):
|
||||
if self.stop_here(frame):
|
||||
# When stepping with next/until/return in a generator frame, skip
|
||||
# the internal StopIteration exception (with no traceback)
|
||||
# triggered by a subiterator run with the 'yield from' statement.
|
||||
if not (frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
|
||||
and arg[0] is StopIteration and arg[2] is None):
|
||||
self.user_exception(frame, arg)
|
||||
if self.quitting: raise BdbQuit
|
||||
# Stop at the StopIteration or GeneratorExit exception when the user
|
||||
# has set stopframe in a generator by issuing a return command, or a
|
||||
# next/until command at the last statement in the generator before the
|
||||
# exception.
|
||||
elif (self.stopframe and frame is not self.stopframe
|
||||
and self.stopframe.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
|
||||
and arg[0] in (StopIteration, GeneratorExit)):
|
||||
self.user_exception(frame, arg)
|
||||
if self.quitting: raise BdbQuit
|
||||
|
||||
return self.trace_dispatch
|
||||
|
||||
# Normally derived classes don't override the following
|
||||
# methods, but they may if they want to redefine the
|
||||
# definition of stopping and breakpoints.
|
||||
|
||||
def is_skipped_module(self, module_name):
|
||||
for pattern in self.skip:
|
||||
if fnmatch.fnmatch(module_name, pattern):
|
||||
return True
|
||||
return False
|
||||
|
||||
def stop_here(self, frame):
|
||||
# (CT) stopframe may now also be None, see dispatch_call.
|
||||
# (CT) the former test for None is therefore removed from here.
|
||||
if self.skip and \
|
||||
self.is_skipped_module(frame.f_globals.get('__name__')):
|
||||
return False
|
||||
if frame is self.stopframe:
|
||||
if self.stoplineno == -1:
|
||||
return False
|
||||
return frame.f_lineno >= self.stoplineno
|
||||
if not self.stopframe:
|
||||
return True
|
||||
return False
|
||||
|
||||
def break_here(self, frame):
|
||||
filename = self.canonic(frame.f_code.co_filename)
|
||||
if filename not in self.breaks:
|
||||
return False
|
||||
lineno = frame.f_lineno
|
||||
if lineno not in self.breaks[filename]:
|
||||
# The line itself has no breakpoint, but maybe the line is the
|
||||
# first line of a function with breakpoint set by function name.
|
||||
lineno = frame.f_code.co_firstlineno
|
||||
if lineno not in self.breaks[filename]:
|
||||
return False
|
||||
|
||||
# flag says ok to delete temp. bp
|
||||
(bp, flag) = effective(filename, lineno, frame)
|
||||
if bp:
|
||||
self.currentbp = bp.number
|
||||
if (flag and bp.temporary):
|
||||
self.do_clear(str(bp.number))
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def do_clear(self, arg):
|
||||
raise NotImplementedError("subclass of bdb must implement do_clear()")
|
||||
|
||||
def break_anywhere(self, frame):
|
||||
return self.canonic(frame.f_code.co_filename) in self.breaks
|
||||
|
||||
# Derived classes should override the user_* methods
|
||||
# to gain control.
|
||||
|
||||
def user_call(self, frame, argument_list):
|
||||
"""This method is called when there is the remote possibility
|
||||
that we ever need to stop in this function."""
|
||||
pass
|
||||
|
||||
def user_line(self, frame):
|
||||
"""This method is called when we stop or break at this line."""
|
||||
pass
|
||||
|
||||
def user_return(self, frame, return_value):
|
||||
"""This method is called when a return trap is set here."""
|
||||
pass
|
||||
|
||||
def user_exception(self, frame, exc_info):
|
||||
"""This method is called if an exception occurs,
|
||||
but only if we are to stop at or just below this level."""
|
||||
pass
|
||||
|
||||
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
|
||||
self.stopframe = stopframe
|
||||
self.returnframe = returnframe
|
||||
self.quitting = False
|
||||
# stoplineno >= 0 means: stop at line >= the stoplineno
|
||||
# stoplineno -1 means: don't stop at all
|
||||
self.stoplineno = stoplineno
|
||||
|
||||
# Derived classes and clients can call the following methods
|
||||
# to affect the stepping state.
|
||||
|
||||
def set_until(self, frame, lineno=None):
|
||||
"""Stop when the line with the line no greater than the current one is
|
||||
reached or when returning from current frame"""
|
||||
# the name "until" is borrowed from gdb
|
||||
if lineno is None:
|
||||
lineno = frame.f_lineno + 1
|
||||
self._set_stopinfo(frame, frame, lineno)
|
||||
|
||||
def set_step(self):
|
||||
"""Stop after one line of code."""
|
||||
# Issue #13183: pdb skips frames after hitting a breakpoint and running
|
||||
# step commands.
|
||||
# Restore the trace function in the caller (that may not have been set
|
||||
# for performance reasons) when returning from the current frame.
|
||||
if self.frame_returning:
|
||||
caller_frame = self.frame_returning.f_back
|
||||
if caller_frame and not caller_frame.f_trace:
|
||||
caller_frame.f_trace = self.trace_dispatch
|
||||
self._set_stopinfo(None, None)
|
||||
|
||||
def set_next(self, frame):
|
||||
"""Stop on the next line in or below the given frame."""
|
||||
self._set_stopinfo(frame, None)
|
||||
|
||||
def set_return(self, frame):
|
||||
"""Stop when returning from the given frame."""
|
||||
if frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
|
||||
self._set_stopinfo(frame, None, -1)
|
||||
else:
|
||||
self._set_stopinfo(frame.f_back, frame)
|
||||
|
||||
def set_trace(self, frame=None):
|
||||
"""Start debugging from `frame`.
|
||||
|
||||
If frame is not specified, debugging starts from caller's frame.
|
||||
"""
|
||||
if frame is None:
|
||||
frame = sys._getframe().f_back
|
||||
self.reset()
|
||||
while frame:
|
||||
frame.f_trace = self.trace_dispatch
|
||||
self.botframe = frame
|
||||
frame = frame.f_back
|
||||
self.set_step()
|
||||
sys.settrace(self.trace_dispatch)
|
||||
|
||||
def set_continue(self):
|
||||
# Don't stop except at breakpoints or when finished
|
||||
self._set_stopinfo(self.botframe, None, -1)
|
||||
if not self.breaks:
|
||||
# no breakpoints; run without debugger overhead
|
||||
sys.settrace(None)
|
||||
frame = sys._getframe().f_back
|
||||
while frame and frame is not self.botframe:
|
||||
del frame.f_trace
|
||||
frame = frame.f_back
|
||||
|
||||
def set_quit(self):
|
||||
self.stopframe = self.botframe
|
||||
self.returnframe = None
|
||||
self.quitting = True
|
||||
sys.settrace(None)
|
||||
|
||||
# Derived classes and clients can call the following methods
|
||||
# to manipulate breakpoints. These methods return an
|
||||
# error message if something went wrong, None if all is well.
|
||||
# Set_break prints out the breakpoint line and file:lineno.
|
||||
# Call self.get_*break*() to see the breakpoints or better
|
||||
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
|
||||
|
||||
def set_break(self, filename, lineno, temporary=False, cond=None,
|
||||
funcname=None):
|
||||
filename = self.canonic(filename)
|
||||
import linecache # Import as late as possible
|
||||
line = linecache.getline(filename, lineno)
|
||||
if not line:
|
||||
return 'Line %s:%d does not exist' % (filename, lineno)
|
||||
list = self.breaks.setdefault(filename, [])
|
||||
if lineno not in list:
|
||||
list.append(lineno)
|
||||
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
|
||||
|
||||
def _prune_breaks(self, filename, lineno):
|
||||
if (filename, lineno) not in Breakpoint.bplist:
|
||||
self.breaks[filename].remove(lineno)
|
||||
if not self.breaks[filename]:
|
||||
del self.breaks[filename]
|
||||
|
||||
def clear_break(self, filename, lineno):
|
||||
filename = self.canonic(filename)
|
||||
if filename not in self.breaks:
|
||||
return 'There are no breakpoints in %s' % filename
|
||||
if lineno not in self.breaks[filename]:
|
||||
return 'There is no breakpoint at %s:%d' % (filename, lineno)
|
||||
# If there's only one bp in the list for that file,line
|
||||
# pair, then remove the breaks entry
|
||||
for bp in Breakpoint.bplist[filename, lineno][:]:
|
||||
bp.deleteMe()
|
||||
self._prune_breaks(filename, lineno)
|
||||
|
||||
def clear_bpbynumber(self, arg):
|
||||
try:
|
||||
bp = self.get_bpbynumber(arg)
|
||||
except ValueError as err:
|
||||
return str(err)
|
||||
bp.deleteMe()
|
||||
self._prune_breaks(bp.file, bp.line)
|
||||
|
||||
def clear_all_file_breaks(self, filename):
|
||||
filename = self.canonic(filename)
|
||||
if filename not in self.breaks:
|
||||
return 'There are no breakpoints in %s' % filename
|
||||
for line in self.breaks[filename]:
|
||||
blist = Breakpoint.bplist[filename, line]
|
||||
for bp in blist:
|
||||
bp.deleteMe()
|
||||
del self.breaks[filename]
|
||||
|
||||
def clear_all_breaks(self):
|
||||
if not self.breaks:
|
||||
return 'There are no breakpoints'
|
||||
for bp in Breakpoint.bpbynumber:
|
||||
if bp:
|
||||
bp.deleteMe()
|
||||
self.breaks = {}
|
||||
|
||||
def get_bpbynumber(self, arg):
|
||||
if not arg:
|
||||
raise ValueError('Breakpoint number expected')
|
||||
try:
|
||||
number = int(arg)
|
||||
except ValueError:
|
||||
raise ValueError('Non-numeric breakpoint number %s' % arg)
|
||||
try:
|
||||
bp = Breakpoint.bpbynumber[number]
|
||||
except IndexError:
|
||||
raise ValueError('Breakpoint number %d out of range' % number)
|
||||
if bp is None:
|
||||
raise ValueError('Breakpoint %d already deleted' % number)
|
||||
return bp
|
||||
|
||||
def get_break(self, filename, lineno):
|
||||
filename = self.canonic(filename)
|
||||
return filename in self.breaks and \
|
||||
lineno in self.breaks[filename]
|
||||
|
||||
def get_breaks(self, filename, lineno):
|
||||
filename = self.canonic(filename)
|
||||
return filename in self.breaks and \
|
||||
lineno in self.breaks[filename] and \
|
||||
Breakpoint.bplist[filename, lineno] or []
|
||||
|
||||
def get_file_breaks(self, filename):
|
||||
filename = self.canonic(filename)
|
||||
if filename in self.breaks:
|
||||
return self.breaks[filename]
|
||||
else:
|
||||
return []
|
||||
|
||||
def get_all_breaks(self):
|
||||
return self.breaks
|
||||
|
||||
# Derived classes and clients can call the following method
|
||||
# to get a data structure representing a stack trace.
|
||||
|
||||
def get_stack(self, f, t):
|
||||
stack = []
|
||||
if t and t.tb_frame is f:
|
||||
t = t.tb_next
|
||||
while f is not None:
|
||||
stack.append((f, f.f_lineno))
|
||||
if f is self.botframe:
|
||||
break
|
||||
f = f.f_back
|
||||
stack.reverse()
|
||||
i = max(0, len(stack) - 1)
|
||||
while t is not None:
|
||||
stack.append((t.tb_frame, t.tb_lineno))
|
||||
t = t.tb_next
|
||||
if f is None:
|
||||
i = max(0, len(stack) - 1)
|
||||
return stack, i
|
||||
|
||||
def format_stack_entry(self, frame_lineno, lprefix=': '):
|
||||
import linecache, reprlib
|
||||
frame, lineno = frame_lineno
|
||||
filename = self.canonic(frame.f_code.co_filename)
|
||||
s = '%s(%r)' % (filename, lineno)
|
||||
if frame.f_code.co_name:
|
||||
s += frame.f_code.co_name
|
||||
else:
|
||||
s += "<lambda>"
|
||||
if '__args__' in frame.f_locals:
|
||||
args = frame.f_locals['__args__']
|
||||
else:
|
||||
args = None
|
||||
if args:
|
||||
s += reprlib.repr(args)
|
||||
else:
|
||||
s += '()'
|
||||
if '__return__' in frame.f_locals:
|
||||
rv = frame.f_locals['__return__']
|
||||
s += '->'
|
||||
s += reprlib.repr(rv)
|
||||
line = linecache.getline(filename, lineno, frame.f_globals)
|
||||
if line:
|
||||
s += lprefix + line.strip()
|
||||
return s
|
||||
|
||||
# The following methods can be called by clients to use
|
||||
# a debugger to debug a statement or an expression.
|
||||
# Both can be given as a string, or a code object.
|
||||
|
||||
def run(self, cmd, globals=None, locals=None):
|
||||
if globals is None:
|
||||
import __main__
|
||||
globals = __main__.__dict__
|
||||
if locals is None:
|
||||
locals = globals
|
||||
self.reset()
|
||||
if isinstance(cmd, str):
|
||||
cmd = compile(cmd, "<string>", "exec")
|
||||
sys.settrace(self.trace_dispatch)
|
||||
try:
|
||||
exec(cmd, globals, locals)
|
||||
except BdbQuit:
|
||||
pass
|
||||
finally:
|
||||
self.quitting = True
|
||||
sys.settrace(None)
|
||||
|
||||
def runeval(self, expr, globals=None, locals=None):
|
||||
if globals is None:
|
||||
import __main__
|
||||
globals = __main__.__dict__
|
||||
if locals is None:
|
||||
locals = globals
|
||||
self.reset()
|
||||
sys.settrace(self.trace_dispatch)
|
||||
try:
|
||||
return eval(expr, globals, locals)
|
||||
except BdbQuit:
|
||||
pass
|
||||
finally:
|
||||
self.quitting = True
|
||||
sys.settrace(None)
|
||||
|
||||
def runctx(self, cmd, globals, locals):
|
||||
# B/W compatibility
|
||||
self.run(cmd, globals, locals)
|
||||
|
||||
# This method is more useful to debug a single function call.
|
||||
|
||||
def runcall(self, func, *args, **kwds):
|
||||
self.reset()
|
||||
sys.settrace(self.trace_dispatch)
|
||||
res = None
|
||||
try:
|
||||
res = func(*args, **kwds)
|
||||
except BdbQuit:
|
||||
pass
|
||||
finally:
|
||||
self.quitting = True
|
||||
sys.settrace(None)
|
||||
return res
|
||||
|
||||
|
||||
def set_trace():
|
||||
Bdb().set_trace()
|
||||
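# Illustrative sketch (not part of the original module): Bdb is meant to
# be subclassed with the user_* hooks overridden, as the Tdb class in the
# testing section below does. A minimal line tracer might look like:
#
#     class LineTracer(Bdb):
#         def user_line(self, frame):
#             print(frame.f_code.co_filename, frame.f_lineno)
#             self.set_continue()
#
#     LineTracer().run('x = 1 + 1')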
|
||||
|
||||
class Breakpoint:
|
||||
"""Breakpoint class.
|
||||
|
||||
Implements temporary breakpoints, ignore counts, disabling and
|
||||
(re)-enabling, and conditionals.
|
||||
|
||||
Breakpoints are indexed by number through bpbynumber and by
|
||||
the file,line tuple using bplist. The former points to a
|
||||
single instance of class Breakpoint. The latter points to a
|
||||
list of such instances since there may be more than one
|
||||
breakpoint per line.
|
||||
|
||||
"""
|
||||
|
||||
# XXX Keeping state in the class is a mistake -- this means
|
||||
# you cannot have more than one active Bdb instance.
|
||||
|
||||
next = 1 # Next bp to be assigned
|
||||
bplist = {} # indexed by (file, lineno) tuple
|
||||
bpbynumber = [None] # Each entry is None or an instance of Bpt
|
||||
# index 0 is unused, except for marking an
|
||||
# effective break .... see effective()
|
||||
|
||||
def __init__(self, file, line, temporary=False, cond=None, funcname=None):
|
||||
self.funcname = funcname
|
||||
# Needed if funcname is not None.
|
||||
self.func_first_executable_line = None
|
||||
self.file = file # This better be in canonical form!
|
||||
self.line = line
|
||||
self.temporary = temporary
|
||||
self.cond = cond
|
||||
self.enabled = True
|
||||
self.ignore = 0
|
||||
self.hits = 0
|
||||
self.number = Breakpoint.next
|
||||
Breakpoint.next += 1
|
||||
# Build the two lists
|
||||
self.bpbynumber.append(self)
|
||||
if (file, line) in self.bplist:
|
||||
self.bplist[file, line].append(self)
|
||||
else:
|
||||
self.bplist[file, line] = [self]
|
||||
|
||||
def deleteMe(self):
|
||||
index = (self.file, self.line)
|
||||
self.bpbynumber[self.number] = None # No longer in list
|
||||
self.bplist[index].remove(self)
|
||||
if not self.bplist[index]:
|
||||
# No more bp for this f:l combo
|
||||
del self.bplist[index]
|
||||
|
||||
def enable(self):
|
||||
self.enabled = True
|
||||
|
||||
def disable(self):
|
||||
self.enabled = False
|
||||
|
||||
def bpprint(self, out=None):
|
||||
if out is None:
|
||||
out = sys.stdout
|
||||
print(self.bpformat(), file=out)
|
||||
|
||||
def bpformat(self):
|
||||
if self.temporary:
|
||||
disp = 'del '
|
||||
else:
|
||||
disp = 'keep '
|
||||
if self.enabled:
|
||||
disp = disp + 'yes '
|
||||
else:
|
||||
disp = disp + 'no '
|
||||
ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
|
||||
self.file, self.line)
|
||||
if self.cond:
|
||||
ret += '\n\tstop only if %s' % (self.cond,)
|
||||
if self.ignore:
|
||||
ret += '\n\tignore next %d hits' % (self.ignore,)
|
||||
if self.hits:
|
||||
if self.hits > 1:
|
||||
ss = 's'
|
||||
else:
|
||||
ss = ''
|
||||
ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss)
|
||||
return ret
|
||||
|
||||
def __str__(self):
|
||||
return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line)
|
||||
|
||||
# -----------end of Breakpoint class----------
|
||||
|
||||
def checkfuncname(b, frame):
|
||||
"""Check whether we should break here because of `b.funcname`."""
|
||||
if not b.funcname:
|
||||
# Breakpoint was set via line number.
|
||||
if b.line != frame.f_lineno:
|
||||
# Breakpoint was set at a line with a def statement and the function
|
||||
# defined is called: don't break.
|
||||
return False
|
||||
return True
|
||||
|
||||
# Breakpoint set via function name.
|
||||
|
||||
if frame.f_code.co_name != b.funcname:
|
||||
# It's not a function call, but rather execution of def statement.
|
||||
return False
|
||||
|
||||
# We are in the right frame.
|
||||
if not b.func_first_executable_line:
|
||||
# The function is entered for the 1st time.
|
||||
b.func_first_executable_line = frame.f_lineno
|
||||
|
||||
if b.func_first_executable_line != frame.f_lineno:
|
||||
# But we are not at the first line number: don't break.
|
||||
return False
|
||||
return True
|
||||
|
||||
# Determines if there is an effective (active) breakpoint at this
|
||||
# line of code. Returns breakpoint number or 0 if none
|
||||
def effective(file, line, frame):
|
||||
"""Determine which breakpoint for this file:line is to be acted upon.
|
||||
|
||||
Called only if we know there is a bpt at this
|
||||
location. Returns breakpoint that was triggered and a flag
|
||||
that indicates if it is ok to delete a temporary bp.
|
||||
|
||||
"""
|
||||
possibles = Breakpoint.bplist[file, line]
|
||||
for b in possibles:
|
||||
if not b.enabled:
|
||||
continue
|
||||
if not checkfuncname(b, frame):
|
||||
continue
|
||||
# Count every hit when bp is enabled
|
||||
b.hits += 1
|
||||
if not b.cond:
|
||||
# If unconditional, and ignoring go on to next, else break
|
||||
if b.ignore > 0:
|
||||
b.ignore -= 1
|
||||
continue
|
||||
else:
|
||||
# breakpoint and marker that it's ok to delete if temporary
|
||||
return (b, True)
|
||||
else:
|
||||
# Conditional bp.
|
||||
# Ignore count applies only to those bpt hits where the
|
||||
# condition evaluates to true.
|
||||
try:
|
||||
val = eval(b.cond, frame.f_globals, frame.f_locals)
|
||||
if val:
|
||||
if b.ignore > 0:
|
||||
b.ignore -= 1
|
||||
# continue
|
||||
else:
|
||||
return (b, True)
|
||||
# else:
|
||||
# continue
|
||||
except:
|
||||
# if eval fails, most conservative thing is to stop on
|
||||
# breakpoint regardless of ignore count. Don't delete
|
||||
# temporary, as another hint to user.
|
||||
return (b, False)
|
||||
return (None, None)
|
||||
|
||||
|
||||
# -------------------- testing --------------------
|
||||
|
||||
class Tdb(Bdb):
|
||||
def user_call(self, frame, args):
|
||||
name = frame.f_code.co_name
|
||||
if not name: name = '???'
|
||||
print('+++ call', name, args)
|
||||
def user_line(self, frame):
|
||||
import linecache
|
||||
name = frame.f_code.co_name
|
||||
if not name: name = '???'
|
||||
fn = self.canonic(frame.f_code.co_filename)
|
||||
line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
|
||||
print('+++', fn, frame.f_lineno, name, ':', line.strip())
|
||||
def user_return(self, frame, retval):
|
||||
print('+++ return', retval)
|
||||
def user_exception(self, frame, exc_stuff):
|
||||
print('+++ exception', exc_stuff)
|
||||
self.set_continue()
|
||||
|
||||
def foo(n):
|
||||
print('foo(', n, ')')
|
||||
x = bar(n*10)
|
||||
print('bar returned', x)
|
||||
|
||||
def bar(a):
|
||||
print('bar(', a, ')')
|
||||
return a/2
|
||||
|
||||
def test():
|
||||
t = Tdb()
|
||||
t.run('import bdb; bdb.foo(10)')
|
479
third_party/python/Lib/binhex.py
vendored
Normal file
|
@@ -0,0 +1,479 @@
|
|||
"""Macintosh binhex compression/decompression.
|
||||
|
||||
easy interface:
|
||||
binhex(inputfilename, outputfilename)
|
||||
hexbin(inputfilename, outputfilename)
|
||||
"""
|
||||
|
||||
#
|
||||
# Jack Jansen, CWI, August 1995.
|
||||
#
|
||||
# The module is supposed to be as compatible as possible. Especially the
|
||||
# easy interface should work "as expected" on any platform.
|
||||
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
|
||||
# We seem to lack a simple character-translate in python.
|
||||
# (we should probably use ISO-Latin-1 on all but the mac platform).
|
||||
# XXXX The simple routines are too simple: they expect to hold the complete
|
||||
# files in-core. Should be fixed.
|
||||
# XXXX It would be nice to handle AppleDouble format on unix
|
||||
# (for servers serving macs).
|
||||
# XXXX I don't understand what happens when you get 0x90 times the same byte on
|
||||
# input. The resulting code (xx 90 90) would appear to be interpreted as an
|
||||
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
|
||||
#
|
||||
import io
|
||||
import os
|
||||
import struct
|
||||
import binascii
|
||||
|
||||
__all__ = ["binhex","hexbin","Error"]
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
# States (what have we written)
|
||||
_DID_HEADER = 0
|
||||
_DID_DATA = 1
|
||||
|
||||
# Various constants
|
||||
REASONABLY_LARGE = 32768 # Minimal amount we pass the rle-coder
|
||||
LINELEN = 64
|
||||
RUNCHAR = b"\x90"
|
||||
|
||||
#
|
||||
# This code is no longer byte-order dependent
|
||||
|
||||
|
||||
class FInfo:
|
||||
def __init__(self):
|
||||
self.Type = '????'
|
||||
self.Creator = '????'
|
||||
self.Flags = 0
|
||||
|
||||
def getfileinfo(name):
|
||||
finfo = FInfo()
|
||||
with io.open(name, 'rb') as fp:
|
||||
# Quick check for textfile
|
||||
data = fp.read(512)
|
||||
if 0 not in data:
|
||||
finfo.Type = 'TEXT'
|
||||
fp.seek(0, 2)
|
||||
dsize = fp.tell()
|
||||
dir, file = os.path.split(name)
|
||||
file = file.replace(':', '-', 1)
|
||||
return file, finfo, dsize, 0
|
||||
|
||||
class openrsrc:
|
||||
def __init__(self, *args):
|
||||
pass
|
||||
|
||||
def read(self, *args):
|
||||
return b''
|
||||
|
||||
def write(self, *args):
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
class _Hqxcoderengine:
|
||||
"""Write data to the coder in 3-byte chunks"""
|
||||
|
||||
def __init__(self, ofp):
|
||||
self.ofp = ofp
|
||||
self.data = b''
|
||||
self.hqxdata = b''
|
||||
self.linelen = LINELEN - 1
|
||||
|
||||
def write(self, data):
|
||||
self.data = self.data + data
|
||||
datalen = len(self.data)
|
||||
todo = (datalen // 3) * 3
|
||||
data = self.data[:todo]
|
||||
self.data = self.data[todo:]
|
||||
if not data:
|
||||
return
|
||||
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
|
||||
self._flush(0)
|
||||
|
||||
def _flush(self, force):
|
||||
first = 0
|
||||
while first <= len(self.hqxdata) - self.linelen:
|
||||
last = first + self.linelen
|
||||
self.ofp.write(self.hqxdata[first:last] + b'\n')
|
||||
self.linelen = LINELEN
|
||||
first = last
|
||||
self.hqxdata = self.hqxdata[first:]
|
||||
if force:
|
||||
self.ofp.write(self.hqxdata + b':\n')
|
||||
|
||||
def close(self):
|
||||
if self.data:
|
||||
self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
|
||||
self._flush(1)
|
||||
self.ofp.close()
|
||||
del self.ofp
|
||||
|
||||
class _Rlecoderengine:
|
||||
"""Write data to the RLE-coder in suitably large chunks"""
|
||||
|
||||
def __init__(self, ofp):
|
||||
self.ofp = ofp
|
||||
self.data = b''
|
||||
|
||||
def write(self, data):
|
||||
self.data = self.data + data
|
||||
if len(self.data) < REASONABLY_LARGE:
|
||||
return
|
||||
rledata = binascii.rlecode_hqx(self.data)
|
||||
self.ofp.write(rledata)
|
||||
self.data = b''
|
||||
|
||||
def close(self):
|
||||
if self.data:
|
||||
rledata = binascii.rlecode_hqx(self.data)
|
||||
self.ofp.write(rledata)
|
||||
self.ofp.close()
|
||||
del self.ofp
|
||||
|
||||
class BinHex:
|
||||
def __init__(self, name_finfo_dlen_rlen, ofp):
|
||||
name, finfo, dlen, rlen = name_finfo_dlen_rlen
|
||||
close_on_error = False
|
||||
if isinstance(ofp, str):
|
||||
ofname = ofp
|
||||
ofp = io.open(ofname, 'wb')
|
||||
close_on_error = True
|
||||
try:
|
||||
ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:')
|
||||
hqxer = _Hqxcoderengine(ofp)
|
||||
self.ofp = _Rlecoderengine(hqxer)
|
||||
self.crc = 0
|
||||
if finfo is None:
|
||||
finfo = FInfo()
|
||||
self.dlen = dlen
|
||||
self.rlen = rlen
|
||||
self._writeinfo(name, finfo)
|
||||
self.state = _DID_HEADER
|
||||
except:
|
||||
if close_on_error:
|
||||
ofp.close()
|
||||
raise
|
||||
|
||||
def _writeinfo(self, name, finfo):
|
||||
nl = len(name)
|
||||
if nl > 63:
|
||||
raise Error('Filename too long')
|
||||
d = bytes([nl]) + name.encode("latin-1") + b'\0'
|
||||
tp, cr = finfo.Type, finfo.Creator
|
||||
if isinstance(tp, str):
|
||||
tp = tp.encode("latin-1")
|
||||
if isinstance(cr, str):
|
||||
cr = cr.encode("latin-1")
|
||||
d2 = tp + cr
|
||||
|
||||
# Force all structs to be packed with big-endian
|
||||
d3 = struct.pack('>h', finfo.Flags)
|
||||
d4 = struct.pack('>ii', self.dlen, self.rlen)
|
||||
info = d + d2 + d3 + d4
|
||||
self._write(info)
|
||||
self._writecrc()
|
||||
|
||||
def _write(self, data):
|
||||
self.crc = binascii.crc_hqx(data, self.crc)
|
||||
self.ofp.write(data)
|
||||
|
||||
def _writecrc(self):
|
||||
# XXXX Should this be here??
|
||||
# self.crc = binascii.crc_hqx('\0\0', self.crc)
|
||||
if self.crc < 0:
|
||||
fmt = '>h'
|
||||
else:
|
||||
fmt = '>H'
|
||||
self.ofp.write(struct.pack(fmt, self.crc))
|
||||
self.crc = 0
|
||||
|
||||
def write(self, data):
|
||||
if self.state != _DID_HEADER:
|
||||
raise Error('Writing data at the wrong time')
|
||||
self.dlen = self.dlen - len(data)
|
||||
self._write(data)
|
||||
|
||||
def close_data(self):
|
||||
if self.dlen != 0:
|
||||
raise Error('Incorrect data size, diff=%r' % (self.dlen,))
|
||||
self._writecrc()
|
||||
self.state = _DID_DATA
|
||||
|
||||
def write_rsrc(self, data):
|
||||
if self.state < _DID_DATA:
|
||||
self.close_data()
|
||||
if self.state != _DID_DATA:
|
||||
raise Error('Writing resource data at the wrong time')
|
||||
self.rlen = self.rlen - len(data)
|
||||
self._write(data)
|
||||
|
||||
def close(self):
|
||||
if self.state is None:
|
||||
return
|
||||
try:
|
||||
if self.state < _DID_DATA:
|
||||
self.close_data()
|
||||
if self.state != _DID_DATA:
|
||||
raise Error('Close at the wrong time')
|
||||
if self.rlen != 0:
|
||||
raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,))
|
||||
self._writecrc()
|
||||
finally:
|
||||
self.state = None
|
||||
ofp = self.ofp
|
||||
del self.ofp
|
||||
ofp.close()
|
||||
|
||||
def binhex(inp, out):
|
||||
"""binhex(infilename, outfilename): create binhex-encoded copy of a file"""
|
||||
finfo = getfileinfo(inp)
|
||||
ofp = BinHex(finfo, out)
|
||||
|
||||
with io.open(inp, 'rb') as ifp:
|
||||
# XXXX Do textfile translation on non-mac systems
|
||||
while True:
|
||||
d = ifp.read(128000)
|
||||
if not d: break
|
||||
ofp.write(d)
|
||||
ofp.close_data()
|
||||
|
||||
ifp = openrsrc(inp, 'rb')
|
||||
while True:
|
||||
d = ifp.read(128000)
|
||||
if not d: break
|
||||
ofp.write_rsrc(d)
|
||||
ofp.close()
|
||||
ifp.close()
|
||||
|
||||
class _Hqxdecoderengine:
|
||||
"""Read data via the decoder in 4-byte chunks"""
|
||||
|
||||
def __init__(self, ifp):
|
||||
self.ifp = ifp
|
||||
self.eof = 0
|
||||
|
||||
def read(self, totalwtd):
|
||||
"""Read at least wtd bytes (or until EOF)"""
|
||||
decdata = b''
|
||||
wtd = totalwtd
|
||||
#
|
||||
# The loop here is convoluted, since we don't really know how
|
||||
# much to decode: there may be newlines in the incoming data.
|
||||
while wtd > 0:
|
||||
if self.eof: return decdata
|
||||
wtd = ((wtd + 2) // 3) * 4
|
||||
data = self.ifp.read(wtd)
|
||||
#
|
||||
# Next problem: there may not be a complete number of
|
||||
# bytes in what we pass to a2b. Solve by yet another
|
||||
# loop.
|
||||
#
|
||||
while True:
|
||||
try:
|
||||
decdatacur, self.eof = binascii.a2b_hqx(data)
|
||||
break
|
||||
except binascii.Incomplete:
|
||||
pass
|
||||
newdata = self.ifp.read(1)
|
||||
if not newdata:
|
||||
raise Error('Premature EOF on binhex file')
|
||||
data = data + newdata
|
||||
decdata = decdata + decdatacur
|
||||
wtd = totalwtd - len(decdata)
|
||||
if not decdata and not self.eof:
|
||||
raise Error('Premature EOF on binhex file')
|
||||
return decdata
|
||||
|
||||
def close(self):
|
||||
self.ifp.close()
|
||||
|
||||
class _Rledecoderengine:
|
||||
"""Read data via the RLE-coder"""
|
||||
|
||||
def __init__(self, ifp):
|
||||
self.ifp = ifp
|
||||
self.pre_buffer = b''
|
||||
self.post_buffer = b''
|
||||
self.eof = 0
|
||||
|
||||
def read(self, wtd):
|
||||
if wtd > len(self.post_buffer):
|
||||
self._fill(wtd - len(self.post_buffer))
|
||||
rv = self.post_buffer[:wtd]
|
||||
self.post_buffer = self.post_buffer[wtd:]
|
||||
return rv
|
||||
|
||||
def _fill(self, wtd):
|
||||
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
|
||||
if self.ifp.eof:
|
||||
self.post_buffer = self.post_buffer + \
|
||||
binascii.rledecode_hqx(self.pre_buffer)
|
||||
self.pre_buffer = b''
|
||||
return
|
||||
|
||||
#
|
||||
# Obfuscated code ahead. We have to take care that we don't
|
||||
# end up with an orphaned RUNCHAR later on. So, we keep a couple
|
||||
# of bytes in the buffer, depending on what the end of
|
||||
# the buffer looks like:
|
||||
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
|
||||
# '?\220' - Keep 2 bytes: repeated something-else
|
||||
# '\220\0' - Escaped \220: Keep 2 bytes.
|
||||
# '?\220?' - Complete repeat sequence: decode all
|
||||
# otherwise: keep 1 byte.
|
||||
#
|
||||
mark = len(self.pre_buffer)
|
||||
if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR:
|
||||
mark = mark - 3
|
||||
elif self.pre_buffer[-1:] == RUNCHAR:
|
||||
mark = mark - 2
|
||||
elif self.pre_buffer[-2:] == RUNCHAR + b'\0':
|
||||
mark = mark - 2
|
||||
elif self.pre_buffer[-2:-1] == RUNCHAR:
|
||||
pass # Decode all
|
||||
else:
|
||||
mark = mark - 1
|
||||
|
||||
self.post_buffer = self.post_buffer + \
|
||||
binascii.rledecode_hqx(self.pre_buffer[:mark])
|
||||
self.pre_buffer = self.pre_buffer[mark:]
|
||||
|
||||
def close(self):
|
||||
self.ifp.close()
|
||||
|
||||
class HexBin:
|
||||
def __init__(self, ifp):
|
||||
if isinstance(ifp, str):
|
||||
ifp = io.open(ifp, 'rb')
|
||||
#
|
||||
# Find initial colon.
|
||||
#
|
||||
while True:
|
||||
ch = ifp.read(1)
|
||||
if not ch:
|
||||
raise Error("No binhex data found")
|
||||
# Cater for \r\n terminated lines (which show up as \n\r, hence
|
||||
# all lines start with \r)
|
||||
if ch == b'\r':
|
||||
continue
|
||||
if ch == b':':
|
||||
break
|
||||
|
||||
hqxifp = _Hqxdecoderengine(ifp)
|
||||
self.ifp = _Rledecoderengine(hqxifp)
|
||||
self.crc = 0
|
||||
self._readheader()
|
||||
|
||||
def _read(self, len):
|
||||
data = self.ifp.read(len)
|
||||
self.crc = binascii.crc_hqx(data, self.crc)
|
||||
return data
|
||||
|
||||
def _checkcrc(self):
|
||||
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
|
||||
#self.crc = binascii.crc_hqx('\0\0', self.crc)
|
||||
# XXXX Is this needed??
|
||||
self.crc = self.crc & 0xffff
|
||||
if filecrc != self.crc:
|
||||
raise Error('CRC error, computed %x, read %x'
|
||||
% (self.crc, filecrc))
|
||||
self.crc = 0
|
||||
|
||||
def _readheader(self):
|
||||
len = self._read(1)
|
||||
fname = self._read(ord(len))
|
||||
rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
|
||||
self._checkcrc()
|
||||
|
||||
type = rest[1:5]
|
||||
creator = rest[5:9]
|
||||
flags = struct.unpack('>h', rest[9:11])[0]
|
||||
self.dlen = struct.unpack('>l', rest[11:15])[0]
|
||||
self.rlen = struct.unpack('>l', rest[15:19])[0]
|
||||
|
||||
self.FName = fname
|
||||
self.FInfo = FInfo()
|
||||
self.FInfo.Creator = creator
|
||||
self.FInfo.Type = type
|
||||
self.FInfo.Flags = flags
|
||||
|
||||
self.state = _DID_HEADER
|
||||
|
||||
def read(self, *n):
|
||||
if self.state != _DID_HEADER:
|
||||
raise Error('Read data at wrong time')
|
||||
if n:
|
||||
n = n[0]
|
||||
n = min(n, self.dlen)
|
||||
else:
|
||||
n = self.dlen
|
||||
rv = b''
|
||||
while len(rv) < n:
|
||||
rv = rv + self._read(n-len(rv))
|
||||
self.dlen = self.dlen - n
|
||||
return rv
|
||||
|
||||
def close_data(self):
|
||||
if self.state != _DID_HEADER:
|
||||
raise Error('close_data at wrong time')
|
||||
if self.dlen:
|
||||
dummy = self._read(self.dlen)
|
||||
self._checkcrc()
|
||||
self.state = _DID_DATA
|
||||
|
||||
def read_rsrc(self, *n):
|
||||
if self.state == _DID_HEADER:
|
||||
self.close_data()
|
||||
if self.state != _DID_DATA:
|
||||
raise Error('Read resource data at wrong time')
|
||||
if n:
|
||||
n = n[0]
|
||||
n = min(n, self.rlen)
|
||||
else:
|
||||
n = self.rlen
|
||||
self.rlen = self.rlen - n
|
||||
return self._read(n)
|
||||
|
||||
def close(self):
|
||||
if self.state is None:
|
||||
return
|
||||
try:
|
||||
if self.rlen:
|
||||
dummy = self.read_rsrc(self.rlen)
|
||||
self._checkcrc()
|
||||
finally:
|
||||
self.state = None
|
||||
self.ifp.close()
|
||||
|
||||
def hexbin(inp, out):
|
||||
"""hexbin(infilename, outfilename) - Decode binhexed file"""
|
||||
ifp = HexBin(inp)
|
||||
finfo = ifp.FInfo
|
||||
if not out:
|
||||
out = ifp.FName
|
||||
|
||||
with io.open(out, 'wb') as ofp:
|
||||
# XXXX Do translation on non-mac systems
|
||||
while True:
|
||||
d = ifp.read(128000)
|
||||
if not d: break
|
||||
ofp.write(d)
|
||||
ifp.close_data()
|
||||
|
||||
d = ifp.read_rsrc(128000)
|
||||
if d:
|
||||
ofp = openrsrc(out, 'wb')
|
||||
ofp.write(d)
|
||||
while True:
|
||||
d = ifp.read_rsrc(128000)
|
||||
if not d: break
|
||||
ofp.write(d)
|
||||
ofp.close()
|
||||
|
||||
ifp.close()
|
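# Illustrative round-trip of the easy interface (not part of the original
# module); the filenames are hypothetical:
#
#     binhex('photo.bin', 'photo.hqx')   # encode to BinHex 4.0
#     hexbin('photo.hqx', 'photo.out')   # decode back to the data fork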
92
third_party/python/Lib/bisect.py
vendored
Normal file
|
@@ -0,0 +1,92 @@
|
|||
"""Bisection algorithms."""
|
||||
|
||||
def insort_right(a, x, lo=0, hi=None):
|
||||
"""Insert item x in list a, and keep it sorted assuming a is sorted.
|
||||
|
||||
If x is already in a, insert it to the right of the rightmost x.
|
||||
|
||||
Optional args lo (default 0) and hi (default len(a)) bound the
|
||||
slice of a to be searched.
|
||||
"""
|
||||
|
||||
if lo < 0:
|
||||
raise ValueError('lo must be non-negative')
|
||||
if hi is None:
|
||||
hi = len(a)
|
||||
while lo < hi:
|
||||
mid = (lo+hi)//2
|
||||
if x < a[mid]: hi = mid
|
||||
else: lo = mid+1
|
||||
a.insert(lo, x)
|
||||
|
||||
insort = insort_right # backward compatibility
|
||||
|
||||
def bisect_right(a, x, lo=0, hi=None):
|
||||
"""Return the index where to insert item x in list a, assuming a is sorted.
|
||||
|
||||
The return value i is such that all e in a[:i] have e <= x, and all e in
|
||||
a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
|
||||
insert just after the rightmost x already there.
|
||||
|
||||
Optional args lo (default 0) and hi (default len(a)) bound the
|
||||
slice of a to be searched.
|
||||
"""
|
||||
|
||||
if lo < 0:
|
||||
raise ValueError('lo must be non-negative')
|
||||
if hi is None:
|
||||
hi = len(a)
|
||||
while lo < hi:
|
||||
mid = (lo+hi)//2
|
||||
if x < a[mid]: hi = mid
|
||||
else: lo = mid+1
|
||||
return lo
|
||||
|
||||
bisect = bisect_right # backward compatibility
|
||||
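# Illustrative doctest-style example (not part of the original module):
# bisect_right returns the insertion point to the right of equal items.
#
#     >>> a = [1, 2, 2, 4]
#     >>> bisect_right(a, 2)
#     3
#     >>> insort_right(a, 3); a
#     [1, 2, 2, 3, 4]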
|
||||
def insort_left(a, x, lo=0, hi=None):
|
||||
"""Insert item x in list a, and keep it sorted assuming a is sorted.
|
||||
|
||||
If x is already in a, insert it to the left of the leftmost x.
|
||||
|
||||
Optional args lo (default 0) and hi (default len(a)) bound the
|
||||
slice of a to be searched.
|
||||
"""
|
||||
|
||||
if lo < 0:
|
||||
raise ValueError('lo must be non-negative')
|
||||
if hi is None:
|
||||
hi = len(a)
|
||||
while lo < hi:
|
||||
mid = (lo+hi)//2
|
||||
if a[mid] < x: lo = mid+1
|
||||
else: hi = mid
|
||||
a.insert(lo, x)
|
||||
|
||||
|
||||
def bisect_left(a, x, lo=0, hi=None):
|
||||
"""Return the index where to insert item x in list a, assuming a is sorted.
|
||||
|
||||
The return value i is such that all e in a[:i] have e < x, and all e in
|
||||
a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
|
||||
insert just before the leftmost x already there.
|
||||
|
||||
Optional args lo (default 0) and hi (default len(a)) bound the
|
||||
slice of a to be searched.
|
||||
"""
|
||||
|
||||
if lo < 0:
|
||||
raise ValueError('lo must be non-negative')
|
||||
if hi is None:
|
||||
hi = len(a)
|
||||
while lo < hi:
|
||||
mid = (lo+hi)//2
|
||||
if a[mid] < x: lo = mid+1
|
||||
else: hi = mid
|
||||
return lo
|
||||
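# Illustrative doctest-style example (not part of the original module):
# bisect_left differs from bisect_right only when x is already present.
#
#     >>> bisect_left([1, 2, 2, 4], 2)
#     1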
|
||||
# Overwrite above definitions with a fast C implementation
|
||||
try:
|
||||
from _bisect import *
|
||||
except ImportError:
|
||||
pass
|
361
third_party/python/Lib/bz2.py
vendored
Normal file
|
@@ -0,0 +1,361 @@
|
|||
"""Interface to the libbzip2 compression library.
|
||||
|
||||
This module provides a file interface, classes for incremental
|
||||
(de)compression, and functions for one-shot (de)compression.
|
||||
"""
|
||||
|
||||
__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor",
|
||||
"open", "compress", "decompress"]
|
||||
|
||||
__author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"
|
||||
|
||||
from builtins import open as _builtin_open
|
||||
import io
|
||||
import os
|
||||
import warnings
|
||||
import _compression
|
||||
|
||||
try:
|
||||
from threading import RLock
|
||||
except ImportError:
|
||||
from dummy_threading import RLock
|
||||
|
||||
from _bz2 import BZ2Compressor, BZ2Decompressor
|
||||
|
||||
|
||||
_MODE_CLOSED = 0
|
||||
_MODE_READ = 1
|
||||
# Value 2 no longer used
|
||||
_MODE_WRITE = 3
|
||||
|
||||
|
||||
class BZ2File(_compression.BaseStream):
|
||||
|
||||
"""A file object providing transparent bzip2 (de)compression.
|
||||
|
||||
A BZ2File can act as a wrapper for an existing file object, or refer
|
||||
directly to a named file on disk.
|
||||
|
||||
Note that BZ2File provides a *binary* file interface - data read is
|
||||
returned as bytes, and data to be written should be given as bytes.
|
||||
"""
|
||||
|
||||
def __init__(self, filename, mode="r", buffering=None, compresslevel=9):
|
||||
"""Open a bzip2-compressed file.
|
||||
|
||||
If filename is a str, bytes, or PathLike object, it gives the
|
||||
name of the file to be opened. Otherwise, it should be a file
|
||||
object, which will be used to read or write the compressed data.
|
||||
|
||||
mode can be 'r' for reading (default), 'w' for (over)writing,
|
||||
'x' for creating exclusively, or 'a' for appending. These can
|
||||
equivalently be given as 'rb', 'wb', 'xb', and 'ab'.
|
||||
|
||||
buffering is ignored. Its use is deprecated.
|
||||
|
||||
If mode is 'w', 'x' or 'a', compresslevel can be a number between 1
|
||||
and 9 specifying the level of compression: 1 produces the least
|
||||
compression, and 9 (default) produces the most compression.
|
||||
|
||||
If mode is 'r', the input file may be the concatenation of
|
||||
multiple compressed streams.
|
||||
"""
|
||||
# This lock must be recursive, so that BufferedIOBase's
|
||||
# writelines() does not deadlock.
|
||||
self._lock = RLock()
|
||||
self._fp = None
|
||||
self._closefp = False
|
||||
self._mode = _MODE_CLOSED
|
||||
|
||||
if buffering is not None:
|
||||
warnings.warn("Use of 'buffering' argument is deprecated",
|
||||
DeprecationWarning)
|
||||
|
||||
if not (1 <= compresslevel <= 9):
|
||||
raise ValueError("compresslevel must be between 1 and 9")
|
||||
|
||||
if mode in ("", "r", "rb"):
|
||||
mode = "rb"
|
||||
mode_code = _MODE_READ
|
||||
elif mode in ("w", "wb"):
|
||||
mode = "wb"
|
||||
mode_code = _MODE_WRITE
|
||||
self._compressor = BZ2Compressor(compresslevel)
|
||||
elif mode in ("x", "xb"):
|
||||
mode = "xb"
|
||||
mode_code = _MODE_WRITE
|
||||
self._compressor = BZ2Compressor(compresslevel)
|
||||
elif mode in ("a", "ab"):
|
||||
mode = "ab"
|
||||
mode_code = _MODE_WRITE
|
||||
self._compressor = BZ2Compressor(compresslevel)
|
||||
else:
|
||||
raise ValueError("Invalid mode: %r" % (mode,))
|
||||
|
||||
if isinstance(filename, (str, bytes, os.PathLike)):
|
||||
self._fp = _builtin_open(filename, mode)
|
||||
self._closefp = True
|
||||
self._mode = mode_code
|
||||
elif hasattr(filename, "read") or hasattr(filename, "write"):
|
||||
self._fp = filename
|
||||
self._mode = mode_code
|
||||
else:
|
||||
raise TypeError("filename must be a str, bytes, file or PathLike object")
|
||||
|
||||
if self._mode == _MODE_READ:
|
||||
raw = _compression.DecompressReader(self._fp,
|
||||
BZ2Decompressor, trailing_error=OSError)
|
||||
self._buffer = io.BufferedReader(raw)
|
||||
else:
|
||||
self._pos = 0
|
||||
|
||||
def close(self):
|
||||
"""Flush and close the file.
|
||||
|
||||
May be called more than once without error. Once the file is
|
||||
closed, any other operation on it will raise a ValueError.
|
||||
"""
|
||||
with self._lock:
|
||||
if self._mode == _MODE_CLOSED:
|
||||
return
|
||||
try:
|
||||
if self._mode == _MODE_READ:
|
||||
self._buffer.close()
|
||||
elif self._mode == _MODE_WRITE:
|
||||
self._fp.write(self._compressor.flush())
|
||||
self._compressor = None
|
||||
finally:
|
||||
try:
|
||||
if self._closefp:
|
||||
self._fp.close()
|
||||
finally:
|
||||
self._fp = None
|
||||
self._closefp = False
|
||||
self._mode = _MODE_CLOSED
|
||||
self._buffer = None
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
"""True if this file is closed."""
|
||||
return self._mode == _MODE_CLOSED
|
||||
|
||||
def fileno(self):
|
||||
"""Return the file descriptor for the underlying file."""
|
||||
self._check_not_closed()
|
||||
return self._fp.fileno()
|
||||
|
||||
def seekable(self):
|
||||
"""Return whether the file supports seeking."""
|
||||
return self.readable() and self._buffer.seekable()
|
||||
|
||||
def readable(self):
|
||||
"""Return whether the file was opened for reading."""
|
||||
self._check_not_closed()
|
||||
return self._mode == _MODE_READ
|
||||
|
||||
def writable(self):
|
||||
"""Return whether the file was opened for writing."""
|
||||
self._check_not_closed()
|
||||
return self._mode == _MODE_WRITE
|
||||
|
||||
def peek(self, n=0):
|
||||
"""Return buffered data without advancing the file position.
|
||||
|
||||
Always returns at least one byte of data, unless at EOF.
|
||||
The exact number of bytes returned is unspecified.
|
||||
"""
|
||||
with self._lock:
|
||||
self._check_can_read()
|
||||
# Relies on the undocumented fact that BufferedReader.peek()
|
||||
# always returns at least one byte (except at EOF), independent
|
||||
# of the value of n
|
||||
return self._buffer.peek(n)
|
||||
|
||||
def read(self, size=-1):
|
||||
"""Read up to size uncompressed bytes from the file.
|
||||
|
||||
If size is negative or omitted, read until EOF is reached.
|
||||
Returns b'' if the file is already at EOF.
|
||||
"""
|
||||
with self._lock:
|
||||
self._check_can_read()
|
||||
return self._buffer.read(size)
|
||||
|
||||
def read1(self, size=-1):
|
||||
"""Read up to size uncompressed bytes, while trying to avoid
|
||||
making multiple reads from the underlying stream. Reads up to a
|
||||
buffer's worth of data if size is negative.
|
||||
|
||||
Returns b'' if the file is at EOF.
|
||||
"""
|
||||
with self._lock:
|
||||
self._check_can_read()
|
||||
if size < 0:
|
||||
size = io.DEFAULT_BUFFER_SIZE
|
||||
return self._buffer.read1(size)
|
||||
|
||||
def readinto(self, b):
|
||||
"""Read bytes into b.
|
||||
|
||||
Returns the number of bytes read (0 for EOF).
|
||||
"""
|
||||
with self._lock:
|
||||
self._check_can_read()
|
||||
return self._buffer.readinto(b)
|
||||
|
||||
def readline(self, size=-1):
|
||||
"""Read a line of uncompressed bytes from the file.
|
||||
|
||||
The terminating newline (if present) is retained. If size is
|
||||
non-negative, no more than size bytes will be read (in which
|
||||
case the line may be incomplete). Returns b'' if already at EOF.
|
||||
"""
|
||||
if not isinstance(size, int):
|
||||
if not hasattr(size, "__index__"):
|
||||
raise TypeError("Integer argument expected")
|
||||
size = size.__index__()
|
||||
with self._lock:
|
||||
self._check_can_read()
|
||||
return self._buffer.readline(size)
|
||||
|
||||
def readlines(self, size=-1):
|
||||
"""Read a list of lines of uncompressed bytes from the file.
|
||||
|
||||
size can be specified to control the number of lines read: no
|
||||
further lines will be read once the total size of the lines read
|
||||
so far equals or exceeds size.
|
||||
"""
|
||||
if not isinstance(size, int):
|
||||
if not hasattr(size, "__index__"):
|
||||
raise TypeError("Integer argument expected")
|
||||
size = size.__index__()
|
||||
with self._lock:
|
||||
self._check_can_read()
|
||||
return self._buffer.readlines(size)
|
||||
|
||||
def write(self, data):
|
||||
"""Write a byte string to the file.
|
||||
|
||||
Returns the number of uncompressed bytes written, which is
|
||||
always len(data). Note that due to buffering, the file on disk
|
||||
may not reflect the data written until close() is called.
|
||||
"""
|
||||
with self._lock:
|
||||
self._check_can_write()
|
||||
compressed = self._compressor.compress(data)
|
||||
self._fp.write(compressed)
|
||||
self._pos += len(data)
|
||||
return len(data)
|
||||
|
||||
def writelines(self, seq):
|
||||
"""Write a sequence of byte strings to the file.
|
||||
|
||||
Returns the number of uncompressed bytes written.
|
||||
seq can be any iterable yielding byte strings.
|
||||
|
||||
Line separators are not added between the written byte strings.
|
||||
"""
|
||||
with self._lock:
|
||||
return _compression.BaseStream.writelines(self, seq)
|
||||
|
||||
def seek(self, offset, whence=io.SEEK_SET):
|
||||
"""Change the file position.
|
||||
|
||||
The new position is specified by offset, relative to the
|
||||
position indicated by whence. Values for whence are:
|
||||
|
||||
0: start of stream (default); offset must not be negative
|
||||
1: current stream position
|
||||
2: end of stream; offset must not be positive
|
||||
|
||||
Returns the new file position.
|
||||
|
||||
Note that seeking is emulated, so depending on the parameters,
|
||||
this operation may be extremely slow.
|
||||
"""
|
||||
with self._lock:
|
||||
self._check_can_seek()
|
||||
return self._buffer.seek(offset, whence)
|
||||
|
||||
def tell(self):
|
||||
"""Return the current file position."""
|
||||
with self._lock:
|
||||
self._check_not_closed()
|
||||
if self._mode == _MODE_READ:
|
||||
return self._buffer.tell()
|
||||
return self._pos
|
||||
|
||||
|
||||
def open(filename, mode="rb", compresslevel=9,
|
||||
encoding=None, errors=None, newline=None):
|
||||
"""Open a bzip2-compressed file in binary or text mode.
|
||||
|
||||
The filename argument can be an actual filename (a str, bytes, or
|
||||
PathLike object), or an existing file object to read from or write
|
||||
to.
|
||||
|
||||
The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or
|
||||
"ab" for binary mode, or "rt", "wt", "xt" or "at" for text mode.
|
||||
The default mode is "rb", and the default compresslevel is 9.
|
||||
|
||||
For binary mode, this function is equivalent to the BZ2File
|
||||
constructor: BZ2File(filename, mode, compresslevel). In this case,
|
||||
the encoding, errors and newline arguments must not be provided.
|
||||
|
||||
For text mode, a BZ2File object is created, and wrapped in an
|
||||
io.TextIOWrapper instance with the specified encoding, error
|
||||
handling behavior, and line ending(s).
|
||||
|
||||
"""
|
||||
if "t" in mode:
|
||||
if "b" in mode:
|
||||
raise ValueError("Invalid mode: %r" % (mode,))
|
||||
else:
|
||||
if encoding is not None:
|
||||
raise ValueError("Argument 'encoding' not supported in binary mode")
|
||||
if errors is not None:
|
||||
raise ValueError("Argument 'errors' not supported in binary mode")
|
||||
if newline is not None:
|
||||
raise ValueError("Argument 'newline' not supported in binary mode")
|
||||
|
||||
bz_mode = mode.replace("t", "")
|
||||
binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel)
|
||||
|
||||
if "t" in mode:
|
||||
return io.TextIOWrapper(binary_file, encoding, errors, newline)
|
||||
else:
|
||||
return binary_file
|
||||
|
||||
|
||||
def compress(data, compresslevel=9):
|
||||
"""Compress a block of data.
|
||||
|
||||
compresslevel, if given, must be a number between 1 and 9.
|
||||
|
||||
For incremental compression, use a BZ2Compressor object instead.
|
||||
"""
|
||||
comp = BZ2Compressor(compresslevel)
|
||||
return comp.compress(data) + comp.flush()
|
||||
|
||||
|
||||
def decompress(data):
|
||||
"""Decompress a block of data.
|
||||
|
||||
For incremental decompression, use a BZ2Decompressor object instead.
|
||||
"""
|
||||
results = []
|
||||
while data:
|
||||
decomp = BZ2Decompressor()
|
||||
try:
|
||||
res = decomp.decompress(data)
|
||||
except OSError:
|
||||
if results:
|
||||
break # Leftover data is not a valid bzip2 stream; ignore it.
|
||||
else:
|
||||
raise # Error on the first iteration; bail out.
|
||||
results.append(res)
|
||||
if not decomp.eof:
|
||||
raise ValueError("Compressed data ended before the "
|
||||
"end-of-stream marker was reached")
|
||||
data = decomp.unused_data
|
||||
return b"".join(results)
161 third_party/python/Lib/cProfile.py vendored Executable file
@@ -0,0 +1,161 @@
#! /usr/bin/env python3

"""Python interface for the 'lsprof' profiler.
   Compatible with the 'profile' module.
"""

__all__ = ["run", "runctx", "Profile"]

import _lsprof
import profile as _pyprofile

# ____________________________________________________________
# Simple interface

def run(statement, filename=None, sort=-1):
    return _pyprofile._Utils(Profile).run(statement, filename, sort)

def runctx(statement, globals, locals, filename=None, sort=-1):
    return _pyprofile._Utils(Profile).runctx(statement, globals, locals,
                                             filename, sort)

run.__doc__ = _pyprofile.run.__doc__
runctx.__doc__ = _pyprofile.runctx.__doc__

# ____________________________________________________________

class Profile(_lsprof.Profiler):
    """Profile(timer=None, timeunit=None, subcalls=True, builtins=True)

    Builds a profiler object using the specified timer function.
    The default timer is a fast built-in one based on real time.
    For custom timer functions returning integers, timeunit can
    be a float specifying a scale (i.e. how long each integer unit
    is, in seconds).
    """

    # Most of the functionality is in the base class.
    # This subclass only adds convenient and backward-compatible methods.

    def print_stats(self, sort=-1):
        import pstats
        pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()

    def dump_stats(self, file):
        import marshal
        with open(file, 'wb') as f:
            self.create_stats()
            marshal.dump(self.stats, f)

    def create_stats(self):
        self.disable()
        self.snapshot_stats()

    def snapshot_stats(self):
        entries = self.getstats()
        self.stats = {}
        callersdicts = {}
        # call information
        for entry in entries:
            func = label(entry.code)
            nc = entry.callcount          # ncalls column of pstats (before '/')
            cc = nc - entry.reccallcount  # ncalls column of pstats (after '/')
            tt = entry.inlinetime         # tottime column of pstats
            ct = entry.totaltime          # cumtime column of pstats
            callers = {}
            callersdicts[id(entry.code)] = callers
            self.stats[func] = cc, nc, tt, ct, callers
        # subcall information
        for entry in entries:
            if entry.calls:
                func = label(entry.code)
                for subentry in entry.calls:
                    try:
                        callers = callersdicts[id(subentry.code)]
                    except KeyError:
                        continue
                    nc = subentry.callcount
                    cc = nc - subentry.reccallcount
                    tt = subentry.inlinetime
                    ct = subentry.totaltime
                    if func in callers:
                        prev = callers[func]
                        nc += prev[0]
                        cc += prev[1]
                        tt += prev[2]
                        ct += prev[3]
                    callers[func] = nc, cc, tt, ct

    # The following two methods can be called by clients to use
    # a profiler to profile a statement, given as a string.

    def run(self, cmd):
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals, locals):
        self.enable()
        try:
            exec(cmd, globals, locals)
        finally:
            self.disable()
        return self

    # This method is more useful to profile a single function call.
    def runcall(self, func, *args, **kw):
        self.enable()
        try:
            return func(*args, **kw)
        finally:
            self.disable()

# ____________________________________________________________

def label(code):
    if isinstance(code, str):
        return ('~', 0, code)    # built-in functions ('~' sorts at the end)
    else:
        return (code.co_filename, code.co_firstlineno, code.co_name)

# ____________________________________________________________

def main():
    import os, sys, pstats
    from optparse import OptionParser
    usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
        help="Save stats to <outfile>", default=None)
    parser.add_option('-s', '--sort', dest="sort",
        help="Sort order when printing to stdout, based on pstats.Stats class",
        default=-1,
        choices=sorted(pstats.Stats.sort_arg_dict_default))

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))
        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
            '__cached__': None,
        }
        runctx(code, globs, None, options.outfile, options.sort)
    else:
        parser.print_usage()
    return parser

# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
    main()
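A short sketch of the programmatic interface defined above (the profiled function is a stand-in):

    import cProfile

    def work():
        return sum(i * i for i in range(10000))

    p = cProfile.Profile()
    p.runcall(work)                     # profile a single function call
    p.print_stats(sort='cumulative')    # sort keys come from pstats.Stats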
713 third_party/python/Lib/calendar.py vendored Normal file
@@ -0,0 +1,713 @@
"""Calendar printing functions
|
||||
|
||||
Note when comparing these calendars to the ones printed by cal(1): By
|
||||
default, these calendars have Monday as the first day of the week, and
|
||||
Sunday as the last (the European convention). Use setfirstweekday() to
|
||||
set the first day of the week (0=Monday, 6=Sunday)."""
|
||||
|
||||
import sys
|
||||
import datetime
|
||||
import locale as _locale
|
||||
from itertools import repeat
|
||||
|
||||
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
|
||||
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
|
||||
"monthcalendar", "prmonth", "month", "prcal", "calendar",
|
||||
"timegm", "month_name", "month_abbr", "day_name", "day_abbr",
|
||||
"Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar",
|
||||
"LocaleHTMLCalendar", "weekheader"]
|
||||
|
||||
# Exception raised for bad input (with string parameter for details)
|
||||
error = ValueError
|
||||
|
||||
# Exceptions raised for bad input
|
||||
class IllegalMonthError(ValueError):
|
||||
def __init__(self, month):
|
||||
self.month = month
|
||||
def __str__(self):
|
||||
return "bad month number %r; must be 1-12" % self.month
|
||||
|
||||
|
||||
class IllegalWeekdayError(ValueError):
|
||||
def __init__(self, weekday):
|
||||
self.weekday = weekday
|
||||
def __str__(self):
|
||||
return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
|
||||
|
||||
|
||||
# Constants for months referenced later
|
||||
January = 1
|
||||
February = 2
|
||||
|
||||
# Number of days per month (except for February in leap years)
|
||||
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
|
||||
|
||||
# This module used to have hard-coded lists of day and month names, as
|
||||
# English strings. The classes following emulate a read-only version of
|
||||
# that, but supply localized names. Note that the values are computed
|
||||
# fresh on each call, in case the user changes locale between calls.
|
||||
|
||||
class _localized_month:
|
||||
|
||||
_months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
|
||||
_months.insert(0, lambda x: "")
|
||||
|
||||
def __init__(self, format):
|
||||
self.format = format
|
||||
|
||||
def __getitem__(self, i):
|
||||
funcs = self._months[i]
|
||||
if isinstance(i, slice):
|
||||
return [f(self.format) for f in funcs]
|
||||
else:
|
||||
return funcs(self.format)
|
||||
|
||||
def __len__(self):
|
||||
return 13
|
||||
|
||||
|
||||
class _localized_day:
|
||||
|
||||
# January 1, 2001, was a Monday.
|
||||
_days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
|
||||
|
||||
def __init__(self, format):
|
||||
self.format = format
|
||||
|
||||
def __getitem__(self, i):
|
||||
funcs = self._days[i]
|
||||
if isinstance(i, slice):
|
||||
return [f(self.format) for f in funcs]
|
||||
else:
|
||||
return funcs(self.format)
|
||||
|
||||
def __len__(self):
|
||||
return 7
|
||||
|
||||
|
||||
# Full and abbreviated names of weekdays
|
||||
day_name = _localized_day('%A')
|
||||
day_abbr = _localized_day('%a')
|
||||
|
||||
# Full and abbreviated names of months (1-based arrays!!!)
|
||||
month_name = _localized_month('%B')
|
||||
month_abbr = _localized_month('%b')
|
||||
|
||||
# Constants for weekdays
|
||||
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
|
||||
|
||||
|
||||
def isleap(year):
|
||||
"""Return True for leap years, False for non-leap years."""
|
||||
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
|
||||
|
||||
|
||||
def leapdays(y1, y2):
|
||||
"""Return number of leap years in range [y1, y2).
|
||||
Assume y1 <= y2."""
|
||||
y1 -= 1
|
||||
y2 -= 1
|
||||
return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
|
||||
|
||||
|
||||
def weekday(year, month, day):
|
||||
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
|
||||
day (1-31)."""
|
||||
return datetime.date(year, month, day).weekday()
|
||||
|
||||
|
||||
def monthrange(year, month):
|
||||
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
|
||||
year, month."""
|
||||
if not 1 <= month <= 12:
|
||||
raise IllegalMonthError(month)
|
||||
day1 = weekday(year, month, 1)
|
||||
ndays = mdays[month] + (month == February and isleap(year))
|
||||
return day1, ndays
|
||||
|
||||
|
||||
class Calendar(object):
|
||||
"""
|
||||
Base calendar class. This class doesn't do any formatting. It simply
|
||||
provides data to subclasses.
|
||||
"""
|
||||
|
||||
def __init__(self, firstweekday=0):
|
||||
self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
|
||||
|
||||
def getfirstweekday(self):
|
||||
return self._firstweekday % 7
|
||||
|
||||
def setfirstweekday(self, firstweekday):
|
||||
self._firstweekday = firstweekday
|
||||
|
||||
firstweekday = property(getfirstweekday, setfirstweekday)
|
||||
|
||||
def iterweekdays(self):
|
||||
"""
|
||||
Return an iterator for one week of weekday numbers starting with the
|
||||
configured first one.
|
||||
"""
|
||||
for i in range(self.firstweekday, self.firstweekday + 7):
|
||||
yield i%7
|
||||
|
||||
def itermonthdates(self, year, month):
|
||||
"""
|
||||
Return an iterator for one month. The iterator will yield datetime.date
|
||||
values and will always iterate through complete weeks, so it will yield
|
||||
dates outside the specified month.
|
||||
"""
|
||||
date = datetime.date(year, month, 1)
|
||||
# Go back to the beginning of the week
|
||||
days = (date.weekday() - self.firstweekday) % 7
|
||||
date -= datetime.timedelta(days=days)
|
||||
oneday = datetime.timedelta(days=1)
|
||||
while True:
|
||||
yield date
|
||||
try:
|
||||
date += oneday
|
||||
except OverflowError:
|
||||
# Adding one day could fail after datetime.MAXYEAR
|
||||
break
|
||||
if date.month != month and date.weekday() == self.firstweekday:
|
||||
break
|
||||
|
||||
def itermonthdays2(self, year, month):
|
||||
"""
|
||||
Like itermonthdates(), but will yield (day number, weekday number)
|
||||
tuples. For days outside the specified month the day number is 0.
|
||||
"""
|
||||
for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday):
|
||||
yield d, i % 7
|
||||
|
||||
def itermonthdays(self, year, month):
|
||||
"""
|
||||
Like itermonthdates(), but will yield day numbers. For days outside
|
||||
the specified month the day number is 0.
|
||||
"""
|
||||
day1, ndays = monthrange(year, month)
|
||||
days_before = (day1 - self.firstweekday) % 7
|
||||
yield from repeat(0, days_before)
|
||||
yield from range(1, ndays + 1)
|
||||
days_after = (self.firstweekday - day1 - ndays) % 7
|
||||
yield from repeat(0, days_after)
|
||||
|
||||
def monthdatescalendar(self, year, month):
|
||||
"""
|
||||
Return a matrix (list of lists) representing a month's calendar.
|
||||
Each row represents a week; week entries are datetime.date values.
|
||||
"""
|
||||
dates = list(self.itermonthdates(year, month))
|
||||
return [ dates[i:i+7] for i in range(0, len(dates), 7) ]
|
||||
|
||||
def monthdays2calendar(self, year, month):
|
||||
"""
|
||||
Return a matrix representing a month's calendar.
|
||||
Each row represents a week; week entries are
|
||||
(day number, weekday number) tuples. Day numbers outside this month
|
||||
are zero.
|
||||
"""
|
||||
days = list(self.itermonthdays2(year, month))
|
||||
return [ days[i:i+7] for i in range(0, len(days), 7) ]
|
||||
|
||||
def monthdayscalendar(self, year, month):
|
||||
"""
|
||||
Return a matrix representing a month's calendar.
|
||||
Each row represents a week; days outside this month are zero.
|
||||
"""
|
||||
days = list(self.itermonthdays(year, month))
|
||||
return [ days[i:i+7] for i in range(0, len(days), 7) ]
|
||||
|
||||
def yeardatescalendar(self, year, width=3):
|
||||
"""
|
||||
Return the data for the specified year ready for formatting. The return
|
||||
value is a list of month rows. Each month row contains up to width months.
|
||||
Each month contains between 4 and 6 weeks and each week contains 1-7
|
||||
days. Days are datetime.date objects.
|
||||
"""
|
||||
months = [
|
||||
self.monthdatescalendar(year, i)
|
||||
for i in range(January, January+12)
|
||||
]
|
||||
return [months[i:i+width] for i in range(0, len(months), width) ]
|
||||
|
||||
def yeardays2calendar(self, year, width=3):
|
||||
"""
|
||||
Return the data for the specified year ready for formatting (similar to
|
||||
yeardatescalendar()). Entries in the week lists are
|
||||
(day number, weekday number) tuples. Day numbers outside this month are
|
||||
zero.
|
||||
"""
|
||||
months = [
|
||||
self.monthdays2calendar(year, i)
|
||||
for i in range(January, January+12)
|
||||
]
|
||||
return [months[i:i+width] for i in range(0, len(months), width) ]
|
||||
|
||||
def yeardayscalendar(self, year, width=3):
|
||||
"""
|
||||
Return the data for the specified year ready for formatting (similar to
|
||||
yeardatescalendar()). Entries in the week lists are day numbers.
|
||||
Day numbers outside this month are zero.
|
||||
"""
|
||||
months = [
|
||||
self.monthdayscalendar(year, i)
|
||||
for i in range(January, January+12)
|
||||
]
|
||||
return [months[i:i+width] for i in range(0, len(months), width) ]
|
||||
|
||||
|
||||
class TextCalendar(Calendar):
|
||||
"""
|
||||
Subclass of Calendar that outputs a calendar as a simple plain text
|
||||
similar to the UNIX program cal.
|
||||
"""
|
||||
|
||||
def prweek(self, theweek, width):
|
||||
"""
|
||||
Print a single week (no newline).
|
||||
"""
|
||||
print(self.formatweek(theweek, width), end=' ')
|
||||
|
||||
def formatday(self, day, weekday, width):
|
||||
"""
|
||||
Returns a formatted day.
|
||||
"""
|
||||
if day == 0:
|
||||
s = ''
|
||||
else:
|
||||
s = '%2i' % day # right-align single-digit days
|
||||
return s.center(width)
|
||||
|
||||
def formatweek(self, theweek, width):
|
||||
"""
|
||||
Returns a single week in a string (no newline).
|
||||
"""
|
||||
return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
|
||||
|
||||
def formatweekday(self, day, width):
|
||||
"""
|
||||
Returns a formatted week day name.
|
||||
"""
|
||||
if width >= 9:
|
||||
names = day_name
|
||||
else:
|
||||
names = day_abbr
|
||||
return names[day][:width].center(width)
|
||||
|
||||
def formatweekheader(self, width):
|
||||
"""
|
||||
Return a header for a week.
|
||||
"""
|
||||
return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
|
||||
|
||||
def formatmonthname(self, theyear, themonth, width, withyear=True):
|
||||
"""
|
||||
Return a formatted month name.
|
||||
"""
|
||||
s = month_name[themonth]
|
||||
if withyear:
|
||||
s = "%s %r" % (s, theyear)
|
||||
return s.center(width)
|
||||
|
||||
def prmonth(self, theyear, themonth, w=0, l=0):
|
||||
"""
|
||||
Print a month's calendar.
|
||||
"""
|
||||
print(self.formatmonth(theyear, themonth, w, l), end='')
|
||||
|
||||
def formatmonth(self, theyear, themonth, w=0, l=0):
|
||||
"""
|
||||
Return a month's calendar string (multi-line).
|
||||
"""
|
||||
w = max(2, w)
|
||||
l = max(1, l)
|
||||
s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
|
||||
s = s.rstrip()
|
||||
s += '\n' * l
|
||||
s += self.formatweekheader(w).rstrip()
|
||||
s += '\n' * l
|
||||
for week in self.monthdays2calendar(theyear, themonth):
|
||||
s += self.formatweek(week, w).rstrip()
|
||||
s += '\n' * l
|
||||
return s
|
||||
|
||||
def formatyear(self, theyear, w=2, l=1, c=6, m=3):
|
||||
"""
|
||||
Returns a year's calendar as a multi-line string.
|
||||
"""
|
||||
w = max(2, w)
|
||||
l = max(1, l)
|
||||
c = max(2, c)
|
||||
colwidth = (w + 1) * 7 - 1
|
||||
v = []
|
||||
a = v.append
|
||||
a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
|
||||
a('\n'*l)
|
||||
header = self.formatweekheader(w)
|
||||
for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
|
||||
# months in this row
|
||||
months = range(m*i+1, min(m*(i+1)+1, 13))
|
||||
a('\n'*l)
|
||||
names = (self.formatmonthname(theyear, k, colwidth, False)
|
||||
for k in months)
|
||||
a(formatstring(names, colwidth, c).rstrip())
|
||||
a('\n'*l)
|
||||
headers = (header for k in months)
|
||||
a(formatstring(headers, colwidth, c).rstrip())
|
||||
a('\n'*l)
|
||||
# max number of weeks for this row
|
||||
height = max(len(cal) for cal in row)
|
||||
for j in range(height):
|
||||
weeks = []
|
||||
for cal in row:
|
||||
if j >= len(cal):
|
||||
weeks.append('')
|
||||
else:
|
||||
weeks.append(self.formatweek(cal[j], w))
|
||||
a(formatstring(weeks, colwidth, c).rstrip())
|
||||
a('\n' * l)
|
||||
return ''.join(v)
|
||||
|
||||
def pryear(self, theyear, w=0, l=0, c=6, m=3):
|
||||
"""Print a year's calendar."""
|
||||
print(self.formatyear(theyear, w, l, c, m))
|
||||
|
||||
|
||||
class HTMLCalendar(Calendar):
|
||||
"""
|
||||
This calendar returns complete HTML pages.
|
||||
"""
|
||||
|
||||
# CSS classes for the day <td>s
|
||||
cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
|
||||
|
||||
def formatday(self, day, weekday):
|
||||
"""
|
||||
Return a day as a table cell.
|
||||
"""
|
||||
if day == 0:
|
||||
return '<td class="noday"> </td>' # day outside month
|
||||
else:
|
||||
return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
|
||||
|
||||
def formatweek(self, theweek):
|
||||
"""
|
||||
Return a complete week as a table row.
|
||||
"""
|
||||
s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
|
||||
return '<tr>%s</tr>' % s
|
||||
|
||||
def formatweekday(self, day):
|
||||
"""
|
||||
Return a weekday name as a table header.
|
||||
"""
|
||||
return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
|
||||
|
||||
def formatweekheader(self):
|
||||
"""
|
||||
Return a header for a week as a table row.
|
||||
"""
|
||||
s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
|
||||
return '<tr>%s</tr>' % s
|
||||
|
||||
def formatmonthname(self, theyear, themonth, withyear=True):
|
||||
"""
|
||||
Return a month name as a table row.
|
||||
"""
|
||||
if withyear:
|
||||
s = '%s %s' % (month_name[themonth], theyear)
|
||||
else:
|
||||
s = '%s' % month_name[themonth]
|
||||
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
|
||||
|
||||
def formatmonth(self, theyear, themonth, withyear=True):
|
||||
"""
|
||||
Return a formatted month as a table.
|
||||
"""
|
||||
v = []
|
||||
a = v.append
|
||||
a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
|
||||
a('\n')
|
||||
a(self.formatmonthname(theyear, themonth, withyear=withyear))
|
||||
a('\n')
|
||||
a(self.formatweekheader())
|
||||
a('\n')
|
||||
for week in self.monthdays2calendar(theyear, themonth):
|
||||
a(self.formatweek(week))
|
||||
a('\n')
|
||||
a('</table>')
|
||||
a('\n')
|
||||
return ''.join(v)
|
||||
|
||||
def formatyear(self, theyear, width=3):
|
||||
"""
|
||||
Return a formatted year as a table of tables.
|
||||
"""
|
||||
v = []
|
||||
a = v.append
|
||||
width = max(width, 1)
|
||||
a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
|
||||
a('\n')
|
||||
a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
|
||||
for i in range(January, January+12, width):
|
||||
# months in this row
|
||||
months = range(i, min(i+width, 13))
|
||||
a('<tr>')
|
||||
for m in months:
|
||||
a('<td>')
|
||||
a(self.formatmonth(theyear, m, withyear=False))
|
||||
a('</td>')
|
||||
a('</tr>')
|
||||
a('</table>')
|
||||
return ''.join(v)
|
||||
|
||||
def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
|
||||
"""
|
||||
Return a formatted year as a complete HTML page.
|
||||
"""
|
||||
if encoding is None:
|
||||
encoding = sys.getdefaultencoding()
|
||||
v = []
|
||||
a = v.append
|
||||
a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
|
||||
a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
|
||||
a('<html>\n')
|
||||
a('<head>\n')
|
||||
a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
|
||||
if css is not None:
|
||||
a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
|
||||
a('<title>Calendar for %d</title>\n' % theyear)
|
||||
a('</head>\n')
|
||||
a('<body>\n')
|
||||
a(self.formatyear(theyear, width))
|
||||
a('</body>\n')
|
||||
a('</html>\n')
|
||||
return ''.join(v).encode(encoding, "xmlcharrefreplace")
|
||||
|
||||
|
||||
class different_locale:
|
||||
def __init__(self, locale):
|
||||
self.locale = locale
|
||||
|
||||
def __enter__(self):
|
||||
self.oldlocale = _locale.getlocale(_locale.LC_TIME)
|
||||
_locale.setlocale(_locale.LC_TIME, self.locale)
|
||||
|
||||
def __exit__(self, *args):
|
||||
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
|
||||
|
||||
|
||||
class LocaleTextCalendar(TextCalendar):
|
||||
"""
|
||||
This class can be passed a locale name in the constructor and will return
|
||||
month and weekday names in the specified locale. If this locale includes
|
||||
an encoding all strings containing month and weekday names will be returned
|
||||
as unicode.
|
||||
"""
|
||||
|
||||
def __init__(self, firstweekday=0, locale=None):
|
||||
TextCalendar.__init__(self, firstweekday)
|
||||
if locale is None:
|
||||
locale = _locale.getdefaultlocale()
|
||||
self.locale = locale
|
||||
|
||||
def formatweekday(self, day, width):
|
||||
with different_locale(self.locale):
|
||||
if width >= 9:
|
||||
names = day_name
|
||||
else:
|
||||
names = day_abbr
|
||||
name = names[day]
|
||||
return name[:width].center(width)
|
||||
|
||||
def formatmonthname(self, theyear, themonth, width, withyear=True):
|
||||
with different_locale(self.locale):
|
||||
s = month_name[themonth]
|
||||
if withyear:
|
||||
s = "%s %r" % (s, theyear)
|
||||
return s.center(width)
|
||||
|
||||
|
||||
class LocaleHTMLCalendar(HTMLCalendar):
|
||||
"""
|
||||
This class can be passed a locale name in the constructor and will return
|
||||
month and weekday names in the specified locale. If this locale includes
|
||||
an encoding all strings containing month and weekday names will be returned
|
||||
as unicode.
|
||||
"""
|
||||
def __init__(self, firstweekday=0, locale=None):
|
||||
HTMLCalendar.__init__(self, firstweekday)
|
||||
if locale is None:
|
||||
locale = _locale.getdefaultlocale()
|
||||
self.locale = locale
|
||||
|
||||
def formatweekday(self, day):
|
||||
with different_locale(self.locale):
|
||||
s = day_abbr[day]
|
||||
return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
|
||||
|
||||
def formatmonthname(self, theyear, themonth, withyear=True):
|
||||
with different_locale(self.locale):
|
||||
s = month_name[themonth]
|
||||
if withyear:
|
||||
s = '%s %s' % (s, theyear)
|
||||
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
|
||||
|
||||
|
||||
# Support for old module level interface
|
||||
c = TextCalendar()
|
||||
|
||||
firstweekday = c.getfirstweekday
|
||||
|
||||
def setfirstweekday(firstweekday):
|
||||
if not MONDAY <= firstweekday <= SUNDAY:
|
||||
raise IllegalWeekdayError(firstweekday)
|
||||
c.firstweekday = firstweekday
|
||||
|
||||
monthcalendar = c.monthdayscalendar
|
||||
prweek = c.prweek
|
||||
week = c.formatweek
|
||||
weekheader = c.formatweekheader
|
||||
prmonth = c.prmonth
|
||||
month = c.formatmonth
|
||||
calendar = c.formatyear
|
||||
prcal = c.pryear
|
||||
|
||||
|
||||
# Spacing of month columns for multi-column year calendar
|
||||
_colwidth = 7*3 - 1 # Amount printed by prweek()
|
||||
_spacing = 6 # Number of spaces between columns
|
||||
|
||||
|
||||
def format(cols, colwidth=_colwidth, spacing=_spacing):
|
||||
"""Prints multi-column formatting for year calendars"""
|
||||
print(formatstring(cols, colwidth, spacing))
|
||||
|
||||
|
||||
def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
|
||||
"""Returns a string formatted from n strings, centered within n columns."""
|
||||
spacing *= ' '
|
||||
return spacing.join(c.center(colwidth) for c in cols)
|
||||
|
||||
|
||||
EPOCH = 1970
|
||||
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
|
||||
|
||||
|
||||
def timegm(tuple):
|
||||
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
|
||||
year, month, day, hour, minute, second = tuple[:6]
|
||||
days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
|
||||
hours = days*24 + hour
|
||||
minutes = hours*60 + minute
|
||||
seconds = minutes*60 + second
|
||||
return seconds
|
||||
|
||||
|
||||
def main(args):
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
textgroup = parser.add_argument_group('text only arguments')
|
||||
htmlgroup = parser.add_argument_group('html only arguments')
|
||||
textgroup.add_argument(
|
||||
"-w", "--width",
|
||||
type=int, default=2,
|
||||
help="width of date column (default 2)"
|
||||
)
|
||||
textgroup.add_argument(
|
||||
"-l", "--lines",
|
||||
type=int, default=1,
|
||||
help="number of lines for each week (default 1)"
|
||||
)
|
||||
textgroup.add_argument(
|
||||
"-s", "--spacing",
|
||||
type=int, default=6,
|
||||
help="spacing between months (default 6)"
|
||||
)
|
||||
textgroup.add_argument(
|
||||
"-m", "--months",
|
||||
type=int, default=3,
|
||||
help="months per row (default 3)"
|
||||
)
|
||||
htmlgroup.add_argument(
|
||||
"-c", "--css",
|
||||
default="calendar.css",
|
||||
help="CSS to use for page"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-L", "--locale",
|
||||
default=None,
|
||||
help="locale to be used from month and weekday names"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-e", "--encoding",
|
||||
default=None,
|
||||
help="encoding to use for output"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-t", "--type",
|
||||
default="text",
|
||||
choices=("text", "html"),
|
||||
help="output type (text or html)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"year",
|
||||
nargs='?', type=int,
|
||||
help="year number (1-9999)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"month",
|
||||
nargs='?', type=int,
|
||||
help="month number (1-12, text only)"
|
||||
)
|
||||
|
||||
options = parser.parse_args(args[1:])
|
||||
|
||||
if options.locale and not options.encoding:
|
||||
parser.error("if --locale is specified --encoding is required")
|
||||
sys.exit(1)
|
||||
|
||||
locale = options.locale, options.encoding
|
||||
|
||||
if options.type == "html":
|
||||
if options.locale:
|
||||
cal = LocaleHTMLCalendar(locale=locale)
|
||||
else:
|
||||
cal = HTMLCalendar()
|
||||
encoding = options.encoding
|
||||
if encoding is None:
|
||||
encoding = sys.getdefaultencoding()
|
||||
optdict = dict(encoding=encoding, css=options.css)
|
||||
write = sys.stdout.buffer.write
|
||||
if options.year is None:
|
||||
write(cal.formatyearpage(datetime.date.today().year, **optdict))
|
||||
elif options.month is None:
|
||||
write(cal.formatyearpage(options.year, **optdict))
|
||||
else:
|
||||
parser.error("incorrect number of arguments")
|
||||
sys.exit(1)
|
||||
else:
|
||||
if options.locale:
|
||||
cal = LocaleTextCalendar(locale=locale)
|
||||
else:
|
||||
cal = TextCalendar()
|
||||
optdict = dict(w=options.width, l=options.lines)
|
||||
if options.month is None:
|
||||
optdict["c"] = options.spacing
|
||||
optdict["m"] = options.months
|
||||
if options.year is None:
|
||||
result = cal.formatyear(datetime.date.today().year, **optdict)
|
||||
elif options.month is None:
|
||||
result = cal.formatyear(options.year, **optdict)
|
||||
else:
|
||||
result = cal.formatmonth(options.year, options.month, **optdict)
|
||||
write = sys.stdout.write
|
||||
if options.encoding:
|
||||
result = result.encode(options.encoding)
|
||||
write = sys.stdout.buffer.write
|
||||
write(result)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
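A few illustrative calls against the module-level interface defined above:

    import calendar, time

    calendar.setfirstweekday(calendar.SUNDAY)   # cal(1)-style weeks
    print(calendar.month(2017, 3))              # plain-text month grid
    assert calendar.isleap(2016) and not calendar.isleap(2017)
    assert calendar.monthrange(2017, 2) == (2, 28)   # Feb 2017 starts Wednesday
    assert calendar.timegm(time.gmtime(0)) == 0      # inverse of time.gmtime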
1099 third_party/python/Lib/cgi.py vendored Executable file
(File diff suppressed because it is too large)
319 third_party/python/Lib/cgitb.py vendored Normal file
@@ -0,0 +1,319 @@
"""More comprehensive traceback formatting for Python scripts.
|
||||
|
||||
To enable this module, do:
|
||||
|
||||
import cgitb; cgitb.enable()
|
||||
|
||||
at the top of your script. The optional arguments to enable() are:
|
||||
|
||||
display - if true, tracebacks are displayed in the web browser
|
||||
logdir - if set, tracebacks are written to files in this directory
|
||||
context - number of lines of source code to show for each stack frame
|
||||
format - 'text' or 'html' controls the output format
|
||||
|
||||
By default, tracebacks are displayed but not saved, the context is 5 lines
|
||||
and the output format is 'html' (for backwards compatibility with the
|
||||
original use of this module)
|
||||
|
||||
Alternatively, if you have caught an exception and want cgitb to display it
|
||||
for you, call cgitb.handler(). The optional argument to handler() is a
|
||||
3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
|
||||
The default handler displays output as HTML.
|
||||
|
||||
"""
|
||||
import inspect
|
||||
import keyword
|
||||
import linecache
|
||||
import os
|
||||
import pydoc
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import tokenize
|
||||
import traceback
|
||||
|
||||
def reset():
|
||||
"""Return a string that resets the CGI and browser to a known state."""
|
||||
return '''<!--: spam
|
||||
Content-Type: text/html
|
||||
|
||||
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
|
||||
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
|
||||
</font> </font> </font> </script> </object> </blockquote> </pre>
|
||||
</table> </table> </table> </table> </table> </font> </font> </font>'''
|
||||
|
||||
__UNDEF__ = [] # a special sentinel object
|
||||
def small(text):
|
||||
if text:
|
||||
return '<small>' + text + '</small>'
|
||||
else:
|
||||
return ''
|
||||
|
||||
def strong(text):
|
||||
if text:
|
||||
return '<strong>' + text + '</strong>'
|
||||
else:
|
||||
return ''
|
||||
|
||||
def grey(text):
|
||||
if text:
|
||||
return '<font color="#909090">' + text + '</font>'
|
||||
else:
|
||||
return ''
|
||||
|
||||
def lookup(name, frame, locals):
|
||||
"""Find the value for a given name in the given environment."""
|
||||
if name in locals:
|
||||
return 'local', locals[name]
|
||||
if name in frame.f_globals:
|
||||
return 'global', frame.f_globals[name]
|
||||
if '__builtins__' in frame.f_globals:
|
||||
builtins = frame.f_globals['__builtins__']
|
||||
if type(builtins) is type({}):
|
||||
if name in builtins:
|
||||
return 'builtin', builtins[name]
|
||||
else:
|
||||
if hasattr(builtins, name):
|
||||
return 'builtin', getattr(builtins, name)
|
||||
return None, __UNDEF__
|
||||
|
||||
def scanvars(reader, frame, locals):
|
||||
"""Scan one logical line of Python and look up values of variables used."""
|
||||
vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
|
||||
for ttype, token, start, end, line in tokenize.generate_tokens(reader):
|
||||
if ttype == tokenize.NEWLINE: break
|
||||
if ttype == tokenize.NAME and token not in keyword.kwlist:
|
||||
if lasttoken == '.':
|
||||
if parent is not __UNDEF__:
|
||||
value = getattr(parent, token, __UNDEF__)
|
||||
vars.append((prefix + token, prefix, value))
|
||||
else:
|
||||
where, value = lookup(token, frame, locals)
|
||||
vars.append((token, where, value))
|
||||
elif token == '.':
|
||||
prefix += lasttoken + '.'
|
||||
parent = value
|
||||
else:
|
||||
parent, prefix = None, ''
|
||||
lasttoken = token
|
||||
return vars
|
||||
|
||||
def html(einfo, context=5):
|
||||
"""Return a nice HTML document describing a given traceback."""
|
||||
etype, evalue, etb = einfo
|
||||
if isinstance(etype, type):
|
||||
etype = etype.__name__
|
||||
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
|
||||
date = time.ctime(time.time())
|
||||
head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
|
||||
'<big><big>%s</big></big>' %
|
||||
strong(pydoc.html.escape(str(etype))),
|
||||
'#ffffff', '#6622aa', pyver + '<br>' + date) + '''
|
||||
<p>A problem occurred in a Python script. Here is the sequence of
|
||||
function calls leading up to the error, in the order they occurred.</p>'''
|
||||
|
||||
indent = '<tt>' + small(' ' * 5) + ' </tt>'
|
||||
frames = []
|
||||
records = inspect.getinnerframes(etb, context)
|
||||
for frame, file, lnum, func, lines, index in records:
|
||||
if file:
|
||||
file = os.path.abspath(file)
|
||||
link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
|
||||
else:
|
||||
file = link = '?'
|
||||
args, varargs, varkw, locals = inspect.getargvalues(frame)
|
||||
call = ''
|
||||
if func != '?':
|
||||
call = 'in ' + strong(pydoc.html.escape(func)) + \
|
||||
inspect.formatargvalues(args, varargs, varkw, locals,
|
||||
formatvalue=lambda value: '=' + pydoc.html.repr(value))
|
||||
|
||||
highlight = {}
|
||||
def reader(lnum=[lnum]):
|
||||
highlight[lnum[0]] = 1
|
||||
try: return linecache.getline(file, lnum[0])
|
||||
finally: lnum[0] += 1
|
||||
vars = scanvars(reader, frame, locals)
|
||||
|
||||
rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
|
||||
('<big> </big>', link, call)]
|
||||
if index is not None:
|
||||
i = lnum - index
|
||||
for line in lines:
|
||||
num = small(' ' * (5-len(str(i))) + str(i)) + ' '
|
||||
if i in highlight:
|
||||
line = '<tt>=>%s%s</tt>' % (num, pydoc.html.preformat(line))
|
||||
rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
|
||||
else:
|
||||
line = '<tt> %s%s</tt>' % (num, pydoc.html.preformat(line))
|
||||
rows.append('<tr><td>%s</td></tr>' % grey(line))
|
||||
i += 1
|
||||
|
||||
done, dump = {}, []
|
||||
for name, where, value in vars:
|
||||
if name in done: continue
|
||||
done[name] = 1
|
||||
if value is not __UNDEF__:
|
||||
if where in ('global', 'builtin'):
|
||||
name = ('<em>%s</em> ' % where) + strong(name)
|
||||
elif where == 'local':
|
||||
name = strong(name)
|
||||
else:
|
||||
name = where + strong(name.split('.')[-1])
|
||||
dump.append('%s = %s' % (name, pydoc.html.repr(value)))
|
||||
else:
|
||||
dump.append(name + ' <em>undefined</em>')
|
||||
|
||||
rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
|
||||
frames.append('''
|
||||
<table width="100%%" cellspacing=0 cellpadding=0 border=0>
|
||||
%s</table>''' % '\n'.join(rows))
|
||||
|
||||
exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
|
||||
pydoc.html.escape(str(evalue)))]
|
||||
for name in dir(evalue):
|
||||
if name[:1] == '_': continue
|
||||
value = pydoc.html.repr(getattr(evalue, name))
|
||||
exception.append('\n<br>%s%s =\n%s' % (indent, name, value))
|
||||
|
||||
return head + ''.join(frames) + ''.join(exception) + '''
|
||||
|
||||
|
||||
<!-- The above is a description of an error in a Python program, formatted
|
||||
for a Web browser because the 'cgitb' module was enabled. In case you
|
||||
are not reading this in a Web browser, here is the original traceback:
|
||||
|
||||
%s
|
||||
-->
|
||||
''' % pydoc.html.escape(
|
||||
''.join(traceback.format_exception(etype, evalue, etb)))
|
||||
|
||||
def text(einfo, context=5):
|
||||
"""Return a plain text document describing a given traceback."""
|
||||
etype, evalue, etb = einfo
|
||||
if isinstance(etype, type):
|
||||
etype = etype.__name__
|
||||
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
|
||||
date = time.ctime(time.time())
|
||||
head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
|
||||
A problem occurred in a Python script. Here is the sequence of
|
||||
function calls leading up to the error, in the order they occurred.
|
||||
'''
|
||||
|
||||
frames = []
|
||||
records = inspect.getinnerframes(etb, context)
|
||||
for frame, file, lnum, func, lines, index in records:
|
||||
file = file and os.path.abspath(file) or '?'
|
||||
args, varargs, varkw, locals = inspect.getargvalues(frame)
|
||||
call = ''
|
||||
if func != '?':
|
||||
call = 'in ' + func + \
|
||||
inspect.formatargvalues(args, varargs, varkw, locals,
|
||||
formatvalue=lambda value: '=' + pydoc.text.repr(value))
|
||||
|
||||
highlight = {}
|
||||
def reader(lnum=[lnum]):
|
||||
highlight[lnum[0]] = 1
|
||||
try: return linecache.getline(file, lnum[0])
|
||||
finally: lnum[0] += 1
|
||||
vars = scanvars(reader, frame, locals)
|
||||
|
||||
rows = [' %s %s' % (file, call)]
|
||||
if index is not None:
|
||||
i = lnum - index
|
||||
for line in lines:
|
||||
num = '%5d ' % i
|
||||
rows.append(num+line.rstrip())
|
||||
i += 1
|
||||
|
||||
done, dump = {}, []
|
||||
for name, where, value in vars:
|
||||
if name in done: continue
|
||||
done[name] = 1
|
||||
if value is not __UNDEF__:
|
||||
if where == 'global': name = 'global ' + name
|
||||
elif where != 'local': name = where + name.split('.')[-1]
|
||||
dump.append('%s = %s' % (name, pydoc.text.repr(value)))
|
||||
else:
|
||||
dump.append(name + ' undefined')
|
||||
|
||||
rows.append('\n'.join(dump))
|
||||
frames.append('\n%s\n' % '\n'.join(rows))
|
||||
|
||||
exception = ['%s: %s' % (str(etype), str(evalue))]
|
||||
for name in dir(evalue):
|
||||
value = pydoc.text.repr(getattr(evalue, name))
|
||||
exception.append('\n%s%s = %s' % (" "*4, name, value))
|
||||
|
||||
return head + ''.join(frames) + ''.join(exception) + '''
|
||||
|
||||
The above is a description of an error in a Python program. Here is
|
||||
the original traceback:
|
||||
|
||||
%s
|
||||
''' % ''.join(traceback.format_exception(etype, evalue, etb))
|
||||
|
||||
class Hook:
|
||||
"""A hook to replace sys.excepthook that shows tracebacks in HTML."""
|
||||
|
||||
def __init__(self, display=1, logdir=None, context=5, file=None,
|
||||
format="html"):
|
||||
self.display = display # send tracebacks to browser if true
|
||||
self.logdir = logdir # log tracebacks to files if not None
|
||||
self.context = context # number of source code lines per frame
|
||||
self.file = file or sys.stdout # place to send the output
|
||||
self.format = format
|
||||
|
||||
def __call__(self, etype, evalue, etb):
|
||||
self.handle((etype, evalue, etb))
|
||||
|
||||
def handle(self, info=None):
|
||||
info = info or sys.exc_info()
|
||||
if self.format == "html":
|
||||
self.file.write(reset())
|
||||
|
||||
formatter = (self.format=="html") and html or text
|
||||
plain = False
|
||||
try:
|
||||
doc = formatter(info, self.context)
|
||||
except: # just in case something goes wrong
|
||||
doc = ''.join(traceback.format_exception(*info))
|
||||
plain = True
|
||||
|
||||
if self.display:
|
||||
if plain:
|
||||
doc = pydoc.html.escape(doc)
|
||||
self.file.write('<pre>' + doc + '</pre>\n')
|
||||
else:
|
||||
self.file.write(doc + '\n')
|
||||
else:
|
||||
self.file.write('<p>A problem occurred in a Python script.\n')
|
||||
|
||||
if self.logdir is not None:
|
||||
suffix = ['.txt', '.html'][self.format=="html"]
|
||||
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
|
||||
|
||||
try:
|
||||
with os.fdopen(fd, 'w') as file:
|
||||
file.write(doc)
|
||||
msg = '%s contains the description of this error.' % path
|
||||
except:
|
||||
msg = 'Tried to save traceback to %s, but failed.' % path
|
||||
|
||||
if self.format == 'html':
|
||||
self.file.write('<p>%s</p>\n' % msg)
|
||||
else:
|
||||
self.file.write(msg + '\n')
|
||||
try:
|
||||
self.file.flush()
|
||||
except: pass
|
||||
|
||||
handler = Hook().handle
|
||||
def enable(display=1, logdir=None, context=5, format="html"):
|
||||
"""Install an exception handler that formats tracebacks as HTML.
|
||||
|
||||
The optional argument 'display' can be set to 0 to suppress sending the
|
||||
traceback to the browser, and 'logdir' can be set to a directory to cause
|
||||
tracebacks to be written to files there."""
|
||||
sys.excepthook = Hook(display=display, logdir=logdir,
|
||||
context=context, format=format)
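The module docstring's own recipe, spelled out (the log directory is a hypothetical path):

    import cgitb
    cgitb.enable(display=1, logdir="/tmp/cgitb-logs",  # hypothetical log directory
                 context=5, format="html")

    # From here on, any uncaught exception is rendered by Hook.handle()
    # in place of the default sys.excepthook.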
169 third_party/python/Lib/chunk.py vendored Normal file
@@ -0,0 +1,169 @@
"""Simple class to read IFF chunks.
|
||||
|
||||
An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
|
||||
Format)) has the following structure:
|
||||
|
||||
+----------------+
|
||||
| ID (4 bytes) |
|
||||
+----------------+
|
||||
| size (4 bytes) |
|
||||
+----------------+
|
||||
| data |
|
||||
| ... |
|
||||
+----------------+
|
||||
|
||||
The ID is a 4-byte string which identifies the type of chunk.
|
||||
|
||||
The size field (a 32-bit value, encoded using big-endian byte order)
|
||||
gives the size of the whole chunk, including the 8-byte header.
|
||||
|
||||
Usually an IFF-type file consists of one or more chunks. The proposed
|
||||
usage of the Chunk class defined here is to instantiate an instance at
|
||||
the start of each chunk and read from the instance until it reaches
|
||||
the end, after which a new instance can be instantiated. At the end
|
||||
of the file, creating a new instance will fail with an EOFError
|
||||
exception.
|
||||
|
||||
Usage:
|
||||
while True:
|
||||
try:
|
||||
chunk = Chunk(file)
|
||||
except EOFError:
|
||||
break
|
||||
chunktype = chunk.getname()
|
||||
while True:
|
||||
data = chunk.read(nbytes)
|
||||
if not data:
|
||||
pass
|
||||
# do something with data
|
||||
|
||||
The interface is file-like. The implemented methods are:
|
||||
read, close, seek, tell, isatty.
|
||||
Extra methods are: skip() (called by close, skips to the end of the chunk),
|
||||
getname() (returns the name (ID) of the chunk)
|
||||
|
||||
The __init__ method has one required argument, a file-like object
|
||||
(including a chunk instance), and one optional argument, a flag which
|
||||
specifies whether or not chunks are aligned on 2-byte boundaries. The
|
||||
default is 1, i.e. aligned.
"""

class Chunk:
    def __init__(self, file, align=True, bigendian=True, inclheader=False):
        import struct
        self.closed = False
        self.align = align      # whether to align to word (2-byte) boundaries
        if bigendian:
            strflag = '>'
        else:
            strflag = '<'
        self.file = file
        self.chunkname = file.read(4)
        if len(self.chunkname) < 4:
            raise EOFError
        try:
            self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0]
        except struct.error:
            raise EOFError
        if inclheader:
            self.chunksize = self.chunksize - 8  # subtract header
        self.size_read = 0
        try:
            self.offset = self.file.tell()
        except (AttributeError, OSError):
            self.seekable = False
        else:
            self.seekable = True

    def getname(self):
        """Return the name (ID) of the current chunk."""
        return self.chunkname

    def getsize(self):
        """Return the size of the current chunk."""
        return self.chunksize

    def close(self):
        if not self.closed:
            try:
                self.skip()
            finally:
                self.closed = True

    def isatty(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return False

    def seek(self, pos, whence=0):
        """Seek to specified position into the chunk.

        Default position is 0 (start of chunk).
        If the file is not seekable, this will result in an error.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if not self.seekable:
            raise OSError("cannot seek")
        if whence == 1:
            pos = pos + self.size_read
        elif whence == 2:
            pos = pos + self.chunksize
        if pos < 0 or pos > self.chunksize:
            raise RuntimeError
        self.file.seek(self.offset + pos, 0)
        self.size_read = pos

    def tell(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self.size_read

    def read(self, size=-1):
        """Read at most size bytes from the chunk.

        If size is omitted or negative, read until the end
        of the chunk.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if self.size_read >= self.chunksize:
            return b''
        if size < 0:
            size = self.chunksize - self.size_read
        if size > self.chunksize - self.size_read:
            size = self.chunksize - self.size_read
        data = self.file.read(size)
        self.size_read = self.size_read + len(data)
        if self.size_read == self.chunksize and \
           self.align and \
           (self.chunksize & 1):
            dummy = self.file.read(1)
            self.size_read = self.size_read + len(dummy)
        return data

    def skip(self):
        """Skip the rest of the chunk.

        If you are not interested in the contents of the chunk,
        this method should be called so that the file points to
        the start of the next chunk.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if self.seekable:
            try:
                n = self.chunksize - self.size_read
                # maybe fix alignment
                if self.align and (self.chunksize & 1):
                    n = n + 1
                self.file.seek(n, 1)
                self.size_read = self.size_read + n
                return
            except OSError:
                pass
        while self.size_read < self.chunksize:
            n = min(8192, self.chunksize - self.size_read)
            dummy = self.read(n)
            if not dummy:
                raise EOFError
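
A minimal usage sketch of the Chunk class (hedged: 'sample.wav' is an assumed little-endian RIFF file, whose 12-byte outer container header must be consumed before iterating the inner chunks):

from chunk import Chunk

with open('sample.wav', 'rb') as f:
    f.read(12)  # skip the outer 'RIFF<size>WAVE' container header
    while True:
        try:
            c = Chunk(f, bigendian=False)  # RIFF sizes are little-endian
        except EOFError:
            break
        print(c.getname(), c.getsize())
        c.skip()  # position the file at the start of the next chunk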
401 third_party/python/Lib/cmd.py vendored Normal file
@@ -0,0 +1,401 @@
"""A generic class to build line-oriented command interpreters.
|
||||
|
||||
Interpreters constructed with this class obey the following conventions:
|
||||
|
||||
1. End of file on input is processed as the command 'EOF'.
|
||||
2. A command is parsed out of each line by collecting the prefix composed
|
||||
of characters in the identchars member.
|
||||
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
|
||||
is passed a single argument consisting of the remainder of the line.
|
||||
4. Typing an empty line repeats the last command. (Actually, it calls the
|
||||
method `emptyline', which may be overridden in a subclass.)
|
||||
5. There is a predefined `help' method. Given an argument `topic', it
|
||||
calls the command `help_topic'. With no arguments, it lists all topics
|
||||
with defined help_ functions, broken into up to three sections: documented
|
||||
commands, miscellaneous help topics, and undocumented commands.
|
||||
6. The command '?' is a synonym for `help'. The command '!' is a synonym
|
||||
for `shell', if a do_shell method exists.
|
||||
7. If completion is enabled, completing commands will be done automatically,
|
||||
and completion of command args is done by calling complete_foo() with
|
||||
arguments text, line, begidx, endidx. text is the string we are matching
|
||||
against, all returned matches must begin with it. line is the current
|
||||
input line (lstripped), begidx and endidx are the beginning and end
|
||||
indexes of the text being matched, which could be used to provide
|
||||
different completion depending upon which position the argument is in.
|
||||
|
||||
The `default' method may be overridden to intercept commands for which there
|
||||
is no do_ method.
|
||||
|
||||
The `completedefault' method may be overridden to intercept completions for
|
||||
commands that have no complete_ method.
|
||||
|
||||
The data member `self.ruler' sets the character used to draw separator lines
|
||||
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
|
||||
|
||||
If the value of `self.intro' is nonempty when the cmdloop method is called,
|
||||
it is printed out on interpreter startup. This value may be overridden
|
||||
via an optional argument to the cmdloop() method.
|
||||
|
||||
The data members `self.doc_header', `self.misc_header', and
|
||||
`self.undoc_header' set the headers used for the help function's
|
||||
listings of documented functions, miscellaneous topics, and undocumented
|
||||
functions respectively.
|
||||
"""
|
||||
|
||||
import string, sys
|
||||
|
||||
__all__ = ["Cmd"]
|
||||
|
||||
PROMPT = '(Cmd) '
|
||||
IDENTCHARS = string.ascii_letters + string.digits + '_'
|
||||
|
||||
class Cmd:
|
||||
"""A simple framework for writing line-oriented command interpreters.
|
||||
|
||||
These are often useful for test harnesses, administrative tools, and
|
||||
prototypes that will later be wrapped in a more sophisticated interface.
|
||||
|
||||
A Cmd instance or subclass instance is a line-oriented interpreter
|
||||
framework. There is no good reason to instantiate Cmd itself; rather,
|
||||
it's useful as a superclass of an interpreter class you define yourself
|
||||
in order to inherit Cmd's methods and encapsulate action methods.
|
||||
|
||||
"""
|
||||
prompt = PROMPT
|
||||
identchars = IDENTCHARS
|
||||
ruler = '='
|
||||
lastcmd = ''
|
||||
intro = None
|
||||
doc_leader = ""
|
||||
doc_header = "Documented commands (type help <topic>):"
|
||||
misc_header = "Miscellaneous help topics:"
|
||||
undoc_header = "Undocumented commands:"
|
||||
nohelp = "*** No help on %s"
|
||||
use_rawinput = 1
|
||||
|
||||
def __init__(self, completekey='tab', stdin=None, stdout=None):
|
||||
"""Instantiate a line-oriented interpreter framework.
|
||||
|
||||
The optional argument 'completekey' is the readline name of a
|
||||
completion key; it defaults to the Tab key. If completekey is
|
||||
not None and the readline module is available, command completion
|
||||
is done automatically. The optional arguments stdin and stdout
|
||||
specify alternate input and output file objects; if not specified,
|
||||
sys.stdin and sys.stdout are used.
|
||||
|
||||
"""
|
||||
if stdin is not None:
|
||||
self.stdin = stdin
|
||||
else:
|
||||
self.stdin = sys.stdin
|
||||
if stdout is not None:
|
||||
self.stdout = stdout
|
||||
else:
|
||||
self.stdout = sys.stdout
|
||||
self.cmdqueue = []
|
||||
self.completekey = completekey
|
||||
|
||||
def cmdloop(self, intro=None):
|
||||
"""Repeatedly issue a prompt, accept input, parse an initial prefix
|
||||
off the received input, and dispatch to action methods, passing them
|
||||
the remainder of the line as argument.
|
||||
|
||||
"""
|
||||
|
||||
self.preloop()
|
||||
if self.use_rawinput and self.completekey:
|
||||
try:
|
||||
import readline
|
||||
self.old_completer = readline.get_completer()
|
||||
readline.set_completer(self.complete)
|
||||
readline.parse_and_bind(self.completekey+": complete")
|
||||
except ImportError:
|
||||
pass
|
||||
try:
|
||||
if intro is not None:
|
||||
self.intro = intro
|
||||
if self.intro:
|
||||
self.stdout.write(str(self.intro)+"\n")
|
||||
stop = None
|
||||
while not stop:
|
||||
if self.cmdqueue:
|
||||
line = self.cmdqueue.pop(0)
|
||||
else:
|
||||
if self.use_rawinput:
|
||||
try:
|
||||
line = input(self.prompt)
|
||||
except EOFError:
|
||||
line = 'EOF'
|
||||
else:
|
||||
self.stdout.write(self.prompt)
|
||||
self.stdout.flush()
|
||||
line = self.stdin.readline()
|
||||
if not len(line):
|
||||
line = 'EOF'
|
||||
else:
|
||||
line = line.rstrip('\r\n')
|
||||
line = self.precmd(line)
|
||||
stop = self.onecmd(line)
|
||||
stop = self.postcmd(stop, line)
|
||||
self.postloop()
|
||||
finally:
|
||||
if self.use_rawinput and self.completekey:
|
||||
try:
|
||||
import readline
|
||||
readline.set_completer(self.old_completer)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def precmd(self, line):
|
||||
"""Hook method executed just before the command line is
|
||||
interpreted, but after the input prompt is generated and issued.
|
||||
|
||||
"""
|
||||
return line
|
||||
|
||||
def postcmd(self, stop, line):
|
||||
"""Hook method executed just after a command dispatch is finished."""
|
||||
return stop
|
||||
|
||||
def preloop(self):
|
||||
"""Hook method executed once when the cmdloop() method is called."""
|
||||
pass
|
||||
|
||||
def postloop(self):
|
||||
"""Hook method executed once when the cmdloop() method is about to
|
||||
return.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def parseline(self, line):
|
||||
"""Parse the line into a command name and a string containing
|
||||
the arguments. Returns a tuple containing (command, args, line).
|
||||
'command' and 'args' may be None if the line couldn't be parsed.
|
||||
"""
|
||||
line = line.strip()
|
||||
if not line:
|
||||
return None, None, line
|
||||
elif line[0] == '?':
|
||||
line = 'help ' + line[1:]
|
||||
elif line[0] == '!':
|
||||
if hasattr(self, 'do_shell'):
|
||||
line = 'shell ' + line[1:]
|
||||
else:
|
||||
return None, None, line
|
||||
i, n = 0, len(line)
|
||||
while i < n and line[i] in self.identchars: i = i+1
|
||||
cmd, arg = line[:i], line[i:].strip()
|
||||
return cmd, arg, line
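# Illustrative (hedged) examples of parseline() results:
#   parseline('greet alice') -> ('greet', 'alice', 'greet alice')
#   parseline('?topic')      -> ('help', 'topic', 'help topic')
#   parseline('')            -> (None, None, '')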
|
||||
|
||||
def onecmd(self, line):
|
||||
"""Interpret the argument as though it had been typed in response
|
||||
to the prompt.
|
||||
|
||||
This may be overridden, but should not normally need to be;
|
||||
see the precmd() and postcmd() methods for useful execution hooks.
|
||||
The return value is a flag indicating whether interpretation of
|
||||
commands by the interpreter should stop.
|
||||
|
||||
"""
|
||||
cmd, arg, line = self.parseline(line)
|
||||
if not line:
|
||||
return self.emptyline()
|
||||
if cmd is None:
|
||||
return self.default(line)
|
||||
self.lastcmd = line
|
||||
if line == 'EOF' :
|
||||
self.lastcmd = ''
|
||||
if cmd == '':
|
||||
return self.default(line)
|
||||
else:
|
||||
try:
|
||||
func = getattr(self, 'do_' + cmd)
|
||||
except AttributeError:
|
||||
return self.default(line)
|
||||
return func(arg)
|
||||
|
||||
def emptyline(self):
|
||||
"""Called when an empty line is entered in response to the prompt.
|
||||
|
||||
If this method is not overridden, it repeats the last nonempty
|
||||
command entered.
|
||||
|
||||
"""
|
||||
if self.lastcmd:
|
||||
return self.onecmd(self.lastcmd)
|
||||
|
||||
def default(self, line):
|
||||
"""Called on an input line when the command prefix is not recognized.
|
||||
|
||||
If this method is not overridden, it prints an error message and
|
||||
returns.
|
||||
|
||||
"""
|
||||
self.stdout.write('*** Unknown syntax: %s\n'%line)
|
||||
|
||||
def completedefault(self, *ignored):
|
||||
"""Method called to complete an input line when no command-specific
|
||||
complete_*() method is available.
|
||||
|
||||
By default, it returns an empty list.
|
||||
|
||||
"""
|
||||
return []
|
||||
|
||||
def completenames(self, text, *ignored):
|
||||
dotext = 'do_'+text
|
||||
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
|
||||
|
||||
def complete(self, text, state):
|
||||
"""Return the next possible completion for 'text'.
|
||||
|
||||
If a command has not been entered, then complete against command list.
|
||||
Otherwise try to call complete_<command> to get list of completions.
|
||||
"""
|
||||
if state == 0:
|
||||
import readline
|
||||
origline = readline.get_line_buffer()
|
||||
line = origline.lstrip()
|
||||
stripped = len(origline) - len(line)
|
||||
begidx = readline.get_begidx() - stripped
|
||||
endidx = readline.get_endidx() - stripped
|
||||
if begidx>0:
|
||||
cmd, args, foo = self.parseline(line)
|
||||
if cmd == '':
|
||||
compfunc = self.completedefault
|
||||
else:
|
||||
try:
|
||||
compfunc = getattr(self, 'complete_' + cmd)
|
||||
except AttributeError:
|
||||
compfunc = self.completedefault
|
||||
else:
|
||||
compfunc = self.completenames
|
||||
self.completion_matches = compfunc(text, line, begidx, endidx)
|
||||
try:
|
||||
return self.completion_matches[state]
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
def get_names(self):
|
||||
# This method used to pull in base class attributes
|
||||
# at a time dir() didn't do it yet.
|
||||
return dir(self.__class__)
|
||||
|
||||
def complete_help(self, *args):
|
||||
commands = set(self.completenames(*args))
|
||||
topics = set(a[5:] for a in self.get_names()
|
||||
if a.startswith('help_' + args[0]))
|
||||
return list(commands | topics)
|
||||
|
||||
def do_help(self, arg):
|
||||
'List available commands with "help" or detailed help with "help cmd".'
|
||||
if arg:
|
||||
# XXX check arg syntax
|
||||
try:
|
||||
func = getattr(self, 'help_' + arg)
|
||||
except AttributeError:
|
||||
try:
|
||||
doc=getattr(self, 'do_' + arg).__doc__
|
||||
if doc:
|
||||
self.stdout.write("%s\n"%str(doc))
|
||||
return
|
||||
except AttributeError:
|
||||
pass
|
||||
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
|
||||
return
|
||||
func()
|
||||
else:
|
||||
names = self.get_names()
|
||||
cmds_doc = []
|
||||
cmds_undoc = []
|
||||
help = {}
|
||||
for name in names:
|
||||
if name[:5] == 'help_':
|
||||
help[name[5:]]=1
|
||||
names.sort()
|
||||
# There can be duplicates if routines overridden
|
||||
prevname = ''
|
||||
for name in names:
|
||||
if name[:3] == 'do_':
|
||||
if name == prevname:
|
||||
continue
|
||||
prevname = name
|
||||
cmd=name[3:]
|
||||
if cmd in help:
|
||||
cmds_doc.append(cmd)
|
||||
del help[cmd]
|
||||
elif getattr(self, name).__doc__:
|
||||
cmds_doc.append(cmd)
|
||||
else:
|
||||
cmds_undoc.append(cmd)
|
||||
self.stdout.write("%s\n"%str(self.doc_leader))
|
||||
self.print_topics(self.doc_header, cmds_doc, 15,80)
|
||||
self.print_topics(self.misc_header, list(help.keys()),15,80)
|
||||
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
|
||||
|
||||
def print_topics(self, header, cmds, cmdlen, maxcol):
|
||||
if cmds:
|
||||
self.stdout.write("%s\n"%str(header))
|
||||
if self.ruler:
|
||||
self.stdout.write("%s\n"%str(self.ruler * len(header)))
|
||||
self.columnize(cmds, maxcol-1)
|
||||
self.stdout.write("\n")
|
||||
|
||||
def columnize(self, list, displaywidth=80):
|
||||
"""Display a list of strings as a compact set of columns.
|
||||
|
||||
Each column is only as wide as necessary.
|
||||
Columns are separated by two spaces (one was not legible enough).
|
||||
"""
|
||||
if not list:
|
||||
self.stdout.write("<empty>\n")
|
||||
return
|
||||
|
||||
nonstrings = [i for i in range(len(list))
|
||||
if not isinstance(list[i], str)]
|
||||
if nonstrings:
|
||||
raise TypeError("list[i] not a string for i in %s"
|
||||
% ", ".join(map(str, nonstrings)))
|
||||
size = len(list)
|
||||
if size == 1:
|
||||
self.stdout.write('%s\n'%str(list[0]))
|
||||
return
|
||||
# Try every row count from 1 upwards
|
||||
for nrows in range(1, len(list)):
|
||||
ncols = (size+nrows-1) // nrows
|
||||
colwidths = []
|
||||
totwidth = -2
|
||||
for col in range(ncols):
|
||||
colwidth = 0
|
||||
for row in range(nrows):
|
||||
i = row + nrows*col
|
||||
if i >= size:
|
||||
break
|
||||
x = list[i]
|
||||
colwidth = max(colwidth, len(x))
|
||||
colwidths.append(colwidth)
|
||||
totwidth += colwidth + 2
|
||||
if totwidth > displaywidth:
|
||||
break
|
||||
if totwidth <= displaywidth:
|
||||
break
|
||||
else:
|
||||
nrows = len(list)
|
||||
ncols = 1
|
||||
colwidths = [0]
|
||||
for row in range(nrows):
|
||||
texts = []
|
||||
for col in range(ncols):
|
||||
i = row + nrows*col
|
||||
if i >= size:
|
||||
x = ""
|
||||
else:
|
||||
x = list[i]
|
||||
texts.append(x)
|
||||
while texts and not texts[-1]:
|
||||
del texts[-1]
|
||||
for col in range(len(texts)):
|
||||
texts[col] = texts[col].ljust(colwidths[col])
|
||||
self.stdout.write("%s\n"%str(" ".join(texts)))
|
314 third_party/python/Lib/code.py vendored Normal file
@@ -0,0 +1,314 @@
"""Utilities needed to emulate Python's interactive interpreter.
|
||||
|
||||
"""
|
||||
|
||||
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
|
||||
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
import argparse
|
||||
from codeop import CommandCompiler, compile_command
|
||||
|
||||
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
|
||||
"compile_command"]
|
||||
|
||||
class InteractiveInterpreter:
|
||||
"""Base class for InteractiveConsole.
|
||||
|
||||
This class deals with parsing and interpreter state (the user's
|
||||
namespace); it doesn't deal with input buffering or prompting or
|
||||
input file naming (the filename is always passed in explicitly).
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, locals=None):
|
||||
"""Constructor.
|
||||
|
||||
The optional 'locals' argument specifies the dictionary in
|
||||
which code will be executed; it defaults to a newly created
|
||||
dictionary with key "__name__" set to "__console__" and key
|
||||
"__doc__" set to None.
|
||||
|
||||
"""
|
||||
if locals is None:
|
||||
locals = {"__name__": "__console__", "__doc__": None}
|
||||
self.locals = locals
|
||||
self.compile = CommandCompiler()
|
||||
|
||||
def runsource(self, source, filename="<input>", symbol="single"):
|
||||
"""Compile and run some source in the interpreter.
|
||||
|
||||
Arguments are as for compile_command().
|
||||
|
||||
One of several things can happen:
|
||||
|
||||
1) The input is incorrect; compile_command() raised an
|
||||
exception (SyntaxError or OverflowError). A syntax traceback
|
||||
will be printed by calling the showsyntaxerror() method.
|
||||
|
||||
2) The input is incomplete, and more input is required;
|
||||
compile_command() returned None. Nothing happens.
|
||||
|
||||
3) The input is complete; compile_command() returned a code
|
||||
object. The code is executed by calling self.runcode() (which
|
||||
also handles run-time exceptions, except for SystemExit).
|
||||
|
||||
The return value is True in case 2, False in the other cases (unless
|
||||
an exception is raised). The return value can be used to
|
||||
decide whether to use sys.ps1 or sys.ps2 to prompt the next
|
||||
line.
|
||||
|
||||
"""
|
||||
try:
|
||||
code = self.compile(source, filename, symbol)
|
||||
except (OverflowError, SyntaxError, ValueError):
|
||||
# Case 1
|
||||
self.showsyntaxerror(filename)
|
||||
return False
|
||||
|
||||
if code is None:
|
||||
# Case 2
|
||||
return True
|
||||
|
||||
# Case 3
|
||||
self.runcode(code)
|
||||
return False
|
||||
|
||||
def runcode(self, code):
|
||||
"""Execute a code object.
|
||||
|
||||
When an exception occurs, self.showtraceback() is called to
|
||||
display a traceback. All exceptions are caught except
|
||||
SystemExit, which is reraised.
|
||||
|
||||
A note about KeyboardInterrupt: this exception may occur
|
||||
elsewhere in this code, and may not always be caught. The
|
||||
caller should be prepared to deal with it.
|
||||
|
||||
"""
|
||||
try:
|
||||
exec(code, self.locals)
|
||||
except SystemExit:
|
||||
raise
|
||||
except:
|
||||
self.showtraceback()
|
||||
|
||||
def showsyntaxerror(self, filename=None):
|
||||
"""Display the syntax error that just occurred.
|
||||
|
||||
This doesn't display a stack trace because there isn't one.
|
||||
|
||||
If a filename is given, it is stuffed in the exception instead
|
||||
of what was there before (because Python's parser always uses
|
||||
"<string>" when reading from a string).
|
||||
|
||||
The output is written by self.write(), below.
|
||||
|
||||
"""
|
||||
type, value, tb = sys.exc_info()
|
||||
sys.last_type = type
|
||||
sys.last_value = value
|
||||
sys.last_traceback = tb
|
||||
if filename and type is SyntaxError:
|
||||
# Work hard to stuff the correct filename in the exception
|
||||
try:
|
||||
msg, (dummy_filename, lineno, offset, line) = value.args
|
||||
except ValueError:
|
||||
# Not the format we expect; leave it alone
|
||||
pass
|
||||
else:
|
||||
# Stuff in the right filename
|
||||
value = SyntaxError(msg, (filename, lineno, offset, line))
|
||||
sys.last_value = value
|
||||
if sys.excepthook is sys.__excepthook__:
|
||||
lines = traceback.format_exception_only(type, value)
|
||||
self.write(''.join(lines))
|
||||
else:
|
||||
# If someone has set sys.excepthook, we let that take precedence
|
||||
# over self.write
|
||||
sys.excepthook(type, value, tb)
|
||||
|
||||
def showtraceback(self):
|
||||
"""Display the exception that just occurred.
|
||||
|
||||
We remove the first stack item because it is our own code.
|
||||
|
||||
The output is written by self.write(), below.
|
||||
|
||||
"""
|
||||
sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
|
||||
sys.last_traceback = last_tb
|
||||
try:
|
||||
lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
|
||||
if sys.excepthook is sys.__excepthook__:
|
||||
self.write(''.join(lines))
|
||||
else:
|
||||
# If someone has set sys.excepthook, we let that take precedence
|
||||
# over self.write
|
||||
sys.excepthook(ei[0], ei[1], last_tb)
|
||||
finally:
|
||||
last_tb = ei = None
|
||||
|
||||
def write(self, data):
|
||||
"""Write a string.
|
||||
|
||||
The base implementation writes to sys.stderr; a subclass may
|
||||
replace this with a different implementation.
|
||||
|
||||
"""
|
||||
sys.stderr.write(data)
|
||||
|
||||
|
||||
class InteractiveConsole(InteractiveInterpreter):
|
||||
"""Closely emulate the behavior of the interactive Python interpreter.
|
||||
|
||||
This class builds on InteractiveInterpreter and adds prompting
|
||||
using the familiar sys.ps1 and sys.ps2, and input buffering.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, locals=None, filename="<console>"):
|
||||
"""Constructor.
|
||||
|
||||
The optional locals argument will be passed to the
|
||||
InteractiveInterpreter base class.
|
||||
|
||||
The optional filename argument should specify the (file)name
|
||||
of the input stream; it will show up in tracebacks.
|
||||
|
||||
"""
|
||||
InteractiveInterpreter.__init__(self, locals)
|
||||
self.filename = filename
|
||||
self.resetbuffer()
|
||||
|
||||
def resetbuffer(self):
|
||||
"""Reset the input buffer."""
|
||||
self.buffer = []
|
||||
|
||||
def interact(self, banner=None, exitmsg=None):
|
||||
"""Closely emulate the interactive Python console.
|
||||
|
||||
The optional banner argument specifies the banner to print
|
||||
before the first interaction; by default it prints a banner
|
||||
similar to the one printed by the real Python interpreter,
|
||||
followed by the current class name in parentheses (so as not
|
||||
to confuse this with the real interpreter -- since it's so
|
||||
close!).
|
||||
|
||||
The optional exitmsg argument specifies the exit message
|
||||
printed when exiting. Pass the empty string to suppress
|
||||
printing an exit message. If exitmsg is not given or None,
|
||||
a default message is printed.
|
||||
|
||||
"""
|
||||
try:
|
||||
sys.ps1
|
||||
except AttributeError:
|
||||
sys.ps1 = ">>> "
|
||||
try:
|
||||
sys.ps2
|
||||
except AttributeError:
|
||||
sys.ps2 = "... "
|
||||
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
|
||||
if banner is None:
|
||||
self.write("Python %s on %s\n%s\n(%s)\n" %
|
||||
(sys.version, sys.platform, cprt,
|
||||
self.__class__.__name__))
|
||||
elif banner:
|
||||
self.write("%s\n" % str(banner))
|
||||
more = 0
|
||||
while 1:
|
||||
try:
|
||||
if more:
|
||||
prompt = sys.ps2
|
||||
else:
|
||||
prompt = sys.ps1
|
||||
try:
|
||||
line = self.raw_input(prompt)
|
||||
except EOFError:
|
||||
self.write("\n")
|
||||
break
|
||||
else:
|
||||
more = self.push(line)
|
||||
except KeyboardInterrupt:
|
||||
self.write("\nKeyboardInterrupt\n")
|
||||
self.resetbuffer()
|
||||
more = 0
|
||||
if exitmsg is None:
|
||||
self.write('now exiting %s...\n' % self.__class__.__name__)
|
||||
elif exitmsg != '':
|
||||
self.write('%s\n' % exitmsg)
|
||||
|
||||
def push(self, line):
|
||||
"""Push a line to the interpreter.
|
||||
|
||||
The line should not have a trailing newline; it may have
|
||||
internal newlines. The line is appended to a buffer and the
|
||||
interpreter's runsource() method is called with the
|
||||
concatenated contents of the buffer as source. If this
|
||||
indicates that the command was executed or invalid, the buffer
|
||||
is reset; otherwise, the command is incomplete, and the buffer
|
||||
is left as it was after the line was appended. The return
|
||||
value is 1 if more input is required, 0 if the line was dealt
|
||||
with in some way (this is the same as runsource()).
|
||||
|
||||
"""
|
||||
self.buffer.append(line)
|
||||
source = "\n".join(self.buffer)
|
||||
more = self.runsource(source, self.filename)
|
||||
if not more:
|
||||
self.resetbuffer()
|
||||
return more
|
||||
|
||||
def raw_input(self, prompt=""):
|
||||
"""Write a prompt and read a line.
|
||||
|
||||
The returned line does not include the trailing newline.
|
||||
When the user enters the EOF key sequence, EOFError is raised.
|
||||
|
||||
The base implementation uses the built-in function
|
||||
input(); a subclass may replace this with a different
|
||||
implementation.
|
||||
|
||||
"""
|
||||
return input(prompt)
|
||||
|
||||
|
||||
|
||||
def interact(banner=None, readfunc=None, local=None, exitmsg=None):
|
||||
"""Closely emulate the interactive Python interpreter.
|
||||
|
||||
This is a backwards compatible interface to the InteractiveConsole
|
||||
class. When readfunc is not specified, it attempts to import the
|
||||
readline module to enable GNU readline if it is available.
|
||||
|
||||
Arguments (all optional, all default to None):
|
||||
|
||||
banner -- passed to InteractiveConsole.interact()
|
||||
readfunc -- if not None, replaces InteractiveConsole.raw_input()
|
||||
local -- passed to InteractiveInterpreter.__init__()
|
||||
exitmsg -- passed to InteractiveConsole.interact()
|
||||
|
||||
"""
|
||||
console = InteractiveConsole(local)
|
||||
if readfunc is not None:
|
||||
console.raw_input = readfunc
|
||||
else:
|
||||
try:
|
||||
import readline
|
||||
except ImportError:
|
||||
pass
|
||||
console.interact(banner, exitmsg)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-q', action='store_true',
|
||||
help="don't print version and copyright messages")
|
||||
args = parser.parse_args()
|
||||
if args.q or sys.flags.quiet:
|
||||
banner = ''
|
||||
else:
|
||||
banner = None
|
||||
interact(banner)
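
A short, hedged sketch of embedding an interpreter with a preloaded namespace ('answer' is an illustrative name):

import code

console = code.InteractiveConsole(locals={'answer': 42})
console.push('print(answer * 2)')  # complete statement: runs at once, prints 84
# console.interact(banner='embedded console')  # would start a full REPL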
1113 third_party/python/Lib/codecs.py vendored Normal file
File diff suppressed because it is too large
168 third_party/python/Lib/codeop.py vendored Normal file
@@ -0,0 +1,168 @@
r"""Utilities to compile possibly incomplete Python source code.
|
||||
|
||||
This module provides two interfaces, broadly similar to the builtin
|
||||
function compile(), which take program text, a filename and a 'mode'
|
||||
and:
|
||||
|
||||
- Return code object if the command is complete and valid
|
||||
- Return None if the command is incomplete
|
||||
- Raise SyntaxError, ValueError or OverflowError if the command is a
|
||||
syntax error (OverflowError and ValueError can be produced by
|
||||
malformed literals).
|
||||
|
||||
Approach:
|
||||
|
||||
First, check if the source consists entirely of blank lines and
|
||||
comments; if so, replace it with 'pass', because the built-in
|
||||
parser doesn't always do the right thing for these.
|
||||
|
||||
Compile three times: as is, with \n, and with \n\n appended. If it
|
||||
compiles as is, it's complete. If it compiles with one \n appended,
|
||||
we expect more. If it doesn't compile either way, we compare the
|
||||
error we get when compiling with \n or \n\n appended. If the errors
|
||||
are the same, the code is broken. But if the errors are different, we
|
||||
expect more. Not intuitive; not even guaranteed to hold in future
|
||||
releases; but this matches the compiler's behavior from Python 1.4
|
||||
through 2.2, at least.
|
||||
|
||||
Caveat:
|
||||
|
||||
It is possible (but not likely) that the parser stops parsing with a
|
||||
successful outcome before reaching the end of the source; in this
|
||||
case, trailing symbols may be ignored instead of causing an error.
|
||||
For example, a backslash followed by two newlines may be followed by
|
||||
arbitrary garbage. This will be fixed once the API for the parser is
|
||||
better.
|
||||
|
||||
The two interfaces are:
|
||||
|
||||
compile_command(source, filename, symbol):
|
||||
|
||||
Compiles a single command in the manner described above.
|
||||
|
||||
CommandCompiler():
|
||||
|
||||
Instances of this class have __call__ methods identical in
|
||||
signature to compile_command; the difference is that if the
|
||||
instance compiles program text containing a __future__ statement,
|
||||
the instance 'remembers' and compiles all subsequent program texts
|
||||
with the statement in force.
|
||||
|
||||
The module also provides another class:
|
||||
|
||||
Compile():
|
||||
|
||||
Instances of this class act like the built-in function compile,
|
||||
but with 'memory' in the sense described above.
|
||||
"""
|
||||
|
||||
import __future__
|
||||
|
||||
_features = [getattr(__future__, fname)
|
||||
for fname in __future__.all_feature_names]
|
||||
|
||||
__all__ = ["compile_command", "Compile", "CommandCompiler"]
|
||||
|
||||
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
|
||||
|
||||
def _maybe_compile(compiler, source, filename, symbol):
|
||||
# Check for source consisting of only blank lines and comments
|
||||
for line in source.split("\n"):
|
||||
line = line.strip()
|
||||
if line and line[0] != '#':
|
||||
break # Leave it alone
|
||||
else:
|
||||
if symbol != "eval":
|
||||
source = "pass" # Replace it with a 'pass' statement
|
||||
|
||||
err = err1 = err2 = None
|
||||
code = code1 = code2 = None
|
||||
|
||||
try:
|
||||
code = compiler(source, filename, symbol)
|
||||
except SyntaxError as err:
|
||||
pass
|
||||
|
||||
try:
|
||||
code1 = compiler(source + "\n", filename, symbol)
|
||||
except SyntaxError as e:
|
||||
err1 = e
|
||||
|
||||
try:
|
||||
code2 = compiler(source + "\n\n", filename, symbol)
|
||||
except SyntaxError as e:
|
||||
err2 = e
|
||||
|
||||
if code:
|
||||
return code
|
||||
if not code1 and repr(err1) == repr(err2):
|
||||
raise err1
|
||||
|
||||
def _compile(source, filename, symbol):
|
||||
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
|
||||
|
||||
def compile_command(source, filename="<input>", symbol="single"):
|
||||
r"""Compile a command and determine whether it is incomplete.
|
||||
|
||||
Arguments:
|
||||
|
||||
source -- the source string; may contain \n characters
|
||||
filename -- optional filename from which source was read; default
|
||||
"<input>"
|
||||
symbol -- optional grammar start symbol; "single" (default) or "eval"
|
||||
|
||||
Return value / exceptions raised:
|
||||
|
||||
- Return a code object if the command is complete and valid
|
||||
- Return None if the command is incomplete
|
||||
- Raise SyntaxError, ValueError or OverflowError if the command is a
|
||||
syntax error (OverflowError and ValueError can be produced by
|
||||
malformed literals).
|
||||
"""
|
||||
return _maybe_compile(_compile, source, filename, symbol)
|
||||
|
||||
class Compile:
|
||||
"""Instances of this class behave much like the built-in compile
|
||||
function, but if one is used to compile text containing a future
|
||||
statement, it "remembers" and compiles all subsequent program texts
|
||||
with the statement in force."""
|
||||
def __init__(self):
|
||||
self.flags = PyCF_DONT_IMPLY_DEDENT
|
||||
|
||||
def __call__(self, source, filename, symbol):
|
||||
codeob = compile(source, filename, symbol, self.flags, 1)
|
||||
for feature in _features:
|
||||
if codeob.co_flags & feature.compiler_flag:
|
||||
self.flags |= feature.compiler_flag
|
||||
return codeob
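# Hedged illustration of the "memory" described above: once a Compile
# instance sees e.g.
#   from __future__ import barry_as_FLUFL
# it ORs that feature's compiler_flag into self.flags, so every later
# call compiles with the feature still in force.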
|
||||
|
||||
class CommandCompiler:
|
||||
"""Instances of this class have __call__ methods identical in
|
||||
signature to compile_command; the difference is that if the
|
||||
instance compiles program text containing a __future__ statement,
|
||||
the instance 'remembers' and compiles all subsequent program texts
|
||||
with the statement in force."""
|
||||
|
||||
def __init__(self,):
|
||||
self.compiler = Compile()
|
||||
|
||||
def __call__(self, source, filename="<input>", symbol="single"):
|
||||
r"""Compile a command and determine whether it is incomplete.
|
||||
|
||||
Arguments:
|
||||
|
||||
source -- the source string; may contain \n characters
|
||||
filename -- optional filename from which source was read;
|
||||
default "<input>"
|
||||
symbol -- optional grammar start symbol; "single" (default) or
|
||||
"eval"
|
||||
|
||||
Return value / exceptions raised:
|
||||
|
||||
- Return a code object if the command is complete and valid
|
||||
- Return None if the command is incomplete
|
||||
- Raise SyntaxError, ValueError or OverflowError if the command is a
|
||||
syntax error (OverflowError and ValueError can be produced by
|
||||
malformed literals).
|
||||
"""
|
||||
return _maybe_compile(self.compiler, source, filename, symbol)
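
A brief sketch of how compile_command() signals completeness, matching the return-value contract documented above:

from codeop import compile_command

print(compile_command('def f():') is None)   # True: input is incomplete
code_obj = compile_command('1 + 1', symbol='eval')
print(eval(code_obj))                        # 2: complete and valid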
1244 third_party/python/Lib/collections/__init__.py vendored Normal file
File diff suppressed because it is too large
2 third_party/python/Lib/collections/abc.py vendored Normal file
@@ -0,0 +1,2 @@
from _collections_abc import *
from _collections_abc import __all__
164 third_party/python/Lib/colorsys.py vendored Normal file
@@ -0,0 +1,164 @@
"""Conversion functions between RGB and other color systems.
|
||||
|
||||
This module provides two functions for each color system ABC:
|
||||
|
||||
rgb_to_abc(r, g, b) --> a, b, c
|
||||
abc_to_rgb(a, b, c) --> r, g, b
|
||||
|
||||
All inputs and outputs are triples of floats in the range [0.0...1.0]
|
||||
(with the exception of I and Q, which cover a slightly larger range).
|
||||
Inputs outside the valid range may cause exceptions or invalid outputs.
|
||||
|
||||
Supported color systems:
|
||||
RGB: Red, Green, Blue components
|
||||
YIQ: Luminance, Chrominance (used by composite video signals)
|
||||
HLS: Hue, Luminance, Saturation
|
||||
HSV: Hue, Saturation, Value
|
||||
"""
|
||||
|
||||
# References:
|
||||
# http://en.wikipedia.org/wiki/YIQ
|
||||
# http://en.wikipedia.org/wiki/HLS_color_space
|
||||
# http://en.wikipedia.org/wiki/HSV_color_space
|
||||
|
||||
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
|
||||
"rgb_to_hsv","hsv_to_rgb"]
|
||||
|
||||
# Some floating point constants
|
||||
|
||||
ONE_THIRD = 1.0/3.0
|
||||
ONE_SIXTH = 1.0/6.0
|
||||
TWO_THIRD = 2.0/3.0
|
||||
|
||||
# YIQ: used by composite video signals (linear combinations of RGB)
|
||||
# Y: perceived grey level (0.0 == black, 1.0 == white)
|
||||
# I, Q: color components
|
||||
#
|
||||
# There are a great many versions of the constants used in these formulae.
|
||||
# The ones in this library use constants from the FCC version of NTSC.
|
||||
|
||||
def rgb_to_yiq(r, g, b):
|
||||
y = 0.30*r + 0.59*g + 0.11*b
|
||||
i = 0.74*(r-y) - 0.27*(b-y)
|
||||
q = 0.48*(r-y) + 0.41*(b-y)
|
||||
return (y, i, q)
|
||||
|
||||
def yiq_to_rgb(y, i, q):
|
||||
# r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48)
|
||||
# b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48)
|
||||
# g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59
|
||||
|
||||
r = y + 0.9468822170900693*i + 0.6235565819861433*q
|
||||
g = y - 0.27478764629897834*i - 0.6356910791873801*q
|
||||
b = y - 1.1085450346420322*i + 1.7090069284064666*q
|
||||
|
||||
if r < 0.0:
|
||||
r = 0.0
|
||||
if g < 0.0:
|
||||
g = 0.0
|
||||
if b < 0.0:
|
||||
b = 0.0
|
||||
if r > 1.0:
|
||||
r = 1.0
|
||||
if g > 1.0:
|
||||
g = 1.0
|
||||
if b > 1.0:
|
||||
b = 1.0
|
||||
return (r, g, b)
|
||||
|
||||
|
||||
# HLS: Hue, Luminance, Saturation
|
||||
# H: position in the spectrum
|
||||
# L: color lightness
|
||||
# S: color saturation
|
||||
|
||||
def rgb_to_hls(r, g, b):
|
||||
maxc = max(r, g, b)
|
||||
minc = min(r, g, b)
|
||||
# XXX Can optimize (maxc+minc) and (maxc-minc)
|
||||
l = (minc+maxc)/2.0
|
||||
if minc == maxc:
|
||||
return 0.0, l, 0.0
|
||||
if l <= 0.5:
|
||||
s = (maxc-minc) / (maxc+minc)
|
||||
else:
|
||||
s = (maxc-minc) / (2.0-maxc-minc)
|
||||
rc = (maxc-r) / (maxc-minc)
|
||||
gc = (maxc-g) / (maxc-minc)
|
||||
bc = (maxc-b) / (maxc-minc)
|
||||
if r == maxc:
|
||||
h = bc-gc
|
||||
elif g == maxc:
|
||||
h = 2.0+rc-bc
|
||||
else:
|
||||
h = 4.0+gc-rc
|
||||
h = (h/6.0) % 1.0
|
||||
return h, l, s
|
||||
|
||||
def hls_to_rgb(h, l, s):
|
||||
if s == 0.0:
|
||||
return l, l, l
|
||||
if l <= 0.5:
|
||||
m2 = l * (1.0+s)
|
||||
else:
|
||||
m2 = l+s-(l*s)
|
||||
m1 = 2.0*l - m2
|
||||
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
|
||||
|
||||
def _v(m1, m2, hue):
|
||||
hue = hue % 1.0
|
||||
if hue < ONE_SIXTH:
|
||||
return m1 + (m2-m1)*hue*6.0
|
||||
if hue < 0.5:
|
||||
return m2
|
||||
if hue < TWO_THIRD:
|
||||
return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
|
||||
return m1
|
||||
|
||||
|
||||
# HSV: Hue, Saturation, Value
|
||||
# H: position in the spectrum
|
||||
# S: color saturation ("purity")
|
||||
# V: color brightness
|
||||
|
||||
def rgb_to_hsv(r, g, b):
|
||||
maxc = max(r, g, b)
|
||||
minc = min(r, g, b)
|
||||
v = maxc
|
||||
if minc == maxc:
|
||||
return 0.0, 0.0, v
|
||||
s = (maxc-minc) / maxc
|
||||
rc = (maxc-r) / (maxc-minc)
|
||||
gc = (maxc-g) / (maxc-minc)
|
||||
bc = (maxc-b) / (maxc-minc)
|
||||
if r == maxc:
|
||||
h = bc-gc
|
||||
elif g == maxc:
|
||||
h = 2.0+rc-bc
|
||||
else:
|
||||
h = 4.0+gc-rc
|
||||
h = (h/6.0) % 1.0
|
||||
return h, s, v
|
||||
|
||||
def hsv_to_rgb(h, s, v):
|
||||
if s == 0.0:
|
||||
return v, v, v
|
||||
i = int(h*6.0) # XXX assume int() truncates!
|
||||
f = (h*6.0) - i
|
||||
p = v*(1.0 - s)
|
||||
q = v*(1.0 - s*f)
|
||||
t = v*(1.0 - s*(1.0-f))
|
||||
i = i%6
|
||||
if i == 0:
|
||||
return v, t, p
|
||||
if i == 1:
|
||||
return q, v, p
|
||||
if i == 2:
|
||||
return p, v, t
|
||||
if i == 3:
|
||||
return p, q, v
|
||||
if i == 4:
|
||||
return t, p, v
|
||||
if i == 5:
|
||||
return v, p, q
|
||||
# Cannot get here
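
A quick round-trip sketch; all values are floats in [0.0, 1.0], as the module docstring specifies:

import colorsys

h, l, s = colorsys.rgb_to_hls(1.0, 0.5, 0.0)   # an orange
r, g, b = colorsys.hls_to_rgb(h, l, s)
# (r, g, b) round-trips to (1.0, 0.5, 0.0) within floating-point error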
299 third_party/python/Lib/compileall.py vendored Normal file
@@ -0,0 +1,299 @@
"""Module/script to byte-compile all .py files to .pyc files.
|
||||
|
||||
When called as a script with arguments, this compiles the directories
|
||||
given as arguments recursively; the -l option prevents it from
|
||||
recursing into directories.
|
||||
|
||||
Without arguments, it compiles all modules on sys.path, without
|
||||
recursing into subdirectories. (Even though it should do so for
|
||||
packages -- for now, you'll have to deal with packages separately.)
|
||||
|
||||
See module py_compile for details of the actual byte-compilation.
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import importlib.util
|
||||
import py_compile
|
||||
import struct
|
||||
|
||||
from functools import partial
|
||||
|
||||
__all__ = ["compile_dir","compile_file","compile_path"]
|
||||
|
||||
def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0):
|
||||
if quiet < 2 and isinstance(dir, os.PathLike):
|
||||
dir = os.fspath(dir)
|
||||
if not quiet:
|
||||
print('Listing {!r}...'.format(dir))
|
||||
try:
|
||||
names = os.listdir(dir)
|
||||
except OSError:
|
||||
if quiet < 2:
|
||||
print("Can't list {!r}".format(dir))
|
||||
names = []
|
||||
names.sort()
|
||||
for name in names:
|
||||
if name == '__pycache__':
|
||||
continue
|
||||
fullname = os.path.join(dir, name)
|
||||
if ddir is not None:
|
||||
dfile = os.path.join(ddir, name)
|
||||
else:
|
||||
dfile = None
|
||||
if not os.path.isdir(fullname):
|
||||
yield fullname
|
||||
elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
|
||||
os.path.isdir(fullname) and not os.path.islink(fullname)):
|
||||
yield from _walk_dir(fullname, ddir=dfile,
|
||||
maxlevels=maxlevels - 1, quiet=quiet)
|
||||
|
||||
def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
|
||||
quiet=0, legacy=False, optimize=-1, workers=1):
|
||||
"""Byte-compile all modules in the given directory tree.
|
||||
|
||||
Arguments (only dir is required):
|
||||
|
||||
dir: the directory to byte-compile
|
||||
maxlevels: maximum recursion level (default 10)
|
||||
ddir: the directory that will be prepended to the path to the
|
||||
file as it is compiled into each byte-code file.
|
||||
force: if True, force compilation, even if timestamps are up-to-date
|
||||
quiet: full output with False or 0, errors only with 1,
|
||||
no output with 2
|
||||
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
|
||||
optimize: optimization level or -1 for level of the interpreter
|
||||
workers: maximum number of parallel workers
|
||||
"""
|
||||
ProcessPoolExecutor = None
|
||||
if workers is not None:
|
||||
if workers < 0:
|
||||
raise ValueError('workers must be greater or equal to 0')
|
||||
elif workers != 1:
|
||||
try:
|
||||
# Only import when needed, as low resource platforms may
|
||||
# fail to import it
|
||||
from concurrent.futures import ProcessPoolExecutor
|
||||
except ImportError:
|
||||
workers = 1
|
||||
files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels,
|
||||
ddir=ddir)
|
||||
success = True
|
||||
if workers is not None and workers != 1 and ProcessPoolExecutor is not None:
|
||||
workers = workers or None
|
||||
with ProcessPoolExecutor(max_workers=workers) as executor:
|
||||
results = executor.map(partial(compile_file,
|
||||
ddir=ddir, force=force,
|
||||
rx=rx, quiet=quiet,
|
||||
legacy=legacy,
|
||||
optimize=optimize),
|
||||
files)
|
||||
success = min(results, default=True)
|
||||
else:
|
||||
for file in files:
|
||||
if not compile_file(file, ddir, force, rx, quiet,
|
||||
legacy, optimize):
|
||||
success = False
|
||||
return success
|
||||
|
||||
def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
|
||||
legacy=False, optimize=-1):
|
||||
"""Byte-compile one file.
|
||||
|
||||
Arguments (only fullname is required):
|
||||
|
||||
fullname: the file to byte-compile
|
||||
ddir: if given, the directory name compiled in to the
|
||||
byte-code file.
|
||||
force: if True, force compilation, even if timestamps are up-to-date
|
||||
quiet: full output with False or 0, errors only with 1,
|
||||
no output with 2
|
||||
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
|
||||
optimize: optimization level or -1 for level of the interpreter
|
||||
"""
|
||||
success = True
|
||||
if quiet < 2 and isinstance(fullname, os.PathLike):
|
||||
fullname = os.fspath(fullname)
|
||||
name = os.path.basename(fullname)
|
||||
if ddir is not None:
|
||||
dfile = os.path.join(ddir, name)
|
||||
else:
|
||||
dfile = None
|
||||
if rx is not None:
|
||||
mo = rx.search(fullname)
|
||||
if mo:
|
||||
return success
|
||||
if os.path.isfile(fullname):
|
||||
if legacy:
|
||||
cfile = fullname + 'c'
|
||||
else:
|
||||
if optimize >= 0:
|
||||
opt = optimize if optimize >= 1 else ''
|
||||
cfile = importlib.util.cache_from_source(
|
||||
fullname, optimization=opt)
|
||||
else:
|
||||
cfile = importlib.util.cache_from_source(fullname)
|
||||
cache_dir = os.path.dirname(cfile)
|
||||
head, tail = name[:-3], name[-3:]
|
||||
if tail == '.py':
|
||||
if not force:
|
||||
try:
|
||||
mtime = int(os.stat(fullname).st_mtime)
|
||||
expect = struct.pack('<4sl', importlib.util.MAGIC_NUMBER,
|
||||
mtime)
|
||||
with open(cfile, 'rb') as chandle:
|
||||
actual = chandle.read(8)
|
||||
if expect == actual:
|
||||
return success
|
||||
except OSError:
|
||||
pass
|
||||
if not quiet:
|
||||
print('Compiling {!r}...'.format(fullname))
|
||||
try:
|
||||
ok = py_compile.compile(fullname, cfile, dfile, True,
|
||||
optimize=optimize)
|
||||
except py_compile.PyCompileError as err:
|
||||
success = False
|
||||
if quiet >= 2:
|
||||
return success
|
||||
elif quiet:
|
||||
print('*** Error compiling {!r}...'.format(fullname))
|
||||
else:
|
||||
print('*** ', end='')
|
||||
# escape non-printable characters in msg
|
||||
msg = err.msg.encode(sys.stdout.encoding,
|
||||
errors='backslashreplace')
|
||||
msg = msg.decode(sys.stdout.encoding)
|
||||
print(msg)
|
||||
except (SyntaxError, UnicodeError, OSError) as e:
|
||||
success = False
|
||||
if quiet >= 2:
|
||||
return success
|
||||
elif quiet:
|
||||
print('*** Error compiling {!r}...'.format(fullname))
|
||||
else:
|
||||
print('*** ', end='')
|
||||
print(e.__class__.__name__ + ':', e)
|
||||
else:
|
||||
if ok == 0:
|
||||
success = False
|
||||
return success
|
||||
|
||||
def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0,
|
||||
legacy=False, optimize=-1):
|
||||
"""Byte-compile all module on sys.path.
|
||||
|
||||
Arguments (all optional):
|
||||
|
||||
skip_curdir: if true, skip current directory (default True)
|
||||
maxlevels: max recursion level (default 0)
|
||||
force: as for compile_dir() (default False)
|
||||
quiet: as for compile_dir() (default 0)
|
||||
legacy: as for compile_dir() (default False)
|
||||
optimize: as for compile_dir() (default -1)
|
||||
"""
|
||||
success = True
|
||||
for dir in sys.path:
|
||||
if (not dir or dir == os.curdir) and skip_curdir:
|
||||
if quiet < 2:
|
||||
print('Skipping current directory')
|
||||
else:
|
||||
success = success and compile_dir(dir, maxlevels, None,
|
||||
force, quiet=quiet,
|
||||
legacy=legacy, optimize=optimize)
|
||||
return success
|
||||
|
||||
|
||||
def main():
|
||||
"""Script main program."""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Utilities to support installing Python libraries.')
|
||||
parser.add_argument('-l', action='store_const', const=0,
|
||||
default=10, dest='maxlevels',
|
||||
help="don't recurse into subdirectories")
|
||||
parser.add_argument('-r', type=int, dest='recursion',
|
||||
help=('control the maximum recursion level. '
|
||||
'if `-l` and `-r` options are specified, '
|
||||
'then `-r` takes precedence.'))
|
||||
parser.add_argument('-f', action='store_true', dest='force',
|
||||
help='force rebuild even if timestamps are up to date')
|
||||
parser.add_argument('-q', action='count', dest='quiet', default=0,
|
||||
help='output only error messages; -qq will suppress '
|
||||
'the error messages as well.')
|
||||
parser.add_argument('-b', action='store_true', dest='legacy',
|
||||
help='use legacy (pre-PEP3147) compiled file locations')
|
||||
parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
|
||||
help=('directory to prepend to file paths for use in '
|
||||
'compile-time tracebacks and in runtime '
|
||||
'tracebacks in cases where the source file is '
|
||||
'unavailable'))
|
||||
parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
|
||||
help=('skip files matching the regular expression; '
|
||||
'the regexp is searched for in the full path '
|
||||
'of each file considered for compilation'))
|
||||
parser.add_argument('-i', metavar='FILE', dest='flist',
|
||||
help=('add all the files and directories listed in '
|
||||
'FILE to the list considered for compilation; '
|
||||
'if "-", names are read from stdin'))
|
||||
parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
|
||||
help=('zero or more file and directory names '
|
||||
'to compile; if no arguments given, defaults '
|
||||
'to the equivalent of -l sys.path'))
|
||||
parser.add_argument('-j', '--workers', default=1,
|
||||
type=int, help='Run compileall concurrently')
|
||||
|
||||
args = parser.parse_args()
|
||||
compile_dests = args.compile_dest
|
||||
|
||||
if args.rx:
|
||||
import re
|
||||
args.rx = re.compile(args.rx)
|
||||
|
||||
|
||||
if args.recursion is not None:
|
||||
maxlevels = args.recursion
|
||||
else:
|
||||
maxlevels = args.maxlevels
|
||||
|
||||
# if flist is provided then load it
|
||||
if args.flist:
|
||||
try:
|
||||
with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
|
||||
for line in f:
|
||||
compile_dests.append(line.strip())
|
||||
except OSError:
|
||||
if args.quiet < 2:
|
||||
print("Error reading file list {}".format(args.flist))
|
||||
return False
|
||||
|
||||
if args.workers is not None:
|
||||
args.workers = args.workers or None
|
||||
|
||||
success = True
|
||||
try:
|
||||
if compile_dests:
|
||||
for dest in compile_dests:
|
||||
if os.path.isfile(dest):
|
||||
if not compile_file(dest, args.ddir, args.force, args.rx,
|
||||
args.quiet, args.legacy):
|
||||
success = False
|
||||
else:
|
||||
if not compile_dir(dest, maxlevels, args.ddir,
|
||||
args.force, args.rx, args.quiet,
|
||||
args.legacy, workers=args.workers):
|
||||
success = False
|
||||
return success
|
||||
else:
|
||||
return compile_path(legacy=args.legacy, force=args.force,
|
||||
quiet=args.quiet)
|
||||
except KeyboardInterrupt:
|
||||
if args.quiet < 2:
|
||||
print("\n[interrupted]")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
exit_status = int(not main())
|
||||
sys.exit(exit_status)
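
A hedged programmatic sketch of compile_dir() ('myproject' is a placeholder directory name):

import compileall

ok = compileall.compile_dir('myproject', maxlevels=3, quiet=1)
print('all compiled' if ok else 'some files failed')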
1 third_party/python/Lib/concurrent/__init__.py vendored Normal file
@@ -0,0 +1 @@
# This directory is a Python package.
18 third_party/python/Lib/concurrent/futures/__init__.py vendored Normal file
@@ -0,0 +1,18 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Execute computations asynchronously using threads or processes."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

from concurrent.futures._base import (FIRST_COMPLETED,
                                      FIRST_EXCEPTION,
                                      ALL_COMPLETED,
                                      CancelledError,
                                      TimeoutError,
                                      Future,
                                      Executor,
                                      wait,
                                      as_completed)
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
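
A small sketch of the exported API: submit work to a ThreadPoolExecutor and consume results with as_completed() (the worker function is illustrative):

from concurrent.futures import ThreadPoolExecutor, as_completed

def square(n):
    return n * n

with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(square, n) for n in range(8)]
    for fut in as_completed(futures):
        print(fut.result())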
612 third_party/python/Lib/concurrent/futures/_base.py vendored Normal file
@@ -0,0 +1,612 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
|
||||
|
||||
import collections
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
|
||||
FIRST_COMPLETED = 'FIRST_COMPLETED'
|
||||
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
|
||||
ALL_COMPLETED = 'ALL_COMPLETED'
|
||||
_AS_COMPLETED = '_AS_COMPLETED'
|
||||
|
||||
# Possible future states (for internal use by the futures package).
|
||||
PENDING = 'PENDING'
|
||||
RUNNING = 'RUNNING'
|
||||
# The future was cancelled by the user...
|
||||
CANCELLED = 'CANCELLED'
|
||||
# ...and _Waiter.add_cancelled() was called by a worker.
|
||||
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
|
||||
FINISHED = 'FINISHED'
|
||||
|
||||
_FUTURE_STATES = [
|
||||
PENDING,
|
||||
RUNNING,
|
||||
CANCELLED,
|
||||
CANCELLED_AND_NOTIFIED,
|
||||
FINISHED
|
||||
]
|
||||
|
||||
_STATE_TO_DESCRIPTION_MAP = {
|
||||
PENDING: "pending",
|
||||
RUNNING: "running",
|
||||
CANCELLED: "cancelled",
|
||||
CANCELLED_AND_NOTIFIED: "cancelled",
|
||||
FINISHED: "finished"
|
||||
}
|
||||
|
||||
# Logger for internal use by the futures package.
|
||||
LOGGER = logging.getLogger("concurrent.futures")
|
||||
|
||||
class Error(Exception):
|
||||
"""Base class for all future-related exceptions."""
|
||||
pass
|
||||
|
||||
class CancelledError(Error):
|
||||
"""The Future was cancelled."""
|
||||
pass
|
||||
|
||||
class TimeoutError(Error):
|
||||
"""The operation exceeded the given deadline."""
|
||||
pass
|
||||
|
||||
class _Waiter(object):
|
||||
"""Provides the event that wait() and as_completed() block on."""
|
||||
def __init__(self):
|
||||
self.event = threading.Event()
|
||||
self.finished_futures = []
|
||||
|
||||
def add_result(self, future):
|
||||
self.finished_futures.append(future)
|
||||
|
||||
def add_exception(self, future):
|
||||
self.finished_futures.append(future)
|
||||
|
||||
def add_cancelled(self, future):
|
||||
self.finished_futures.append(future)
|
||||
|
||||
class _AsCompletedWaiter(_Waiter):
|
||||
"""Used by as_completed()."""
|
||||
|
||||
def __init__(self):
|
||||
super(_AsCompletedWaiter, self).__init__()
|
||||
self.lock = threading.Lock()
|
||||
|
||||
def add_result(self, future):
|
||||
with self.lock:
|
||||
super(_AsCompletedWaiter, self).add_result(future)
|
||||
self.event.set()
|
||||
|
||||
def add_exception(self, future):
|
||||
with self.lock:
|
||||
super(_AsCompletedWaiter, self).add_exception(future)
|
||||
self.event.set()
|
||||
|
||||
def add_cancelled(self, future):
|
||||
with self.lock:
|
||||
super(_AsCompletedWaiter, self).add_cancelled(future)
|
||||
self.event.set()
|
||||
|
||||
class _FirstCompletedWaiter(_Waiter):
|
||||
"""Used by wait(return_when=FIRST_COMPLETED)."""
|
||||
|
||||
def add_result(self, future):
|
||||
super().add_result(future)
|
||||
self.event.set()
|
||||
|
||||
def add_exception(self, future):
|
||||
super().add_exception(future)
|
||||
self.event.set()
|
||||
|
||||
def add_cancelled(self, future):
|
||||
super().add_cancelled(future)
|
||||
self.event.set()
|
||||
|
||||
class _AllCompletedWaiter(_Waiter):
|
||||
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
|
||||
|
||||
def __init__(self, num_pending_calls, stop_on_exception):
|
||||
self.num_pending_calls = num_pending_calls
|
||||
self.stop_on_exception = stop_on_exception
|
||||
self.lock = threading.Lock()
|
||||
super().__init__()
|
||||
|
||||
def _decrement_pending_calls(self):
|
||||
with self.lock:
|
||||
self.num_pending_calls -= 1
|
||||
if not self.num_pending_calls:
|
||||
self.event.set()
|
||||
|
||||
def add_result(self, future):
|
||||
super().add_result(future)
|
||||
self._decrement_pending_calls()
|
||||
|
||||
def add_exception(self, future):
|
||||
super().add_exception(future)
|
||||
if self.stop_on_exception:
|
||||
self.event.set()
|
||||
else:
|
||||
self._decrement_pending_calls()
|
||||
|
||||
def add_cancelled(self, future):
|
||||
super().add_cancelled(future)
|
||||
self._decrement_pending_calls()
|
||||
|
||||
class _AcquireFutures(object):
|
||||
"""A context manager that does an ordered acquire of Future conditions."""
|
||||
|
||||
def __init__(self, futures):
|
||||
self.futures = sorted(futures, key=id)
|
||||
|
||||
def __enter__(self):
|
||||
for future in self.futures:
|
||||
future._condition.acquire()
|
||||
|
||||
def __exit__(self, *args):
|
||||
for future in self.futures:
|
||||
future._condition.release()
|
||||
|
||||
def _create_and_install_waiters(fs, return_when):
|
||||
if return_when == _AS_COMPLETED:
|
||||
waiter = _AsCompletedWaiter()
|
||||
elif return_when == FIRST_COMPLETED:
|
||||
waiter = _FirstCompletedWaiter()
|
||||
else:
|
||||
pending_count = sum(
|
||||
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
|
||||
|
||||
if return_when == FIRST_EXCEPTION:
|
||||
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
|
||||
elif return_when == ALL_COMPLETED:
|
||||
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
|
||||
else:
|
||||
raise ValueError("Invalid return condition: %r" % return_when)
|
||||
|
||||
for f in fs:
|
||||
f._waiters.append(waiter)
|
||||
|
||||
return waiter
|
||||
|
||||
|
||||
def _yield_finished_futures(fs, waiter, ref_collect):
|
||||
"""
|
||||
Iterate on the list *fs*, yielding finished futures one by one in
|
||||
reverse order.
|
||||
Before yielding a future, *waiter* is removed from its waiters
|
||||
and the future is removed from each set in the collection of sets
|
||||
*ref_collect*.
|
||||
|
||||
The aim of this function is to avoid keeping stale references after
|
||||
the future is yielded and before the iterator resumes.
|
||||
"""
|
||||
while fs:
|
||||
f = fs[-1]
|
||||
for futures_set in ref_collect:
|
||||
futures_set.remove(f)
|
||||
with f._condition:
|
||||
f._waiters.remove(waiter)
|
||||
del f
|
||||
# Careful not to keep a reference to the popped value
|
||||
yield fs.pop()
|
||||
|
||||
|
||||
def as_completed(fs, timeout=None):
|
||||
"""An iterator over the given futures that yields each as it completes.
|
||||
|
||||
Args:
|
||||
fs: The sequence of Futures (possibly created by different Executors) to
|
||||
iterate over.
|
||||
timeout: The maximum number of seconds to wait. If None, then there
|
||||
is no limit on the wait time.
|
||||
|
||||
Returns:
|
||||
An iterator that yields the given Futures as they complete (finished or
|
||||
cancelled). If any given Futures are duplicated, they will be returned
|
||||
once.
|
||||
|
||||
Raises:
|
||||
TimeoutError: If the entire result iterator could not be generated
|
||||
before the given timeout.
|
||||
"""
|
||||
if timeout is not None:
|
||||
end_time = timeout + time.monotonic()
|
||||
|
||||
fs = set(fs)
|
||||
total_futures = len(fs)
|
||||
with _AcquireFutures(fs):
|
||||
finished = set(
|
||||
f for f in fs
|
||||
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
|
||||
pending = fs - finished
|
||||
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
|
||||
finished = list(finished)
|
||||
try:
|
||||
yield from _yield_finished_futures(finished, waiter,
|
||||
ref_collect=(fs,))
|
||||
|
||||
while pending:
|
||||
if timeout is None:
|
||||
wait_timeout = None
|
||||
else:
|
||||
wait_timeout = end_time - time.monotonic()
|
||||
if wait_timeout < 0:
|
||||
raise TimeoutError(
|
||||
'%d (of %d) futures unfinished' % (
|
||||
len(pending), total_futures))
|
||||
|
||||
waiter.event.wait(wait_timeout)
|
||||
|
||||
with waiter.lock:
|
||||
finished = waiter.finished_futures
|
||||
waiter.finished_futures = []
|
||||
waiter.event.clear()
|
||||
|
||||
# reverse to keep finishing order
|
||||
finished.reverse()
|
||||
yield from _yield_finished_futures(finished, waiter,
|
||||
ref_collect=(fs, pending))
|
||||
|
||||
finally:
|
||||
# Remove waiter from unfinished futures
|
||||
for f in fs:
|
||||
with f._condition:
|
||||
f._waiters.remove(waiter)
|
||||
|
||||
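# Editor's sketch of typical as_completed() usage; the worker function is
# hypothetical, the concurrent.futures API calls are real.
from concurrent.futures import ThreadPoolExecutor, as_completed

def slow_square(n):
    return n * n

with ThreadPoolExecutor(max_workers=4) as pool:
    futs = [pool.submit(slow_square, n) for n in range(8)]
    for fut in as_completed(futs, timeout=30):
        print(fut.result())   # yielded in completion order, not submission order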
DoneAndNotDoneFutures = collections.namedtuple(
|
||||
'DoneAndNotDoneFutures', 'done not_done')
|
||||
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
|
||||
"""Wait for the futures in the given sequence to complete.
|
||||
|
||||
Args:
|
||||
fs: The sequence of Futures (possibly created by different Executors) to
|
||||
wait upon.
|
||||
timeout: The maximum number of seconds to wait. If None, then there
|
||||
is no limit on the wait time.
|
||||
return_when: Indicates when this function should return. The options
|
||||
are:
|
||||
|
||||
FIRST_COMPLETED - Return when any future finishes or is
|
||||
cancelled.
|
||||
FIRST_EXCEPTION - Return when any future finishes by raising an
|
||||
exception. If no future raises an exception
|
||||
then it is equivalent to ALL_COMPLETED.
|
||||
ALL_COMPLETED - Return when all futures finish or are cancelled.
|
||||
|
||||
Returns:
|
||||
A named 2-tuple of sets. The first set, named 'done', contains the
|
||||
futures that completed (finished or cancelled) before the wait
|
||||
completed. The second set, named 'not_done', contains uncompleted
|
||||
futures.
|
||||
"""
|
||||
with _AcquireFutures(fs):
|
||||
done = set(f for f in fs
|
||||
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
|
||||
not_done = set(fs) - done
|
||||
|
||||
if (return_when == FIRST_COMPLETED) and done:
|
||||
return DoneAndNotDoneFutures(done, not_done)
|
||||
elif (return_when == FIRST_EXCEPTION) and done:
|
||||
if any(f for f in done
|
||||
if not f.cancelled() and f.exception() is not None):
|
||||
return DoneAndNotDoneFutures(done, not_done)
|
||||
|
||||
if len(done) == len(fs):
|
||||
return DoneAndNotDoneFutures(done, not_done)
|
||||
|
||||
waiter = _create_and_install_waiters(fs, return_when)
|
||||
|
||||
waiter.event.wait(timeout)
|
||||
for f in fs:
|
||||
with f._condition:
|
||||
f._waiters.remove(waiter)
|
||||
|
||||
done.update(waiter.finished_futures)
|
||||
return DoneAndNotDoneFutures(done, set(fs) - done)
|
||||
|
||||
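# Editor's sketch contrasting wait() with as_completed(): wait() blocks once
# and returns two sets rather than yielding incrementally.
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

with ThreadPoolExecutor() as pool:
    futs = [pool.submit(pow, 2, n) for n in range(10)]
    done, not_done = wait(futs, timeout=5, return_when=FIRST_COMPLETED)
    for fut in done:            # at least one finished future on success
        print(fut.result())     # the rest keep running in the background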
class Future(object):
|
||||
"""Represents the result of an asynchronous computation."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initializes the future. Should not be called by clients."""
|
||||
self._condition = threading.Condition()
|
||||
self._state = PENDING
|
||||
self._result = None
|
||||
self._exception = None
|
||||
self._waiters = []
|
||||
self._done_callbacks = []
|
||||
|
||||
def _invoke_callbacks(self):
|
||||
for callback in self._done_callbacks:
|
||||
try:
|
||||
callback(self)
|
||||
except Exception:
|
||||
LOGGER.exception('exception calling callback for %r', self)
|
||||
|
||||
def __repr__(self):
|
||||
with self._condition:
|
||||
if self._state == FINISHED:
|
||||
if self._exception:
|
||||
return '<%s at %#x state=%s raised %s>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
_STATE_TO_DESCRIPTION_MAP[self._state],
|
||||
self._exception.__class__.__name__)
|
||||
else:
|
||||
return '<%s at %#x state=%s returned %s>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
_STATE_TO_DESCRIPTION_MAP[self._state],
|
||||
self._result.__class__.__name__)
|
||||
return '<%s at %#x state=%s>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
_STATE_TO_DESCRIPTION_MAP[self._state])
|
||||
|
||||
def cancel(self):
|
||||
"""Cancel the future if possible.
|
||||
|
||||
Returns True if the future was cancelled, False otherwise. A future
|
||||
cannot be cancelled if it is running or has already completed.
|
||||
"""
|
||||
with self._condition:
|
||||
if self._state in [RUNNING, FINISHED]:
|
||||
return False
|
||||
|
||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
||||
return True
|
||||
|
||||
self._state = CANCELLED
|
||||
self._condition.notify_all()
|
||||
|
||||
self._invoke_callbacks()
|
||||
return True
|
||||
|
||||
def cancelled(self):
|
||||
"""Return True if the future was cancelled."""
|
||||
with self._condition:
|
||||
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
|
||||
|
||||
def running(self):
|
||||
"""Return True if the future is currently executing."""
|
||||
with self._condition:
|
||||
return self._state == RUNNING
|
||||
|
||||
def done(self):
|
||||
"""Return True of the future was cancelled or finished executing."""
|
||||
with self._condition:
|
||||
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
|
||||
|
||||
def __get_result(self):
|
||||
if self._exception:
|
||||
raise self._exception
|
||||
else:
|
||||
return self._result
|
||||
|
||||
def add_done_callback(self, fn):
|
||||
"""Attaches a callable that will be called when the future finishes.
|
||||
|
||||
Args:
|
||||
fn: A callable that will be called with this future as its only
|
||||
argument when the future completes or is cancelled. The callable
|
||||
will always be called by a thread in the same process in which
|
||||
it was added. If the future has already completed or been
|
||||
cancelled then the callable will be called immediately. These
|
||||
callables are called in the order that they were added.
|
||||
"""
|
||||
with self._condition:
|
||||
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
|
||||
self._done_callbacks.append(fn)
|
||||
return
|
||||
fn(self)
|
||||
|
||||
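# Editor's sketch of the callback contract just described: the callback gets
# the future itself, and runs immediately if the future is already done.
from concurrent.futures import ThreadPoolExecutor

def report(fut):
    # Runs in whichever thread completed (or registered) the future.
    print('done:', fut.result())

with ThreadPoolExecutor() as pool:
    f = pool.submit(sum, [1, 2, 3])
    f.add_done_callback(report)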
def result(self, timeout=None):
|
||||
"""Return the result of the call that the future represents.
|
||||
|
||||
Args:
|
||||
timeout: The number of seconds to wait for the result if the future
|
||||
isn't done. If None, then there is no limit on the wait time.
|
||||
|
||||
Returns:
|
||||
The result of the call that the future represents.
|
||||
|
||||
Raises:
|
||||
CancelledError: If the future was cancelled.
|
||||
TimeoutError: If the future didn't finish executing before the given
|
||||
timeout.
|
||||
Exception: If the call raised an exception then it will be raised.
|
||||
"""
|
||||
with self._condition:
|
||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
||||
raise CancelledError()
|
||||
elif self._state == FINISHED:
|
||||
return self.__get_result()
|
||||
|
||||
self._condition.wait(timeout)
|
||||
|
||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
||||
raise CancelledError()
|
||||
elif self._state == FINISHED:
|
||||
return self.__get_result()
|
||||
else:
|
||||
raise TimeoutError()
|
||||
|
||||
def exception(self, timeout=None):
|
||||
"""Return the exception raised by the call that the future represents.
|
||||
|
||||
Args:
|
||||
timeout: The number of seconds to wait for the exception if the
|
||||
future isn't done. If None, then there is no limit on the wait
|
||||
time.
|
||||
|
||||
Returns:
|
||||
The exception raised by the call that the future represents or None
|
||||
if the call completed without raising.
|
||||
|
||||
Raises:
|
||||
CancelledError: If the future was cancelled.
|
||||
TimeoutError: If the future didn't finish executing before the given
|
||||
timeout.
|
||||
"""
|
||||
|
||||
with self._condition:
|
||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
||||
raise CancelledError()
|
||||
elif self._state == FINISHED:
|
||||
return self._exception
|
||||
|
||||
self._condition.wait(timeout)
|
||||
|
||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
||||
raise CancelledError()
|
||||
elif self._state == FINISHED:
|
||||
return self._exception
|
||||
else:
|
||||
raise TimeoutError()
|
||||
|
||||
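# Editor's sketch: result() and exception() are two views of the same wait.
from concurrent.futures import ThreadPoolExecutor

def boom():
    raise ValueError('nope')

with ThreadPoolExecutor() as pool:
    f = pool.submit(boom)
    print(f.exception(timeout=5))   # returns ValueError('nope'), not raised
    try:
        f.result(timeout=5)         # the same error, re-raised here
    except ValueError as e:
        print('caught:', e)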
# The following methods should only be used by Executors and in tests.
|
||||
def set_running_or_notify_cancel(self):
|
||||
"""Mark the future as running or process any cancel notifications.
|
||||
|
||||
Should only be used by Executor implementations and unit tests.
|
||||
|
||||
If the future has been cancelled (cancel() was called and returned
|
||||
True) then any threads waiting on the future completing (through calls
|
||||
to as_completed() or wait()) are notified and False is returned.
|
||||
|
||||
If the future was not cancelled then it is put in the running state
|
||||
(future calls to running() will return True) and True is returned.
|
||||
|
||||
This method should be called by Executor implementations before
|
||||
executing the work associated with this future. If this method returns
|
||||
False then the work should not be executed.
|
||||
|
||||
Returns:
|
||||
False if the Future was cancelled, True otherwise.
|
||||
|
||||
Raises:
|
||||
RuntimeError: if this method was already called or if set_result()
|
||||
or set_exception() was called.
|
||||
"""
|
||||
with self._condition:
|
||||
if self._state == CANCELLED:
|
||||
self._state = CANCELLED_AND_NOTIFIED
|
||||
for waiter in self._waiters:
|
||||
waiter.add_cancelled(self)
|
||||
# self._condition.notify_all() is not necessary because
|
||||
# self.cancel() triggers a notification.
|
||||
return False
|
||||
elif self._state == PENDING:
|
||||
self._state = RUNNING
|
||||
return True
|
||||
else:
|
||||
LOGGER.critical('Future %s in unexpected state: %s',
|
||||
id(self),
|
||||
self._state)
|
||||
raise RuntimeError('Future in unexpected state')
|
||||
|
||||
def set_result(self, result):
|
||||
"""Sets the return value of work associated with the future.
|
||||
|
||||
Should only be used by Executor implementations and unit tests.
|
||||
"""
|
||||
with self._condition:
|
||||
self._result = result
|
||||
self._state = FINISHED
|
||||
for waiter in self._waiters:
|
||||
waiter.add_result(self)
|
||||
self._condition.notify_all()
|
||||
self._invoke_callbacks()
|
||||
|
||||
def set_exception(self, exception):
|
||||
"""Sets the result of the future as being the given exception.
|
||||
|
||||
Should only be used by Executor implementations and unit tests.
|
||||
"""
|
||||
with self._condition:
|
||||
self._exception = exception
|
||||
self._state = FINISHED
|
||||
for waiter in self._waiters:
|
||||
waiter.add_exception(self)
|
||||
self._condition.notify_all()
|
||||
self._invoke_callbacks()
|
||||
|
||||
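# Editor's sketch of the state machine these setters drive; this is how
# executors (and unit tests) operate a Future by hand.
from concurrent.futures import Future

f = Future()                              # PENDING
assert f.set_running_or_notify_cancel()   # -> RUNNING; False if cancelled
f.set_result(42)                          # -> FINISHED; wakes waiters, runs callbacks
assert f.result() == 42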
class Executor(object):
|
||||
"""This is an abstract base class for concrete asynchronous executors."""
|
||||
|
||||
def submit(self, fn, *args, **kwargs):
|
||||
"""Submits a callable to be executed with the given arguments.
|
||||
|
||||
Schedules the callable to be executed as fn(*args, **kwargs) and returns
|
||||
a Future instance representing the execution of the callable.
|
||||
|
||||
Returns:
|
||||
A Future representing the given call.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def map(self, fn, *iterables, timeout=None, chunksize=1):
|
||||
"""Returns an iterator equivalent to map(fn, iter).
|
||||
|
||||
Args:
|
||||
fn: A callable that will take as many arguments as there are
|
||||
passed iterables.
|
||||
timeout: The maximum number of seconds to wait. If None, then there
|
||||
is no limit on the wait time.
|
||||
chunksize: The size of the chunks the iterable will be broken into
|
||||
before being passed to a child process. This argument is only
|
||||
used by ProcessPoolExecutor; it is ignored by
|
||||
ThreadPoolExecutor.
|
||||
|
||||
Returns:
|
||||
An iterator equivalent to: map(func, *iterables) but the calls may
|
||||
be evaluated out-of-order.
|
||||
|
||||
Raises:
|
||||
TimeoutError: If the entire result iterator could not be generated
|
||||
before the given timeout.
|
||||
Exception: If fn(*args) raises for any values.
|
||||
"""
|
||||
if timeout is not None:
|
||||
end_time = timeout + time.monotonic()
|
||||
|
||||
fs = [self.submit(fn, *args) for args in zip(*iterables)]
|
||||
|
||||
# Yield must be hidden in closure so that the futures are submitted
|
||||
# before the first iterator value is required.
|
||||
def result_iterator():
|
||||
try:
|
||||
# reverse to keep finishing order
|
||||
fs.reverse()
|
||||
while fs:
|
||||
# Careful not to keep a reference to the popped future
|
||||
if timeout is None:
|
||||
yield fs.pop().result()
|
||||
else:
|
||||
yield fs.pop().result(end_time - time.monotonic())
|
||||
finally:
|
||||
for future in fs:
|
||||
future.cancel()
|
||||
return result_iterator()
|
||||
|
||||
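# Editor's sketch: unlike builtin map(), every call is submitted eagerly;
# only the results arrive lazily, and always in argument order.
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=2) as pool:
    for value in pool.map(abs, [-3, 1, -2], timeout=10):
        print(value)   # 3, 1, 2 -- input order even if finished out of order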
def shutdown(self, wait=True):
|
||||
"""Clean-up the resources associated with the Executor.
|
||||
|
||||
It is safe to call this method several times, but no other
methods can be called after this one.
|
||||
|
||||
Args:
|
||||
wait: If True then shutdown will not return until all running
|
||||
futures have finished executing and the resources used by the
|
||||
executor have been reclaimed.
|
||||
"""
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.shutdown(wait=True)
|
||||
return False
|
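# Editor's sketch: the __exit__ above is what makes the with-statement
# equivalent to an explicit shutdown(wait=True) in a finally block.
from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor()
try:
    fut = pool.submit(len, 'abc')
finally:
    pool.shutdown(wait=True)   # what `with pool:` does implicitly
print(fut.result())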
515
third_party/python/Lib/concurrent/futures/process.py
vendored
Normal file
@@ -0,0 +1,515 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
"""Implements ProcessPoolExecutor.
|
||||
|
||||
The following diagram and text describe the data flow through the system:
|
||||
|
||||
|======================= In-process =====================|== Out-of-process ==|
|
||||
|
||||
+----------+ +----------+ +--------+ +-----------+ +---------+
|
||||
| | => | Work Ids | => | | => | Call Q | => | |
|
||||
| | +----------+ | | +-----------+ | |
|
||||
| | | ... | | | | ... | | |
|
||||
| | | 6 | | | | 5, call() | | |
|
||||
| | | 7 | | | | ... | | |
|
||||
| Process | | ... | | Local | +-----------+ | Process |
|
||||
| Pool | +----------+ | Worker | | #1..n |
|
||||
| Executor | | Thread | | |
|
||||
| | +------------+ | | +-----------+ | |
|
||||
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
|
||||
| | +------------+ | | +-----------+ | |
|
||||
| | | 6: call() | | | | ... | | |
|
||||
| | | future | | | | 4, result | | |
|
||||
| | | ... | | | | 3, except | | |
|
||||
+----------+ +------------+ +--------+ +-----------+ +---------+
|
||||
|
||||
Executor.submit() called:
|
||||
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
|
||||
- adds the id of the _WorkItem to the "Work Ids" queue
|
||||
|
||||
Local worker thread:
|
||||
- reads work ids from the "Work Ids" queue and looks up the corresponding
|
||||
WorkItem from the "Work Items" dict: if the work item has been cancelled then
|
||||
it is simply removed from the dict, otherwise it is repackaged as a
|
||||
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
|
||||
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
|
||||
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
|
||||
- reads _ResultItems from "Result Q", updates the future stored in the
|
||||
"Work Items" dict and deletes the dict entry
|
||||
|
||||
Process #1..n:
|
||||
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
|
||||
_ResultItems in "Result Q"
|
||||
"""
|
||||
|
||||
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
|
||||
|
||||
import atexit
|
||||
import os
|
||||
from concurrent.futures import _base
|
||||
import queue
|
||||
from queue import Full
|
||||
import multiprocessing
|
||||
from multiprocessing import SimpleQueue
|
||||
from multiprocessing.connection import wait
|
||||
import threading
|
||||
import weakref
|
||||
from functools import partial
|
||||
import itertools
|
||||
import traceback
|
||||
|
||||
# Workers are created as daemon threads and processes. This is done to allow the
|
||||
# interpreter to exit when there are still idle processes in a
|
||||
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
|
||||
# allowing workers to die with the interpreter has two undesirable properties:
|
||||
# - The workers would still be running during interpreter shutdown,
|
||||
# meaning that they would fail in unpredictable ways.
|
||||
# - The workers could be killed while evaluating a work item, which could
|
||||
# be bad if the callable being evaluated has external side-effects e.g.
|
||||
# writing to a file.
|
||||
#
|
||||
# To work around this problem, an exit handler is installed which tells the
|
||||
# workers to exit when their work queues are empty and then waits until the
|
||||
# threads/processes finish.
|
||||
|
||||
_threads_queues = weakref.WeakKeyDictionary()
|
||||
_shutdown = False
|
||||
|
||||
def _python_exit():
|
||||
global _shutdown
|
||||
_shutdown = True
|
||||
items = list(_threads_queues.items())
|
||||
for t, q in items:
|
||||
q.put(None)
|
||||
for t, q in items:
|
||||
t.join()
|
||||
|
||||
# Controls how many more calls than processes will be queued in the call queue.
|
||||
# A smaller number will mean that processes spend more time idle waiting for
|
||||
# work while a larger number will make Future.cancel() succeed less frequently
|
||||
# (Futures in the call queue cannot be cancelled).
|
||||
EXTRA_QUEUED_CALLS = 1
|
||||
|
||||
# Hack to embed stringification of remote traceback in local traceback
|
||||
|
||||
class _RemoteTraceback(Exception):
|
||||
def __init__(self, tb):
|
||||
self.tb = tb
|
||||
def __str__(self):
|
||||
return self.tb
|
||||
|
||||
class _ExceptionWithTraceback:
|
||||
def __init__(self, exc, tb):
|
||||
tb = traceback.format_exception(type(exc), exc, tb)
|
||||
tb = ''.join(tb)
|
||||
self.exc = exc
|
||||
self.tb = '\n"""\n%s"""' % tb
|
||||
def __reduce__(self):
|
||||
return _rebuild_exc, (self.exc, self.tb)
|
||||
|
||||
def _rebuild_exc(exc, tb):
|
||||
exc.__cause__ = _RemoteTraceback(tb)
|
||||
return exc
|
||||
|
||||
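# Editor's sketch (not executed by this module): the __reduce__ hook above
# means a pickle round-trip rebuilds the original exception with the
# formatted remote traceback chained on as __cause__.
import pickle

try:
    1 / 0
except ZeroDivisionError as e:
    wrapped = _ExceptionWithTraceback(e, e.__traceback__)

exc = pickle.loads(pickle.dumps(wrapped))   # a ZeroDivisionError again
print(exc.__cause__)                        # the stringified remote traceback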
class _WorkItem(object):
|
||||
def __init__(self, future, fn, args, kwargs):
|
||||
self.future = future
|
||||
self.fn = fn
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
|
||||
class _ResultItem(object):
|
||||
def __init__(self, work_id, exception=None, result=None):
|
||||
self.work_id = work_id
|
||||
self.exception = exception
|
||||
self.result = result
|
||||
|
||||
class _CallItem(object):
|
||||
def __init__(self, work_id, fn, args, kwargs):
|
||||
self.work_id = work_id
|
||||
self.fn = fn
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
|
||||
def _get_chunks(*iterables, chunksize):
|
||||
""" Iterates over zip()ed iterables in chunks. """
|
||||
it = zip(*iterables)
|
||||
while True:
|
||||
chunk = tuple(itertools.islice(it, chunksize))
|
||||
if not chunk:
|
||||
return
|
||||
yield chunk
|
||||
|
||||
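# Editor's sketch of the chunking, with hypothetical inputs; this is what a
# chunksize argument to map() feeds to each worker process:
#
#   >>> list(_get_chunks('abcde', range(5), chunksize=2))
#   [(('a', 0), ('b', 1)), (('c', 2), ('d', 3)), (('e', 4),)]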
def _process_chunk(fn, chunk):
|
||||
""" Processes a chunk of an iterable passed to map.
|
||||
|
||||
Runs the function passed to map() on a chunk of the
|
||||
iterable passed to map.
|
||||
|
||||
This function is run in a separate process.
|
||||
|
||||
"""
|
||||
return [fn(*args) for args in chunk]
|
||||
|
||||
def _process_worker(call_queue, result_queue):
|
||||
"""Evaluates calls from call_queue and places the results in result_queue.
|
||||
|
||||
This worker is run in a separate process.
|
||||
|
||||
Args:
|
||||
call_queue: A multiprocessing.Queue of _CallItems that will be read and
|
||||
evaluated by the worker.
|
||||
result_queue: A multiprocessing.Queue of _ResultItems that will be written
|
||||
to by the worker.
|
||||
|
||||
"""
|
||||
while True:
|
||||
call_item = call_queue.get(block=True)
|
||||
if call_item is None:
|
||||
# Wake up queue management thread
|
||||
result_queue.put(os.getpid())
|
||||
return
|
||||
try:
|
||||
r = call_item.fn(*call_item.args, **call_item.kwargs)
|
||||
except BaseException as e:
|
||||
exc = _ExceptionWithTraceback(e, e.__traceback__)
|
||||
result_queue.put(_ResultItem(call_item.work_id, exception=exc))
|
||||
else:
|
||||
result_queue.put(_ResultItem(call_item.work_id,
|
||||
result=r))
|
||||
|
||||
def _add_call_item_to_queue(pending_work_items,
|
||||
work_ids,
|
||||
call_queue):
|
||||
"""Fills call_queue with _WorkItems from pending_work_items.
|
||||
|
||||
This function never blocks.
|
||||
|
||||
Args:
|
||||
pending_work_items: A dict mapping work ids to _WorkItems e.g.
|
||||
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
|
||||
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
|
||||
are consumed and the corresponding _WorkItems from
|
||||
pending_work_items are transformed into _CallItems and put in
|
||||
call_queue.
|
||||
call_queue: A multiprocessing.Queue that will be filled with _CallItems
|
||||
derived from _WorkItems.
|
||||
"""
|
||||
while True:
|
||||
if call_queue.full():
|
||||
return
|
||||
try:
|
||||
work_id = work_ids.get(block=False)
|
||||
except queue.Empty:
|
||||
return
|
||||
else:
|
||||
work_item = pending_work_items[work_id]
|
||||
|
||||
if work_item.future.set_running_or_notify_cancel():
|
||||
call_queue.put(_CallItem(work_id,
|
||||
work_item.fn,
|
||||
work_item.args,
|
||||
work_item.kwargs),
|
||||
block=True)
|
||||
else:
|
||||
del pending_work_items[work_id]
|
||||
continue
|
||||
|
||||
def _queue_management_worker(executor_reference,
|
||||
processes,
|
||||
pending_work_items,
|
||||
work_ids_queue,
|
||||
call_queue,
|
||||
result_queue):
|
||||
"""Manages the communication between this process and the worker processes.
|
||||
|
||||
This function is run in a local thread.
|
||||
|
||||
Args:
|
||||
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
|
||||
this thread. Used to determine if the ProcessPoolExecutor has been
|
||||
garbage collected and that this function can exit.
|
||||
processes: A dict mapping pids to the multiprocessing.Process
instances used as workers.
|
||||
pending_work_items: A dict mapping work ids to _WorkItems e.g.
|
||||
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
|
||||
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
|
||||
call_queue: A multiprocessing.Queue that will be filled with _CallItems
|
||||
derived from _WorkItems for processing by the process workers.
|
||||
result_queue: A multiprocessing.Queue of _ResultItems generated by the
|
||||
process workers.
|
||||
"""
|
||||
executor = None
|
||||
|
||||
def shutting_down():
|
||||
return _shutdown or executor is None or executor._shutdown_thread
|
||||
|
||||
def shutdown_worker():
|
||||
# This is an upper bound
|
||||
nb_children_alive = sum(p.is_alive() for p in processes.values())
|
||||
for i in range(0, nb_children_alive):
|
||||
call_queue.put_nowait(None)
|
||||
# Release the queue's resources as soon as possible.
|
||||
call_queue.close()
|
||||
# If .join() is not called on the created processes then
|
||||
# some multiprocessing.Queue methods may deadlock on Mac OS X.
|
||||
for p in processes.values():
|
||||
p.join()
|
||||
|
||||
reader = result_queue._reader
|
||||
|
||||
while True:
|
||||
_add_call_item_to_queue(pending_work_items,
|
||||
work_ids_queue,
|
||||
call_queue)
|
||||
|
||||
sentinels = [p.sentinel for p in processes.values()]
|
||||
assert sentinels
|
||||
ready = wait([reader] + sentinels)
|
||||
if reader in ready:
|
||||
result_item = reader.recv()
|
||||
else:
|
||||
# Mark the process pool broken so that submits fail right now.
|
||||
executor = executor_reference()
|
||||
if executor is not None:
|
||||
executor._broken = True
|
||||
executor._shutdown_thread = True
|
||||
executor = None
|
||||
# All futures in flight must be marked failed
|
||||
for work_id, work_item in pending_work_items.items():
|
||||
work_item.future.set_exception(
|
||||
BrokenProcessPool(
|
||||
"A process in the process pool was "
|
||||
"terminated abruptly while the future was "
|
||||
"running or pending."
|
||||
))
|
||||
# Delete references to object. See issue16284
|
||||
del work_item
|
||||
pending_work_items.clear()
|
||||
# Terminate remaining workers forcibly: the queues or their
|
||||
# locks may be in a dirty state and block forever.
|
||||
for p in processes.values():
|
||||
p.terminate()
|
||||
shutdown_worker()
|
||||
return
|
||||
if isinstance(result_item, int):
|
||||
# Clean shutdown of a worker using its PID
|
||||
# (avoids marking the executor broken)
|
||||
assert shutting_down()
|
||||
p = processes.pop(result_item)
|
||||
p.join()
|
||||
if not processes:
|
||||
shutdown_worker()
|
||||
return
|
||||
elif result_item is not None:
|
||||
work_item = pending_work_items.pop(result_item.work_id, None)
|
||||
# work_item can be None if another process terminated (see above)
|
||||
if work_item is not None:
|
||||
if result_item.exception:
|
||||
work_item.future.set_exception(result_item.exception)
|
||||
else:
|
||||
work_item.future.set_result(result_item.result)
|
||||
# Delete references to object. See issue16284
|
||||
del work_item
|
||||
# Check whether we should start shutting down.
|
||||
executor = executor_reference()
|
||||
# No more work items can be added if:
|
||||
# - The interpreter is shutting down OR
|
||||
# - The executor that owns this worker has been collected OR
|
||||
# - The executor that owns this worker has been shutdown.
|
||||
if shutting_down():
|
||||
try:
|
||||
# Since no new work items can be added, it is safe to shutdown
|
||||
# this thread if there are no pending work items.
|
||||
if not pending_work_items:
|
||||
shutdown_worker()
|
||||
return
|
||||
except Full:
|
||||
# This is not a problem: we will eventually be woken up (in
|
||||
# result_queue.get()) and be able to send a sentinel again.
|
||||
pass
|
||||
executor = None
|
||||
|
||||
_system_limits_checked = False
|
||||
_system_limited = None
|
||||
def _check_system_limits():
|
||||
global _system_limits_checked, _system_limited
|
||||
if _system_limits_checked:
|
||||
if _system_limited:
|
||||
raise NotImplementedError(_system_limited)
|
||||
_system_limits_checked = True
|
||||
try:
|
||||
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
|
||||
except (AttributeError, ValueError):
|
||||
# sysconf not available or setting not available
|
||||
return
|
||||
if nsems_max == -1:
|
||||
# undetermined limit; assume that the limit is determined
|
||||
# by available memory only
|
||||
return
|
||||
if nsems_max >= 256:
|
||||
# minimum number of semaphores available
|
||||
# according to POSIX
|
||||
return
|
||||
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
|
||||
raise NotImplementedError(_system_limited)
|
||||
|
||||
|
||||
def _chain_from_iterable_of_lists(iterable):
|
||||
"""
|
||||
Specialized implementation of itertools.chain.from_iterable.
|
||||
Each item in *iterable* should be a list. This function is
|
||||
careful not to keep references to yielded objects.
|
||||
"""
|
||||
for element in iterable:
|
||||
element.reverse()
|
||||
while element:
|
||||
yield element.pop()
|
||||
|
||||
|
||||
class BrokenProcessPool(RuntimeError):
|
||||
"""
|
||||
Raised when a process in a ProcessPoolExecutor terminated abruptly
|
||||
while a future was in the running state.
|
||||
"""
|
||||
|
||||
|
||||
class ProcessPoolExecutor(_base.Executor):
|
||||
def __init__(self, max_workers=None):
|
||||
"""Initializes a new ProcessPoolExecutor instance.
|
||||
|
||||
Args:
|
||||
max_workers: The maximum number of processes that can be used to
|
||||
execute the given calls. If None or not given then as many
|
||||
worker processes will be created as the machine has processors.
|
||||
"""
|
||||
_check_system_limits()
|
||||
|
||||
if max_workers is None:
|
||||
self._max_workers = os.cpu_count() or 1
|
||||
else:
|
||||
if max_workers <= 0:
|
||||
raise ValueError("max_workers must be greater than 0")
|
||||
|
||||
self._max_workers = max_workers
|
||||
|
||||
# Make the call queue slightly larger than the number of processes to
|
||||
# prevent the worker processes from idling. But don't make it too big
|
||||
# because futures in the call queue cannot be cancelled.
|
||||
self._call_queue = multiprocessing.Queue(self._max_workers +
|
||||
EXTRA_QUEUED_CALLS)
|
||||
# Killed worker processes can produce spurious "broken pipe"
|
||||
# tracebacks in the queue's own worker thread. But we detect killed
|
||||
# processes anyway, so silence the tracebacks.
|
||||
self._call_queue._ignore_epipe = True
|
||||
self._result_queue = SimpleQueue()
|
||||
self._work_ids = queue.Queue()
|
||||
self._queue_management_thread = None
|
||||
# Map of pids to processes
|
||||
self._processes = {}
|
||||
|
||||
# Shutdown is a two-step process.
|
||||
self._shutdown_thread = False
|
||||
self._shutdown_lock = threading.Lock()
|
||||
self._broken = False
|
||||
self._queue_count = 0
|
||||
self._pending_work_items = {}
|
||||
|
||||
def _start_queue_management_thread(self):
|
||||
# When the executor gets lost, the weakref callback will wake up
|
||||
# the queue management thread.
|
||||
def weakref_cb(_, q=self._result_queue):
|
||||
q.put(None)
|
||||
if self._queue_management_thread is None:
|
||||
# Start the processes so that their sentinels are known.
|
||||
self._adjust_process_count()
|
||||
self._queue_management_thread = threading.Thread(
|
||||
target=_queue_management_worker,
|
||||
args=(weakref.ref(self, weakref_cb),
|
||||
self._processes,
|
||||
self._pending_work_items,
|
||||
self._work_ids,
|
||||
self._call_queue,
|
||||
self._result_queue))
|
||||
self._queue_management_thread.daemon = True
|
||||
self._queue_management_thread.start()
|
||||
_threads_queues[self._queue_management_thread] = self._result_queue
|
||||
|
||||
def _adjust_process_count(self):
|
||||
for _ in range(len(self._processes), self._max_workers):
|
||||
p = multiprocessing.Process(
|
||||
target=_process_worker,
|
||||
args=(self._call_queue,
|
||||
self._result_queue))
|
||||
p.start()
|
||||
self._processes[p.pid] = p
|
||||
|
||||
def submit(self, fn, *args, **kwargs):
|
||||
with self._shutdown_lock:
|
||||
if self._broken:
|
||||
raise BrokenProcessPool('A child process terminated '
|
||||
'abruptly, the process pool is not usable anymore')
|
||||
if self._shutdown_thread:
|
||||
raise RuntimeError('cannot schedule new futures after shutdown')
|
||||
|
||||
f = _base.Future()
|
||||
w = _WorkItem(f, fn, args, kwargs)
|
||||
|
||||
self._pending_work_items[self._queue_count] = w
|
||||
self._work_ids.put(self._queue_count)
|
||||
self._queue_count += 1
|
||||
# Wake up queue management thread
|
||||
self._result_queue.put(None)
|
||||
|
||||
self._start_queue_management_thread()
|
||||
return f
|
||||
submit.__doc__ = _base.Executor.submit.__doc__
|
||||
|
||||
def map(self, fn, *iterables, timeout=None, chunksize=1):
|
||||
"""Returns an iterator equivalent to map(fn, iter).
|
||||
|
||||
Args:
|
||||
fn: A callable that will take as many arguments as there are
|
||||
passed iterables.
|
||||
timeout: The maximum number of seconds to wait. If None, then there
|
||||
is no limit on the wait time.
|
||||
chunksize: If greater than one, the iterables will be chopped into
|
||||
chunks of size chunksize and submitted to the process pool.
|
||||
If set to one, the items in the list will be sent one at a time.
|
||||
|
||||
Returns:
|
||||
An iterator equivalent to: map(func, *iterables) but the calls may
|
||||
be evaluated out-of-order.
|
||||
|
||||
Raises:
|
||||
TimeoutError: If the entire result iterator could not be generated
|
||||
before the given timeout.
|
||||
Exception: If fn(*args) raises for any values.
|
||||
"""
|
||||
if chunksize < 1:
|
||||
raise ValueError("chunksize must be >= 1.")
|
||||
|
||||
results = super().map(partial(_process_chunk, fn),
|
||||
_get_chunks(*iterables, chunksize=chunksize),
|
||||
timeout=timeout)
|
||||
return _chain_from_iterable_of_lists(results)
|
||||
|
||||
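# Editor's sketch of ProcessPoolExecutor.map(); the __main__ guard matters
# because worker processes may re-import the main module. cube() is
# hypothetical.
from concurrent.futures import ProcessPoolExecutor

def cube(n):
    return n ** 3

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as pool:
        # chunksize batches inputs to cut per-item pickling/IPC overhead.
        print(list(pool.map(cube, range(10), chunksize=5)))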
def shutdown(self, wait=True):
|
||||
with self._shutdown_lock:
|
||||
self._shutdown_thread = True
|
||||
if self._queue_management_thread:
|
||||
# Wake up queue management thread
|
||||
self._result_queue.put(None)
|
||||
if wait:
|
||||
self._queue_management_thread.join()
|
||||
# To reduce the risk of opening too many files, remove references to
|
||||
# objects that use file descriptors.
|
||||
self._queue_management_thread = None
|
||||
self._call_queue = None
|
||||
self._result_queue = None
|
||||
self._processes = None
|
||||
shutdown.__doc__ = _base.Executor.shutdown.__doc__
|
||||
|
||||
atexit.register(_python_exit)
|
153
third_party/python/Lib/concurrent/futures/thread.py
vendored
Normal file
@@ -0,0 +1,153 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
"""Implements ThreadPoolExecutor."""
|
||||
|
||||
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
|
||||
|
||||
import atexit
|
||||
from concurrent.futures import _base
|
||||
import itertools
|
||||
import queue
|
||||
import threading
|
||||
import weakref
|
||||
import os
|
||||
|
||||
# Workers are created as daemon threads. This is done to allow the interpreter
|
||||
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
|
||||
# pool (i.e. shutdown() was not called). However, allowing workers to die with
|
||||
# the interpreter has two undesirable properties:
|
||||
# - The workers would still be running during interpreter shutdown,
|
||||
# meaning that they would fail in unpredictable ways.
|
||||
# - The workers could be killed while evaluating a work item, which could
|
||||
# be bad if the callable being evaluated has external side-effects e.g.
|
||||
# writing to a file.
|
||||
#
|
||||
# To work around this problem, an exit handler is installed which tells the
|
||||
# workers to exit when their work queues are empty and then waits until the
|
||||
# threads finish.
|
||||
|
||||
_threads_queues = weakref.WeakKeyDictionary()
|
||||
_shutdown = False
|
||||
|
||||
def _python_exit():
|
||||
global _shutdown
|
||||
_shutdown = True
|
||||
items = list(_threads_queues.items())
|
||||
for t, q in items:
|
||||
q.put(None)
|
||||
for t, q in items:
|
||||
t.join()
|
||||
|
||||
atexit.register(_python_exit)
|
||||
|
||||
class _WorkItem(object):
|
||||
def __init__(self, future, fn, args, kwargs):
|
||||
self.future = future
|
||||
self.fn = fn
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
|
||||
def run(self):
|
||||
if not self.future.set_running_or_notify_cancel():
|
||||
return
|
||||
|
||||
try:
|
||||
result = self.fn(*self.args, **self.kwargs)
|
||||
except BaseException as exc:
|
||||
self.future.set_exception(exc)
|
||||
# Break a reference cycle with the exception 'exc'
|
||||
self = None
|
||||
else:
|
||||
self.future.set_result(result)
|
||||
|
||||
def _worker(executor_reference, work_queue):
|
||||
try:
|
||||
while True:
|
||||
work_item = work_queue.get(block=True)
|
||||
if work_item is not None:
|
||||
work_item.run()
|
||||
# Delete references to object. See issue16284
|
||||
del work_item
|
||||
continue
|
||||
executor = executor_reference()
|
||||
# Exit if:
|
||||
# - The interpreter is shutting down OR
|
||||
# - The executor that owns the worker has been collected OR
|
||||
# - The executor that owns the worker has been shutdown.
|
||||
if _shutdown or executor is None or executor._shutdown:
|
||||
# Notice other workers
|
||||
work_queue.put(None)
|
||||
return
|
||||
del executor
|
||||
except BaseException:
|
||||
_base.LOGGER.critical('Exception in worker', exc_info=True)
|
||||
|
||||
class ThreadPoolExecutor(_base.Executor):
|
||||
|
||||
# Used to assign unique thread names when thread_name_prefix is not supplied.
|
||||
_counter = itertools.count().__next__
|
||||
|
||||
def __init__(self, max_workers=None, thread_name_prefix=''):
|
||||
"""Initializes a new ThreadPoolExecutor instance.
|
||||
|
||||
Args:
|
||||
max_workers: The maximum number of threads that can be used to
|
||||
execute the given calls.
|
||||
thread_name_prefix: An optional name prefix to give our threads.
|
||||
"""
|
||||
if max_workers is None:
|
||||
# Use this number because ThreadPoolExecutor is often
|
||||
# used to overlap I/O instead of CPU work.
|
||||
max_workers = (os.cpu_count() or 1) * 5
|
||||
if max_workers <= 0:
|
||||
raise ValueError("max_workers must be greater than 0")
|
||||
|
||||
self._max_workers = max_workers
|
||||
self._work_queue = queue.Queue()
|
||||
self._threads = set()
|
||||
self._shutdown = False
|
||||
self._shutdown_lock = threading.Lock()
|
||||
self._thread_name_prefix = (thread_name_prefix or
|
||||
("ThreadPoolExecutor-%d" % self._counter()))
|
||||
|
||||
def submit(self, fn, *args, **kwargs):
|
||||
with self._shutdown_lock:
|
||||
if self._shutdown:
|
||||
raise RuntimeError('cannot schedule new futures after shutdown')
|
||||
|
||||
f = _base.Future()
|
||||
w = _WorkItem(f, fn, args, kwargs)
|
||||
|
||||
self._work_queue.put(w)
|
||||
self._adjust_thread_count()
|
||||
return f
|
||||
submit.__doc__ = _base.Executor.submit.__doc__
|
||||
|
||||
def _adjust_thread_count(self):
|
||||
# When the executor gets lost, the weakref callback will wake up
|
||||
# the worker threads.
|
||||
def weakref_cb(_, q=self._work_queue):
|
||||
q.put(None)
|
||||
# TODO(bquinlan): Should avoid creating new threads if there are more
|
||||
# idle threads than items in the work queue.
|
||||
num_threads = len(self._threads)
|
||||
if num_threads < self._max_workers:
|
||||
thread_name = '%s_%d' % (self._thread_name_prefix or self,
|
||||
num_threads)
|
||||
t = threading.Thread(name=thread_name, target=_worker,
|
||||
args=(weakref.ref(self, weakref_cb),
|
||||
self._work_queue))
|
||||
t.daemon = True
|
||||
t.start()
|
||||
self._threads.add(t)
|
||||
_threads_queues[t] = self._work_queue
|
||||
|
||||
def shutdown(self, wait=True):
|
||||
with self._shutdown_lock:
|
||||
self._shutdown = True
|
||||
self._work_queue.put(None)
|
||||
if wait:
|
||||
for t in self._threads:
|
||||
t.join()
|
||||
shutdown.__doc__ = _base.Executor.shutdown.__doc__
|
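# Editor's sketch pulling the module together end to end.
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=3, thread_name_prefix='demo') as pool:
    futs = [pool.submit(str.upper, s) for s in ('a', 'b', 'c')]
    print([f.result() for f in futs])   # ['A', 'B', 'C']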
1342
third_party/python/Lib/configparser.py
vendored
Normal file
File diff suppressed because it is too large
384
third_party/python/Lib/contextlib.py
vendored
Normal file
@@ -0,0 +1,384 @@
"""Utilities for with-statement contexts. See PEP 343."""
|
||||
import abc
|
||||
import sys
|
||||
import _collections_abc
|
||||
from collections import deque
|
||||
from functools import wraps
|
||||
|
||||
__all__ = ["contextmanager", "closing", "AbstractContextManager",
|
||||
"ContextDecorator", "ExitStack", "redirect_stdout",
|
||||
"redirect_stderr", "suppress"]
|
||||
|
||||
|
||||
class AbstractContextManager(abc.ABC):
|
||||
|
||||
"""An abstract base class for context managers."""
|
||||
|
||||
def __enter__(self):
|
||||
"""Return `self` upon entering the runtime context."""
|
||||
return self
|
||||
|
||||
@abc.abstractmethod
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
"""Raise any exception triggered within the runtime context."""
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def __subclasshook__(cls, C):
|
||||
if cls is AbstractContextManager:
|
||||
return _collections_abc._check_methods(C, "__enter__", "__exit__")
|
||||
return NotImplemented
|
||||
|
||||
|
||||
class ContextDecorator(object):
|
||||
"A base class or mixin that enables context managers to work as decorators."
|
||||
|
||||
def _recreate_cm(self):
|
||||
"""Return a recreated instance of self.
|
||||
|
||||
Allows an otherwise one-shot context manager like
|
||||
_GeneratorContextManager to support use as
|
||||
a decorator via implicit recreation.
|
||||
|
||||
This is a private interface just for _GeneratorContextManager.
|
||||
See issue #11647 for details.
|
||||
"""
|
||||
return self
|
||||
|
||||
def __call__(self, func):
|
||||
@wraps(func)
|
||||
def inner(*args, **kwds):
|
||||
with self._recreate_cm():
|
||||
return func(*args, **kwds)
|
||||
return inner
|
||||
|
||||
|
||||
class _GeneratorContextManager(ContextDecorator, AbstractContextManager):
|
||||
"""Helper for @contextmanager decorator."""
|
||||
|
||||
def __init__(self, func, args, kwds):
|
||||
self.gen = func(*args, **kwds)
|
||||
self.func, self.args, self.kwds = func, args, kwds
|
||||
# Issue 19330: ensure context manager instances have good docstrings
|
||||
doc = getattr(func, "__doc__", None)
|
||||
if doc is None:
|
||||
doc = type(self).__doc__
|
||||
self.__doc__ = doc
|
||||
# Unfortunately, this still doesn't provide good help output when
|
||||
# inspecting the created context manager instances, since pydoc
|
||||
# currently bypasses the instance docstring and shows the docstring
|
||||
# for the class instead.
|
||||
# See http://bugs.python.org/issue19404 for more details.
|
||||
|
||||
def _recreate_cm(self):
|
||||
# _GCM instances are one-shot context managers, so the
|
||||
# CM must be recreated each time a decorated function is
|
||||
# called
|
||||
return self.__class__(self.func, self.args, self.kwds)
|
||||
|
||||
def __enter__(self):
|
||||
try:
|
||||
return next(self.gen)
|
||||
except StopIteration:
|
||||
raise RuntimeError("generator didn't yield") from None
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
if type is None:
|
||||
try:
|
||||
next(self.gen)
|
||||
except StopIteration:
|
||||
return False
|
||||
else:
|
||||
raise RuntimeError("generator didn't stop")
|
||||
else:
|
||||
if value is None:
|
||||
# Need to force instantiation so we can reliably
|
||||
# tell if we get the same exception back
|
||||
value = type()
|
||||
try:
|
||||
self.gen.throw(type, value, traceback)
|
||||
except StopIteration as exc:
|
||||
# Suppress StopIteration *unless* it's the same exception that
|
||||
# was passed to throw(). This prevents a StopIteration
|
||||
# raised inside the "with" statement from being suppressed.
|
||||
return exc is not value
|
||||
except RuntimeError as exc:
|
||||
# Don't re-raise the passed in exception. (issue27122)
|
||||
if exc is value:
|
||||
return False
|
||||
# Likewise, avoid suppressing if a StopIteration exception
|
||||
# was passed to throw() and later wrapped into a RuntimeError
|
||||
# (see PEP 479).
|
||||
if type is StopIteration and exc.__cause__ is value:
|
||||
return False
|
||||
raise
|
||||
except:
|
||||
# only re-raise if it's *not* the exception that was
|
||||
# passed to throw(), because __exit__() must not raise
|
||||
# an exception unless __exit__() itself failed. But throw()
|
||||
# has to raise the exception to signal propagation, so this
|
||||
# fixes the impedance mismatch between the throw() protocol
|
||||
# and the __exit__() protocol.
|
||||
#
|
||||
if sys.exc_info()[1] is value:
|
||||
return False
|
||||
raise
|
||||
raise RuntimeError("generator didn't stop after throw()")
|
||||
|
||||
|
||||
def contextmanager(func):
|
||||
"""@contextmanager decorator.
|
||||
|
||||
Typical usage:
|
||||
|
||||
@contextmanager
|
||||
def some_generator(<arguments>):
|
||||
<setup>
|
||||
try:
|
||||
yield <value>
|
||||
finally:
|
||||
<cleanup>
|
||||
|
||||
This makes this:
|
||||
|
||||
with some_generator(<arguments>) as <variable>:
|
||||
<body>
|
||||
|
||||
equivalent to this:
|
||||
|
||||
<setup>
|
||||
try:
|
||||
<variable> = <value>
|
||||
<body>
|
||||
finally:
|
||||
<cleanup>
|
||||
|
||||
"""
|
||||
@wraps(func)
|
||||
def helper(*args, **kwds):
|
||||
return _GeneratorContextManager(func, args, kwds)
|
||||
return helper
|
||||
|
||||
|
||||
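# Editor's sketch of the decorator just defined; because the helper mixes in
# ContextDecorator, the resulting object also works as a function decorator.
from contextlib import contextmanager

@contextmanager
def tag(name):
    print('<%s>' % name)
    try:
        yield name
    finally:
        print('</%s>' % name)

with tag('p') as t:
    print('inside', t)

@tag('logged')        # recreated per call via _recreate_cm()
def hello():
    print('hello')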
class closing(AbstractContextManager):
|
||||
"""Context to automatically close something at the end of a block.
|
||||
|
||||
Code like this:
|
||||
|
||||
with closing(<module>.open(<arguments>)) as f:
|
||||
<block>
|
||||
|
||||
is equivalent to this:
|
||||
|
||||
f = <module>.open(<arguments>)
|
||||
try:
|
||||
<block>
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
"""
|
||||
def __init__(self, thing):
|
||||
self.thing = thing
|
||||
def __enter__(self):
|
||||
return self.thing
|
||||
def __exit__(self, *exc_info):
|
||||
self.thing.close()
|
||||
|
||||
|
||||
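# Editor's sketch: closing() suits objects that have close() but are not
# context managers themselves. Resource is a hypothetical stand-in.
from contextlib import closing

class Resource:
    def close(self):
        print('closed')

with closing(Resource()) as r:
    print('using', r)   # close() still runs if this block raises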
class _RedirectStream(AbstractContextManager):
|
||||
|
||||
_stream = None
|
||||
|
||||
def __init__(self, new_target):
|
||||
self._new_target = new_target
|
||||
# We use a list of old targets to make this CM re-entrant
|
||||
self._old_targets = []
|
||||
|
||||
def __enter__(self):
|
||||
self._old_targets.append(getattr(sys, self._stream))
|
||||
setattr(sys, self._stream, self._new_target)
|
||||
return self._new_target
|
||||
|
||||
def __exit__(self, exctype, excinst, exctb):
|
||||
setattr(sys, self._stream, self._old_targets.pop())
|
||||
|
||||
|
||||
class redirect_stdout(_RedirectStream):
|
||||
"""Context manager for temporarily redirecting stdout to another file.
|
||||
|
||||
# How to send help() to stderr
|
||||
with redirect_stdout(sys.stderr):
|
||||
help(dir)
|
||||
|
||||
# How to write help() to a file
|
||||
with open('help.txt', 'w') as f:
|
||||
with redirect_stdout(f):
|
||||
help(pow)
|
||||
"""
|
||||
|
||||
_stream = "stdout"
|
||||
|
||||
|
||||
class redirect_stderr(_RedirectStream):
|
||||
"""Context manager for temporarily redirecting stderr to another file."""
|
||||
|
||||
_stream = "stderr"
|
||||
|
||||
|
||||
class suppress(AbstractContextManager):
|
||||
"""Context manager to suppress specified exceptions
|
||||
|
||||
After the exception is suppressed, execution proceeds with the next
|
||||
statement following the with statement.
|
||||
|
||||
with suppress(FileNotFoundError):
|
||||
os.remove(somefile)
|
||||
# Execution still resumes here if the file was already removed
|
||||
"""
|
||||
|
||||
def __init__(self, *exceptions):
|
||||
self._exceptions = exceptions
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
|
||||
def __exit__(self, exctype, excinst, exctb):
|
||||
# Unlike isinstance and issubclass, CPython exception handling
|
||||
# currently only looks at the concrete type hierarchy (ignoring
|
||||
# the instance and subclass checking hooks). While Guido considers
|
||||
# that a bug rather than a feature, it's a fairly hard one to fix
|
||||
# due to various internal implementation details. suppress provides
|
||||
# the simpler issubclass based semantics, rather than trying to
|
||||
# exactly reproduce the limitations of the CPython interpreter.
|
||||
#
|
||||
# See http://bugs.python.org/issue12029 for more details
|
||||
return exctype is not None and issubclass(exctype, self._exceptions)
|
||||
|
||||
|
||||
# Inspired by discussions on http://bugs.python.org/issue13585
|
||||
class ExitStack(AbstractContextManager):
|
||||
"""Context manager for dynamic management of a stack of exit callbacks
|
||||
|
||||
For example:
|
||||
|
||||
with ExitStack() as stack:
|
||||
files = [stack.enter_context(open(fname)) for fname in filenames]
|
||||
# All opened files will automatically be closed at the end of
|
||||
# the with statement, even if attempts to open files later
|
||||
# in the list raise an exception
|
||||
|
||||
"""
|
||||
def __init__(self):
|
||||
self._exit_callbacks = deque()
|
||||
|
||||
def pop_all(self):
|
||||
"""Preserve the context stack by transferring it to a new instance"""
|
||||
new_stack = type(self)()
|
||||
new_stack._exit_callbacks = self._exit_callbacks
|
||||
self._exit_callbacks = deque()
|
||||
return new_stack
|
||||
|
||||
def _push_cm_exit(self, cm, cm_exit):
|
||||
"""Helper to correctly register callbacks to __exit__ methods"""
|
||||
def _exit_wrapper(*exc_details):
|
||||
return cm_exit(cm, *exc_details)
|
||||
_exit_wrapper.__self__ = cm
|
||||
self.push(_exit_wrapper)
|
||||
|
||||
def push(self, exit):
|
||||
"""Registers a callback with the standard __exit__ method signature
|
||||
|
||||
Can suppress exceptions the same way __exit__ methods can.
|
||||
|
||||
Also accepts any object with an __exit__ method (registering a call
|
||||
to the method instead of the object itself)
|
||||
"""
|
||||
# We use an unbound method rather than a bound method to follow
|
||||
# the standard lookup behaviour for special methods
|
||||
_cb_type = type(exit)
|
||||
try:
|
||||
exit_method = _cb_type.__exit__
|
||||
except AttributeError:
|
||||
# Not a context manager, so assume it's a callable
|
||||
self._exit_callbacks.append(exit)
|
||||
else:
|
||||
self._push_cm_exit(exit, exit_method)
|
||||
return exit # Allow use as a decorator
|
||||
|
||||
def callback(self, callback, *args, **kwds):
|
||||
"""Registers an arbitrary callback and arguments.
|
||||
|
||||
Cannot suppress exceptions.
|
||||
"""
|
||||
def _exit_wrapper(exc_type, exc, tb):
|
||||
callback(*args, **kwds)
|
||||
# We changed the signature, so using @wraps is not appropriate, but
|
||||
# setting __wrapped__ may still help with introspection
|
||||
_exit_wrapper.__wrapped__ = callback
|
||||
self.push(_exit_wrapper)
|
||||
return callback # Allow use as a decorator
|
||||
|
||||
def enter_context(self, cm):
|
||||
"""Enters the supplied context manager
|
||||
|
||||
If successful, also pushes its __exit__ method as a callback and
|
||||
returns the result of the __enter__ method.
|
||||
"""
|
||||
# We look up the special methods on the type to match the with statement
|
||||
_cm_type = type(cm)
|
||||
_exit = _cm_type.__exit__
|
||||
result = _cm_type.__enter__(cm)
|
||||
self._push_cm_exit(cm, _exit)
|
||||
return result
|
||||
|
||||
def close(self):
|
||||
"""Immediately unwind the context stack"""
|
||||
self.__exit__(None, None, None)
|
||||
|
||||
def __exit__(self, *exc_details):
|
||||
received_exc = exc_details[0] is not None
|
||||
|
||||
# We manipulate the exception state so it behaves as though
|
||||
# we were actually nesting multiple with statements
|
||||
frame_exc = sys.exc_info()[1]
|
||||
def _fix_exception_context(new_exc, old_exc):
|
||||
# Context may not be correct, so find the end of the chain
|
||||
while 1:
|
||||
exc_context = new_exc.__context__
|
||||
if exc_context is old_exc:
|
||||
# Context is already set correctly (see issue 20317)
|
||||
return
|
||||
if exc_context is None or exc_context is frame_exc:
|
||||
break
|
||||
new_exc = exc_context
|
||||
# Change the end of the chain to point to the exception
|
||||
# we expect it to reference
|
||||
new_exc.__context__ = old_exc
|
||||
|
||||
# Callbacks are invoked in LIFO order to match the behaviour of
|
||||
# nested context managers
|
||||
suppressed_exc = False
|
||||
pending_raise = False
|
||||
while self._exit_callbacks:
|
||||
cb = self._exit_callbacks.pop()
|
||||
try:
|
||||
if cb(*exc_details):
|
||||
suppressed_exc = True
|
||||
pending_raise = False
|
||||
exc_details = (None, None, None)
|
||||
except:
|
||||
new_exc_details = sys.exc_info()
|
||||
# simulate the stack of exceptions by setting the context
|
||||
_fix_exception_context(new_exc_details[1], exc_details[1])
|
||||
pending_raise = True
|
||||
exc_details = new_exc_details
|
||||
if pending_raise:
|
||||
try:
|
||||
# bare "raise exc_details[1]" replaces our carefully
|
||||
# set-up context
|
||||
fixed_ctx = exc_details[1].__context__
|
||||
raise exc_details[1]
|
||||
except BaseException:
|
||||
exc_details[1].__context__ = fixed_ctx
|
||||
raise
|
||||
return received_exc and suppressed_exc
|
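# Editor's sketch of the pop_all() "commit" idiom beyond the docstring's
# example; the file names passed in are hypothetical.
from contextlib import ExitStack

def open_all(names):
    with ExitStack() as stack:
        files = [stack.enter_context(open(n)) for n in names]
        # Nothing raised: hand the close callbacks to a new stack the
        # caller now owns, so this with-block exits without closing.
        return stack.pop_all(), files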
313
third_party/python/Lib/copy.py
vendored
Normal file
@@ -0,0 +1,313 @@
"""Generic (shallow and deep) copying operations.
|
||||
|
||||
Interface summary:
|
||||
|
||||
import copy
|
||||
|
||||
x = copy.copy(y) # make a shallow copy of y
|
||||
x = copy.deepcopy(y) # make a deep copy of y
|
||||
|
||||
For module specific errors, copy.Error is raised.
|
||||
|
||||
The difference between shallow and deep copying is only relevant for
|
||||
compound objects (objects that contain other objects, like lists or
|
||||
class instances).
|
||||
|
||||
- A shallow copy constructs a new compound object and then (to the
|
||||
extent possible) inserts *the same objects* into it that the
|
||||
original contains.
|
||||
|
||||
- A deep copy constructs a new compound object and then, recursively,
|
||||
inserts *copies* into it of the objects found in the original.
|
||||
|
||||
Two problems often exist with deep copy operations that don't exist
|
||||
with shallow copy operations:
|
||||
|
||||
a) recursive objects (compound objects that, directly or indirectly,
|
||||
contain a reference to themselves) may cause a recursive loop
|
||||
|
||||
b) because deep copy copies *everything* it may copy too much, e.g.
|
||||
administrative data structures that should be shared even between
|
||||
copies
|
||||
|
||||
Python's deep copy operation avoids these problems by:
|
||||
|
||||
a) keeping a table of objects already copied during the current
|
||||
copying pass
|
||||
|
||||
b) letting user-defined classes override the copying operation or the
|
||||
set of components copied
|
||||
|
||||
This version does not copy types like module, class, function, method,
|
||||
nor stack trace, stack frame, nor file, socket, window, nor array, nor
|
||||
any similar types.
|
||||
|
||||
Classes can use the same interfaces to control copying that they use
|
||||
to control pickling: they can define methods called __getinitargs__(),
|
||||
__getstate__() and __setstate__(). See the documentation for module
|
||||
"pickle" for information on these methods.
|
||||
"""
|
||||
|
||||
import types
|
||||
import weakref
|
||||
from copyreg import dispatch_table
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
error = Error # backward compatibility
|
||||
|
||||
try:
|
||||
from org.python.core import PyStringMap
|
||||
except ImportError:
|
||||
PyStringMap = None
|
||||
|
||||
__all__ = ["Error", "copy", "deepcopy"]
|
||||
|
||||
def copy(x):
|
||||
"""Shallow copy operation on arbitrary Python objects.
|
||||
|
||||
See the module's __doc__ string for more info.
|
||||
"""
|
||||
|
||||
cls = type(x)
|
||||
|
||||
copier = _copy_dispatch.get(cls)
|
||||
if copier:
|
||||
return copier(x)
|
||||
|
||||
try:
|
||||
issc = issubclass(cls, type)
|
||||
except TypeError: # cls is not a class
|
||||
issc = False
|
||||
if issc:
|
||||
# treat it as a regular class:
|
||||
return _copy_immutable(x)
|
||||
|
||||
copier = getattr(cls, "__copy__", None)
|
||||
if copier:
|
||||
return copier(x)
|
||||
|
||||
reductor = dispatch_table.get(cls)
|
||||
if reductor:
|
||||
rv = reductor(x)
|
||||
else:
|
||||
reductor = getattr(x, "__reduce_ex__", None)
|
||||
if reductor:
|
||||
rv = reductor(4)
|
||||
else:
|
||||
reductor = getattr(x, "__reduce__", None)
|
||||
if reductor:
|
||||
rv = reductor()
|
||||
else:
|
||||
raise Error("un(shallow)copyable object of type %s" % cls)
|
||||
|
||||
if isinstance(rv, str):
|
||||
return x
|
||||
return _reconstruct(x, None, *rv)
|
||||
|
||||
|
||||
_copy_dispatch = d = {}
|
||||
|
||||
def _copy_immutable(x):
|
||||
return x
|
||||
for t in (type(None), int, float, bool, complex, str, tuple,
|
||||
bytes, frozenset, type, range, slice,
|
||||
types.BuiltinFunctionType, type(Ellipsis), type(NotImplemented),
|
||||
types.FunctionType, weakref.ref):
|
||||
d[t] = _copy_immutable
|
||||
t = getattr(types, "CodeType", None)
|
||||
if t is not None:
|
||||
d[t] = _copy_immutable
|
||||
|
||||
d[list] = list.copy
|
||||
d[dict] = dict.copy
|
||||
d[set] = set.copy
|
||||
d[bytearray] = bytearray.copy
|
||||
|
||||
if PyStringMap is not None:
|
||||
d[PyStringMap] = PyStringMap.copy
|
||||
|
||||
del d, t
|
||||
|
||||
def deepcopy(x, memo=None, _nil=[]):
|
||||
"""Deep copy operation on arbitrary Python objects.
|
||||
|
||||
See the module's __doc__ string for more info.
|
||||
"""
|
||||
|
||||
if memo is None:
|
||||
memo = {}
|
||||
|
||||
d = id(x)
|
||||
y = memo.get(d, _nil)
|
||||
if y is not _nil:
|
||||
return y
|
||||
|
||||
cls = type(x)
|
||||
|
||||
copier = _deepcopy_dispatch.get(cls)
|
||||
if copier:
|
||||
y = copier(x, memo)
|
||||
else:
|
||||
try:
|
||||
issc = issubclass(cls, type)
|
||||
except TypeError: # cls is not a class (old Boost; see SF #502085)
|
||||
issc = 0
|
||||
if issc:
|
||||
y = _deepcopy_atomic(x, memo)
|
||||
else:
|
||||
copier = getattr(x, "__deepcopy__", None)
|
||||
if copier:
|
||||
y = copier(memo)
|
||||
else:
|
||||
reductor = dispatch_table.get(cls)
|
||||
if reductor:
|
||||
rv = reductor(x)
|
||||
else:
|
||||
reductor = getattr(x, "__reduce_ex__", None)
|
||||
if reductor:
|
||||
rv = reductor(4)
|
||||
else:
|
||||
reductor = getattr(x, "__reduce__", None)
|
||||
if reductor:
|
||||
rv = reductor()
|
||||
else:
|
||||
raise Error(
|
||||
"un(deep)copyable object of type %s" % cls)
|
||||
if isinstance(rv, str):
|
||||
y = x
|
||||
else:
|
||||
y = _reconstruct(x, memo, *rv)
|
||||
|
||||
# If the copy is the object itself, don't memoize.
|
||||
if y is not x:
|
||||
memo[d] = y
|
||||
_keep_alive(x, memo) # Make sure x lives at least as long as d
|
||||
return y
|
||||
|
||||
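# Illustrative sketch (not part of the upstream module): the memo dict lets
# deepcopy() handle self-referential structures without infinite recursion.
#
#   >>> import copy
#   >>> a = [1]
#   >>> a.append(a)           # a contains itself
#   >>> b = copy.deepcopy(a)
#   >>> b[1] is b             # the cycle is reproduced in the copy
#   True
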
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[type(NotImplemented)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
d[complex] = _deepcopy_atomic
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic

def _deepcopy_list(x, memo, deepcopy=deepcopy):
    y = []
    memo[id(x)] = y
    append = y.append
    for a in x:
        append(deepcopy(a, memo))
    return y
d[list] = _deepcopy_list

def _deepcopy_tuple(x, memo, deepcopy=deepcopy):
    y = [deepcopy(a, memo) for a in x]
    # We're not going to put the tuple in the memo, but it's still important we
    # check for it, in case the tuple contains recursive mutable structures.
    try:
        return memo[id(x)]
    except KeyError:
        pass
    for k, j in zip(x, y):
        if k is not j:
            y = tuple(y)
            break
    else:
        y = x
    return y
d[tuple] = _deepcopy_tuple

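# Illustrative sketch (not part of the upstream module): a tuple whose items
# all deep-copy to themselves is returned unchanged (the "y = x" branch above).
#
#   >>> import copy
#   >>> t = (1, 'two', 3.0)
#   >>> copy.deepcopy(t) is t
#   True
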
def _deepcopy_dict(x, memo, deepcopy=deepcopy):
    y = {}
    memo[id(x)] = y
    for key, value in x.items():
        y[deepcopy(key, memo)] = deepcopy(value, memo)
    return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict

def _deepcopy_method(x, memo): # Copy instance methods
    return type(x)(x.__func__, deepcopy(x.__self__, memo))
d[types.MethodType] = _deepcopy_method

del d

def _keep_alive(x, memo):
    """Keeps a reference to the object x in the memo.

    Because we remember objects by their id, we have
    to assure that possibly temporary objects are kept
    alive by referencing them.
    We store a reference at the id of the memo, which should
    normally not be used unless someone tries to deepcopy
    the memo itself...
    """
    try:
        memo[id(memo)].append(x)
    except KeyError:
        # aha, this is the first one :-)
        memo[id(memo)]=[x]

def _reconstruct(x, memo, func, args,
                 state=None, listiter=None, dictiter=None,
                 deepcopy=deepcopy):
    deep = memo is not None
    if deep and args:
        args = (deepcopy(arg, memo) for arg in args)
    y = func(*args)
    if deep:
        memo[id(x)] = y

    if state is not None:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)

    if listiter is not None:
        if deep:
            for item in listiter:
                item = deepcopy(item, memo)
                y.append(item)
        else:
            for item in listiter:
                y.append(item)
    if dictiter is not None:
        if deep:
            for key, value in dictiter:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
                y[key] = value
        else:
            for key, value in dictiter:
                y[key] = value
    return y

del types, weakref, PyStringMap
206
third_party/python/Lib/copyreg.py
vendored
Normal file
@@ -0,0 +1,206 @@
"""Helper to provide extensibility for pickle.
|
||||
|
||||
This is only useful to add pickle support for extension types defined in
|
||||
C, not for instances of user-defined classes.
|
||||
"""
|
||||
|
||||
__all__ = ["pickle", "constructor",
|
||||
"add_extension", "remove_extension", "clear_extension_cache"]
|
||||
|
||||
dispatch_table = {}
|
||||
|
||||
def pickle(ob_type, pickle_function, constructor_ob=None):
|
||||
if not callable(pickle_function):
|
||||
raise TypeError("reduction functions must be callable")
|
||||
dispatch_table[ob_type] = pickle_function
|
||||
|
||||
# The constructor_ob function is a vestige of safe for unpickling.
|
||||
# There is no reason for the caller to pass it anymore.
|
||||
if constructor_ob is not None:
|
||||
constructor(constructor_ob)
|
||||
|
||||
def constructor(object):
|
||||
if not callable(object):
|
||||
raise TypeError("constructors must be callable")
|
||||
|
||||
# Example: provide pickling support for complex numbers.
|
||||
|
||||
try:
|
||||
complex
|
||||
except NameError:
|
||||
pass
|
||||
else:
|
||||
|
||||
def pickle_complex(c):
|
||||
return complex, (c.real, c.imag)
|
||||
|
||||
pickle(complex, pickle_complex, complex)
|
||||
|
||||
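# Illustrative sketch (not part of the upstream module): registering a
# reduction function for your own type works the same way; the names Point2D
# and pickle_point below are hypothetical.
#
#   >>> import copyreg, pickle
#   >>> class Point2D:
#   ...     def __init__(self, x, y):
#   ...         self.x, self.y = x, y
#   >>> def pickle_point(p):
#   ...     return Point2D, (p.x, p.y)
#   >>> copyreg.pickle(Point2D, pickle_point)
#   >>> q = pickle.loads(pickle.dumps(Point2D(1, 2)))
#   >>> (q.x, q.y)
#   (1, 2)
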
# Support for pickling new-style objects

def _reconstructor(cls, base, state):
    if base is object:
        obj = object.__new__(cls)
    else:
        obj = base.__new__(cls, state)
    if base.__init__ != object.__init__:
        base.__init__(obj, state)
    return obj

_HEAPTYPE = 1<<9

# Python code for object.__reduce_ex__ for protocols 0 and 1

def _reduce_ex(self, proto):
    assert proto < 2
    for base in self.__class__.__mro__:
        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
            break
    else:
        base = object # not really reachable
    if base is object:
        state = None
    else:
        if base is self.__class__:
            raise TypeError("can't pickle %s objects" % base.__name__)
        state = base(self)
    args = (self.__class__, base, state)
    try:
        getstate = self.__getstate__
    except AttributeError:
        if getattr(self, "__slots__", None):
            raise TypeError("a class that defines __slots__ without "
                            "defining __getstate__ cannot be pickled")
        try:
            dict = self.__dict__
        except AttributeError:
            dict = None
    else:
        dict = getstate()
    if dict:
        return _reconstructor, args, dict
    else:
        return _reconstructor, args

# Helper for __reduce_ex__ protocol 2

def __newobj__(cls, *args):
    return cls.__new__(cls, *args)

def __newobj_ex__(cls, args, kwargs):
    """Used by pickle protocol 4, instead of __newobj__ to allow classes with
    keyword-only arguments to be pickled correctly.
    """
    return cls.__new__(cls, *args, **kwargs)

def _slotnames(cls):
    """Return a list of slot names for a given class.

    This needs to find slots defined by the class and its bases, so we
    can't simply return the __slots__ attribute.  We must walk down
    the Method Resolution Order and concatenate the __slots__ of each
    class found there.  (This assumes classes don't modify their
    __slots__ attribute to misrepresent their slots after the class is
    defined.)
    """

    # Get the value from a cache in the class if possible
    names = cls.__dict__.get("__slotnames__")
    if names is not None:
        return names

    # Not cached -- calculate the value
    names = []
    if not hasattr(cls, "__slots__"):
        # This class has no slots
        pass
    else:
        # Slots found -- gather slot names from all base classes
        for c in cls.__mro__:
            if "__slots__" in c.__dict__:
                slots = c.__dict__['__slots__']
                # if class has a single slot, it can be given as a string
                if isinstance(slots, str):
                    slots = (slots,)
                for name in slots:
                    # special descriptors
                    if name in ("__dict__", "__weakref__"):
                        continue
                    # mangled names
                    elif name.startswith('__') and not name.endswith('__'):
                        stripped = c.__name__.lstrip('_')
                        if stripped:
                            names.append('_%s%s' % (stripped, name))
                        else:
                            names.append(name)
                    else:
                        names.append(name)

    # Cache the outcome in the class if at all possible
    try:
        cls.__slotnames__ = names
    except:
        pass # But don't die if we can't

    return names

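# Illustrative sketch (not part of the upstream module): private slot names
# are stored mangled, and _slotnames() reports the mangled form.
#
#   >>> import copyreg
#   >>> class C:
#   ...     __slots__ = ('a', '__b')
#   >>> copyreg._slotnames(C)
#   ['a', '_C__b']
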
# A registry of extension codes.  This is an ad-hoc compression
# mechanism.  Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it.  Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context.  (There are also some codes reserved for local use that
# don't have this restriction.)  Codes are positive ints; 0 is
# reserved.

_extension_registry = {}                # key -> code
_inverted_registry = {}                 # code -> key
_extension_cache = {}                   # code -> object
# Don't ever rebind those names:  pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.

def add_extension(module, name, code):
    """Register an extension code."""
    code = int(code)
    if not 1 <= code <= 0x7fffffff:
        raise ValueError("code out of range")
    key = (module, name)
    if (_extension_registry.get(key) == code and
        _inverted_registry.get(code) == key):
        return # Redundant registrations are benign
    if key in _extension_registry:
        raise ValueError("key %s is already registered with code %s" %
                         (key, _extension_registry[key]))
    if code in _inverted_registry:
        raise ValueError("code %s is already in use for key %s" %
                         (code, _inverted_registry[code]))
    _extension_registry[key] = code
    _inverted_registry[code] = key

def remove_extension(module, name, code):
    """Unregister an extension code.  For testing only."""
    key = (module, name)
    if (_extension_registry.get(key) != code or
        _inverted_registry.get(code) != key):
        raise ValueError("key %s is not registered with code %s" %
                         (key, code))
    del _extension_registry[key]
    del _inverted_registry[code]
    if code in _extension_cache:
        del _extension_cache[code]

def clear_extension_cache():
    _extension_cache.clear()

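# Illustrative sketch (not part of the upstream module): codes in the private
# range 240-255 can be claimed locally; redundant registrations are benign,
# conflicting ones raise ValueError.  The module/class names are hypothetical.
#
#   >>> import copyreg
#   >>> copyreg.add_extension('mymod', 'MyClass', 240)
#   >>> copyreg.add_extension('mymod', 'MyClass', 240)   # same mapping: no-op
#   >>> copyreg.remove_extension('mymod', 'MyClass', 240)
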
# Standard extension code assignments

# Reserved ranges

# First  Last Count  Purpose
#     1   127   127  Reserved for Python standard library
#   128   191    64  Reserved for Zope
#   192   239    48  Reserved for 3rd parties
#   240   255    16  Reserved for private use (will never be assigned)
#   256   Inf   Inf  Reserved for future assignment

# Extension codes are assigned by the Python Software Foundation.
61
third_party/python/Lib/crypt.py
vendored
Normal file
@@ -0,0 +1,61 @@
"""Wrapper to the POSIX crypt library call and associated functionality."""
|
||||
|
||||
import _crypt
|
||||
import string as _string
|
||||
from random import SystemRandom as _SystemRandom
|
||||
from collections import namedtuple as _namedtuple
|
||||
|
||||
|
||||
_saltchars = _string.ascii_letters + _string.digits + './'
|
||||
_sr = _SystemRandom()
|
||||
|
||||
|
||||
class _Method(_namedtuple('_Method', 'name ident salt_chars total_size')):
|
||||
|
||||
"""Class representing a salt method per the Modular Crypt Format or the
|
||||
legacy 2-character crypt method."""
|
||||
|
||||
def __repr__(self):
|
||||
return '<crypt.METHOD_{}>'.format(self.name)
|
||||
|
||||
|
||||
def mksalt(method=None):
|
||||
"""Generate a salt for the specified method.
|
||||
|
||||
If not specified, the strongest available method will be used.
|
||||
|
||||
"""
|
||||
if method is None:
|
||||
method = methods[0]
|
||||
s = '${}$'.format(method.ident) if method.ident else ''
|
||||
s += ''.join(_sr.choice(_saltchars) for char in range(method.salt_chars))
|
||||
return s
|
||||
|
||||
|
||||
def crypt(word, salt=None):
|
||||
"""Return a string representing the one-way hash of a password, with a salt
|
||||
prepended.
|
||||
|
||||
If ``salt`` is not specified or is ``None``, the strongest
|
||||
available method will be selected and a salt generated. Otherwise,
|
||||
``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as
|
||||
returned by ``crypt.mksalt()``.
|
||||
|
||||
"""
|
||||
if salt is None or isinstance(salt, _Method):
|
||||
salt = mksalt(salt)
|
||||
return _crypt.crypt(word, salt)
|
||||
|
||||
|
||||
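# Illustrative sketch (not part of the upstream module): hash a password with
# an explicitly chosen method.  The result embeds the salt, so re-hashing the
# candidate password with the stored string should reproduce it (behavior of
# the underlying C crypt() is platform-dependent).
#
#   >>> import crypt
#   >>> stored = crypt.crypt('hunter2', crypt.mksalt(crypt.METHOD_SHA512))
#   >>> crypt.crypt('hunter2', stored) == stored
#   True
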
# available salting/crypto methods
METHOD_CRYPT = _Method('CRYPT', None, 2, 13)
METHOD_MD5 = _Method('MD5', '1', 8, 34)
METHOD_SHA256 = _Method('SHA256', '5', 16, 63)
METHOD_SHA512 = _Method('SHA512', '6', 16, 106)

methods = []
for _method in (METHOD_SHA512, METHOD_SHA256, METHOD_MD5, METHOD_CRYPT):
    _result = crypt('', _method)
    if _result and len(_result) == _method.total_size:
        methods.append(_method)
del _result, _method
449
third_party/python/Lib/csv.py
vendored
Normal file
@@ -0,0 +1,449 @@

"""
csv.py - read/write/investigate CSV files
"""

import re
from _csv import Error, __version__, writer, reader, register_dialect, \
                 unregister_dialect, get_dialect, list_dialects, \
                 field_size_limit, \
                 QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
                 __doc__
from _csv import Dialect as _Dialect

from collections import OrderedDict
from io import StringIO

__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
           "Error", "Dialect", "__doc__", "excel", "excel_tab",
           "field_size_limit", "reader", "writer",
           "register_dialect", "get_dialect", "list_dialects", "Sniffer",
           "unregister_dialect", "__version__", "DictReader", "DictWriter",
           "unix_dialect"]

class Dialect:
    """Describe a CSV dialect.

    This must be subclassed (see csv.excel).  Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.

    """
    _name = ""
    _valid = False
    # placeholders
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        try:
            _Dialect(self)
        except TypeError as e:
            # We do this for compatibility with py2.3
            raise Error(str(e))

class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
register_dialect("excel", excel)

class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'
register_dialect("excel-tab", excel_tab)

class unix_dialect(Dialect):
    """Describe the usual properties of Unix-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\n'
    quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)


class DictReader:
    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self._fieldnames = fieldnames   # list of keys for the dict
        self.restkey = restkey          # key to catch long rows
        self.restval = restval          # default value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
    def fieldnames(self):
        if self._fieldnames is None:
            try:
                self._fieldnames = next(self.reader)
            except StopIteration:
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value

    def __next__(self):
        if self.line_num == 0:
            # Used only for its side effect.
            self.fieldnames
        row = next(self.reader)
        self.line_num = self.reader.line_num

        # unlike the basic reader, we prefer not to return blanks,
        # because we will typically wind up with a dict full of None
        # values
        while row == []:
            row = next(self.reader)
        d = OrderedDict(zip(self.fieldnames, row))
        lf = len(self.fieldnames)
        lr = len(row)
        if lf < lr:
            d[self.restkey] = row[lf:]
        elif lf > lr:
            for key in self.fieldnames[lr:]:
                d[key] = self.restval
        return d


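# Illustrative sketch (not part of the upstream module): reading rows as
# ordered mappings keyed by the header line.
#
#   >>> import csv, io
#   >>> f = io.StringIO('name,age\r\nada,36\r\n')
#   >>> [dict(row) for row in csv.DictReader(f)]
#   [{'name': 'ada', 'age': '36'}]
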
class DictWriter:
    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restval = restval          # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
                             % extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
        header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)

    def _dict_to_list(self, rowdict):
        if self.extrasaction == "raise":
            wrong_fields = rowdict.keys() - self.fieldnames
            if wrong_fields:
                raise ValueError("dict contains fields not in fieldnames: "
                                 + ", ".join([repr(x) for x in wrong_fields]))
        return (rowdict.get(key, self.restval) for key in self.fieldnames)

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        return self.writer.writerows(map(self._dict_to_list, rowdicts))

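# Illustrative sketch (not part of the upstream module): writing mappings out
# in a fixed field order, with writeheader() emitting the column names.
#
#   >>> import csv, io
#   >>> f = io.StringIO()
#   >>> w = csv.DictWriter(f, fieldnames=['name', 'age'])
#   >>> w.writeheader()
#   >>> _ = w.writerow({'name': 'ada', 'age': '36'})
#   >>> f.getvalue()
#   'name,age\r\nada,36\r\n'
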
# Guard Sniffer's type checking against builds that exclude complex()
try:
    complex
except NameError:
    complex = float

class Sniffer:
    '''
    "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
    Returns a Dialect object.
    '''
    def __init__(self):
        # in case there is more than one possible delimiter
        self.preferred = [',', '\t', ';', ' ', ':']


    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample
        """

        quotechar, doublequote, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)

        if not delimiter:
            raise Error("Could not determine delimiter")

        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''

        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace

        return dialect


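    # Illustrative sketch (not part of the upstream module): sniffing the
    # delimiter from a small sample.
    #
    #   >>> import csv
    #   >>> csv.Sniffer().sniff('a;b;c\r\n1;2;3\r\n').delimiter
    #   ';'
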
    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
                         ,'some text',
        The quote with the most wins, same with the delimiter.
        If there is no quotechar the delimiter can't be determined
        this way.
        """

        matches = []
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   #  ".*?",
                      r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            #  ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break

        if not matches:
            # (quotechar, doublequote, delimiter, skipinitialspace)
            return ('', False, None, 0)
        quotes = {}
        delims = {}
        spaces = 0
        groupindex = regexp.groupindex
        for m in matches:
            n = groupindex['quote'] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                n = groupindex['delim'] - 1
                key = m[n]
            except KeyError:
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = groupindex['space'] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1

        quotechar = max(quotes, key=quotes.get)

        if delims:
            delim = max(delims, key=delims.get)
            skipinitialspace = delims[delim] == spaces
            if delim == '\n': # most likely a file with a single column
                delim = ''
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ''
            skipinitialspace = 0

        # if we see an extra quote between delimiters, we've got a
        # double quoted format
        dq_regexp = re.compile(
                               r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)



        if dq_regexp.search(data):
            doublequote = True
        else:
            doublequote = False

        return (quotechar, doublequote, delim, skipinitialspace)


    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of frequencies of this frequency (meta-frequency?),
             e.g.  'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.
        """

        data = list(filter(None, data.split('\n')))

        ascii = [chr(c) for c in range(127)] # 7-bit ASCII

        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, min(chunkLength, len(data))
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency

            for char in charFrequency.keys():
                items = list(charFrequency[char].items())
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = max(items, key=lambda x: x[1])
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (modes[char][0], modes[char][1]
                                   - sum(item[1] for item in items))
                else:
                    modes[char] = items[0]

            # build a list of possible delimiters
            modeList = modes.items()
            total = float(chunkLength * iteration)
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if ((v[1]/total) >= consistency and
                            (delimiters is None or k in delimiters)):
                            delims[k] = v
                consistency -= 0.01

            if len(delims) == 1:
                delim = list(delims.keys())[0]
                skipinitialspace = (data[0].count(delim) ==
                                    data[0].count("%c " % delim))
                return (delim, skipinitialspace)

            # analyze another chunkLength lines
            start = end
            end += chunkLength

        if not delims:
            return ('', 0)

        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in delims.keys():
                    skipinitialspace = (data[0].count(d) ==
                                        data[0].count("%c " % d))
                    return (d, skipinitialspace)

        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v,k) for (k,v) in delims.items()]
        items.sort()
        delim = items[-1][1]

        skipinitialspace = (data[0].count(delim) ==
                            data[0].count("%c " % delim))
        return (delim, skipinitialspace)


    def has_header(self, sample):
        # Creates a dictionary of types of data in each column. If any
        # column is of a single type (say, integers), *except* for the first
        # row, then the first row is presumed to be labels. If the type
        # can't be determined, it is assumed to be a string in which case
        # the length of the string is the determining factor: if all of the
        # rows except for the first are the same length, it's a header.
        # Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.

        rdr = reader(StringIO(sample), self.sniff(sample))

        header = next(rdr) # assume first row is header

        columns = len(header)
        columnTypes = {}
        for i in range(columns): columnTypes[i] = None

        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1

            if len(row) != columns:
                continue # skip rows that have irregular number of columns

            for col in list(columnTypes.keys()):

                for thisType in [int, float, complex]:
                    try:
                        thisType(row[col])
                        break
                    except (ValueError, OverflowError):
                        pass
                else:
                    # fallback to length of string
                    thisType = len(row[col])

                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]

        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in columnTypes.items():
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1

        return hasHeader > 0
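
# Illustrative sketch (not part of the upstream module): the header heuristic
# votes per column on type or length mismatches between the first row and the
# rest.
#
#   >>> import csv
#   >>> csv.Sniffer().has_header('name,age\r\nada,36\r\nalan,41\r\n')
#   True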
538
third_party/python/Lib/ctypes/__init__.py
vendored
Normal file
@@ -0,0 +1,538 @@
"""create and manipulate C data types in Python"""
|
||||
|
||||
import os as _os, sys as _sys
|
||||
|
||||
__version__ = "1.1.0"
|
||||
|
||||
from _ctypes import Union, Structure, Array
|
||||
from _ctypes import _Pointer
|
||||
from _ctypes import CFuncPtr as _CFuncPtr
|
||||
from _ctypes import __version__ as _ctypes_version
|
||||
from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
|
||||
from _ctypes import ArgumentError
|
||||
|
||||
from struct import calcsize as _calcsize
|
||||
|
||||
if __version__ != _ctypes_version:
|
||||
raise Exception("Version number mismatch", __version__, _ctypes_version)
|
||||
|
||||
if _os.name == "nt":
|
||||
from _ctypes import FormatError
|
||||
|
||||
DEFAULT_MODE = RTLD_LOCAL
|
||||
if _os.name == "posix" and _sys.platform == "darwin":
|
||||
# On OS X 10.3, we use RTLD_GLOBAL as default mode
|
||||
# because RTLD_LOCAL does not work at least on some
|
||||
# libraries. OS X 10.3 is Darwin 7, so we check for
|
||||
# that.
|
||||
|
||||
if int(_os.uname().release.split('.')[0]) < 8:
|
||||
DEFAULT_MODE = RTLD_GLOBAL
|
||||
|
||||
from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \
|
||||
FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI, \
|
||||
FUNCFLAG_USE_ERRNO as _FUNCFLAG_USE_ERRNO, \
|
||||
FUNCFLAG_USE_LASTERROR as _FUNCFLAG_USE_LASTERROR
|
||||
|
||||
# WINOLEAPI -> HRESULT
|
||||
# WINOLEAPI_(type)
|
||||
#
|
||||
# STDMETHODCALLTYPE
|
||||
#
|
||||
# STDMETHOD(name)
|
||||
# STDMETHOD_(type, name)
|
||||
#
|
||||
# STDAPICALLTYPE
|
||||
|
||||
def create_string_buffer(init, size=None):
|
||||
"""create_string_buffer(aBytes) -> character array
|
||||
create_string_buffer(anInteger) -> character array
|
||||
create_string_buffer(aBytes, anInteger) -> character array
|
||||
"""
|
||||
if isinstance(init, bytes):
|
||||
if size is None:
|
||||
size = len(init)+1
|
||||
buftype = c_char * size
|
||||
buf = buftype()
|
||||
buf.value = init
|
||||
return buf
|
||||
elif isinstance(init, int):
|
||||
buftype = c_char * init
|
||||
buf = buftype()
|
||||
return buf
|
||||
raise TypeError(init)
|
||||
|
||||
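# Illustrative sketch (not part of the upstream module): a mutable,
# NUL-terminated buffer suitable for C functions that write into memory.
#
#   >>> buf = create_string_buffer(b"hello")     # len(b"hello") + 1 bytes
#   >>> sizeof(buf), buf.value
#   (6, b'hello')
#   >>> create_string_buffer(3).raw              # zero-filled, 3 bytes
#   b'\x00\x00\x00'
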
def c_buffer(init, size=None):
##    "deprecated, use create_string_buffer instead"
##    import warnings
##    warnings.warn("c_buffer is deprecated, use create_string_buffer instead",
##                  DeprecationWarning, stacklevel=2)
    return create_string_buffer(init, size)

_c_functype_cache = {}
def CFUNCTYPE(restype, *argtypes, **kw):
    """CFUNCTYPE(restype, *argtypes,
                 use_errno=False, use_last_error=False) -> function prototype.

    restype: the result type
    argtypes: a sequence specifying the argument types

    The function prototype can be called in different ways to create a
    callable object:

    prototype(integer address) -> foreign function
    prototype(callable) -> create and return a C callable function from callable
    prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
    prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
    prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
    """
    flags = _FUNCFLAG_CDECL
    if kw.pop("use_errno", False):
        flags |= _FUNCFLAG_USE_ERRNO
    if kw.pop("use_last_error", False):
        flags |= _FUNCFLAG_USE_LASTERROR
    if kw:
        raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
    try:
        return _c_functype_cache[(restype, argtypes, flags)]
    except KeyError:
        class CFunctionType(_CFuncPtr):
            _argtypes_ = argtypes
            _restype_ = restype
            _flags_ = flags
        _c_functype_cache[(restype, argtypes, flags)] = CFunctionType
        return CFunctionType

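# Illustrative sketch (not part of the upstream module): a prototype for a C
# comparison callback, here passed to libc qsort.  The library name
# "libc.so.6" is a Linux-specific assumption.
#
#   >>> CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
#   >>> def py_cmp(a, b):
#   ...     return a[0] - b[0]
#   >>> libc = CDLL("libc.so.6")
#   >>> arr = (c_int * 4)(3, 1, 4, 1)
#   >>> _ = libc.qsort(arr, len(arr), sizeof(c_int), CMPFUNC(py_cmp))
#   >>> list(arr)
#   [1, 1, 3, 4]
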
if _os.name == "nt":
    from _ctypes import LoadLibrary as _dlopen
    from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL

    _win_functype_cache = {}
    def WINFUNCTYPE(restype, *argtypes, **kw):
        # docstring set later (very similar to CFUNCTYPE.__doc__)
        flags = _FUNCFLAG_STDCALL
        if kw.pop("use_errno", False):
            flags |= _FUNCFLAG_USE_ERRNO
        if kw.pop("use_last_error", False):
            flags |= _FUNCFLAG_USE_LASTERROR
        if kw:
            raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
        try:
            return _win_functype_cache[(restype, argtypes, flags)]
        except KeyError:
            class WinFunctionType(_CFuncPtr):
                _argtypes_ = argtypes
                _restype_ = restype
                _flags_ = flags
            _win_functype_cache[(restype, argtypes, flags)] = WinFunctionType
            return WinFunctionType
    if WINFUNCTYPE.__doc__:
        WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE")

elif _os.name == "posix":
    from _ctypes import dlopen as _dlopen

from _ctypes import sizeof, byref, addressof, alignment, resize
from _ctypes import get_errno, set_errno
from _ctypes import _SimpleCData

def _check_size(typ, typecode=None):
    # Check sizeof(ctypes_type) against struct.calcsize.  This
    # should protect somewhat against a misconfigured libffi.
    from struct import calcsize
    if typecode is None:
        # Most _type_ codes are the same as used in struct
        typecode = typ._type_
    actual, required = sizeof(typ), calcsize(typecode)
    if actual != required:
        raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
                          (typ, actual, required))

class py_object(_SimpleCData):
    _type_ = "O"
    def __repr__(self):
        try:
            return super().__repr__()
        except ValueError:
            return "%s(<NULL>)" % type(self).__name__
_check_size(py_object, "P")

class c_short(_SimpleCData):
    _type_ = "h"
_check_size(c_short)

class c_ushort(_SimpleCData):
    _type_ = "H"
_check_size(c_ushort)

class c_long(_SimpleCData):
    _type_ = "l"
_check_size(c_long)

class c_ulong(_SimpleCData):
    _type_ = "L"
_check_size(c_ulong)

if _calcsize("i") == _calcsize("l"):
    # if int and long have the same size, make c_int an alias for c_long
    c_int = c_long
    c_uint = c_ulong
else:
    class c_int(_SimpleCData):
        _type_ = "i"
    _check_size(c_int)

    class c_uint(_SimpleCData):
        _type_ = "I"
    _check_size(c_uint)

class c_float(_SimpleCData):
    _type_ = "f"
_check_size(c_float)

class c_double(_SimpleCData):
    _type_ = "d"
_check_size(c_double)

class c_longdouble(_SimpleCData):
    _type_ = "g"
if sizeof(c_longdouble) == sizeof(c_double):
    c_longdouble = c_double

if _calcsize("l") == _calcsize("q"):
    # if long and long long have the same size, make c_longlong an alias for c_long
    c_longlong = c_long
    c_ulonglong = c_ulong
else:
    class c_longlong(_SimpleCData):
        _type_ = "q"
    _check_size(c_longlong)

    class c_ulonglong(_SimpleCData):
        _type_ = "Q"
##    def from_param(cls, val):
##        return ('d', float(val), val)
##    from_param = classmethod(from_param)
    _check_size(c_ulonglong)

class c_ubyte(_SimpleCData):
    _type_ = "B"
c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte
# backward compatibility:
##c_uchar = c_ubyte
_check_size(c_ubyte)

class c_byte(_SimpleCData):
    _type_ = "b"
c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte
_check_size(c_byte)

class c_char(_SimpleCData):
    _type_ = "c"
c_char.__ctype_le__ = c_char.__ctype_be__ = c_char
_check_size(c_char)

class c_char_p(_SimpleCData):
    _type_ = "z"
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, c_void_p.from_buffer(self).value)
_check_size(c_char_p, "P")

class c_void_p(_SimpleCData):
    _type_ = "P"
c_voidp = c_void_p # backwards compatibility (to a bug)
_check_size(c_void_p)

class c_bool(_SimpleCData):
    _type_ = "?"

from _ctypes import POINTER, pointer, _pointer_type_cache

class c_wchar_p(_SimpleCData):
    _type_ = "Z"
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, c_void_p.from_buffer(self).value)

class c_wchar(_SimpleCData):
    _type_ = "u"

def _reset_cache():
    _pointer_type_cache.clear()
    _c_functype_cache.clear()
    if _os.name == "nt":
        _win_functype_cache.clear()
    # _SimpleCData.c_wchar_p_from_param
    POINTER(c_wchar).from_param = c_wchar_p.from_param
    # _SimpleCData.c_char_p_from_param
    POINTER(c_char).from_param = c_char_p.from_param
    _pointer_type_cache[None] = c_void_p
    # XXX for whatever reasons, creating the first instance of a callback
    # function is needed for the unittests on Win64 to succeed.  This MAY
    # be a compiler bug, since the problem occurs only when _ctypes is
    # compiled with the MS SDK compiler.  Or an uninitialized variable?
    CFUNCTYPE(c_int)(lambda: None)

def create_unicode_buffer(init, size=None):
    """create_unicode_buffer(aString) -> character array
    create_unicode_buffer(anInteger) -> character array
    create_unicode_buffer(aString, anInteger) -> character array
    """
    if isinstance(init, str):
        if size is None:
            size = len(init)+1
        buftype = c_wchar * size
        buf = buftype()
        buf.value = init
        return buf
    elif isinstance(init, int):
        buftype = c_wchar * init
        buf = buftype()
        return buf
    raise TypeError(init)


# XXX Deprecated
def SetPointerType(pointer, cls):
    if _pointer_type_cache.get(cls, None) is not None:
        raise RuntimeError("This type already exists in the cache")
    if id(pointer) not in _pointer_type_cache:
        raise RuntimeError("What's this???")
    pointer.set_type(cls)
    _pointer_type_cache[cls] = pointer
    del _pointer_type_cache[id(pointer)]

# XXX Deprecated
def ARRAY(typ, len):
    return typ * len

################################################################


class CDLL(object):
    """An instance of this class represents a loaded dll/shared
    library, exporting functions using the standard C calling
    convention (named 'cdecl' on Windows).

    The exported functions can be accessed as attributes, or by
    indexing with the function name.  Examples:

    <obj>.qsort -> callable object
    <obj>['qsort'] -> callable object

    Calling the functions releases the Python GIL during the call and
    reacquires it afterwards.
    """
    _func_flags_ = _FUNCFLAG_CDECL
    _func_restype_ = c_int
    # default values for repr
    _name = '<uninitialized>'
    _handle = 0
    _FuncPtr = None

    def __init__(self, name, mode=DEFAULT_MODE, handle=None,
                 use_errno=False,
                 use_last_error=False):
        self._name = name
        flags = self._func_flags_
        if use_errno:
            flags |= _FUNCFLAG_USE_ERRNO
        if use_last_error:
            flags |= _FUNCFLAG_USE_LASTERROR

        class _FuncPtr(_CFuncPtr):
            _flags_ = flags
            _restype_ = self._func_restype_
        self._FuncPtr = _FuncPtr

        if handle is None:
            self._handle = _dlopen(self._name, mode)
        else:
            self._handle = handle

    def __repr__(self):
        return "<%s '%s', handle %x at %#x>" % \
               (self.__class__.__name__, self._name,
                (self._handle & (_sys.maxsize*2 + 1)),
                id(self) & (_sys.maxsize*2 + 1))

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            raise AttributeError(name)
        func = self.__getitem__(name)
        setattr(self, name, func)
        return func

    def __getitem__(self, name_or_ordinal):
        func = self._FuncPtr((name_or_ordinal, self))
        if not isinstance(name_or_ordinal, int):
            func.__name__ = name_or_ordinal
        return func

class PyDLL(CDLL):
    """This class represents the Python library itself.  It allows
    accessing Python API functions.  The GIL is not released, and
    Python exceptions are handled correctly.
    """
    _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI

if _os.name == "nt":

    class WinDLL(CDLL):
        """This class represents a dll exporting functions using the
        Windows stdcall calling convention.
        """
        _func_flags_ = _FUNCFLAG_STDCALL

    # XXX Hm, what about HRESULT as normal parameter?
    # Mustn't it derive from c_long then?
    from _ctypes import _check_HRESULT, _SimpleCData
    class HRESULT(_SimpleCData):
        _type_ = "l"
        # _check_retval_ is called with the function's result when it
        # is used as restype.  It checks for the FAILED bit, and
        # raises an OSError if it is set.
        #
        # The _check_retval_ method is implemented in C, so that the
        # method definition itself is not included in the traceback
        # when it raises an error - that is what we want (and Python
        # doesn't have a way to raise an exception in the caller's
        # frame).
        _check_retval_ = _check_HRESULT

    class OleDLL(CDLL):
        """This class represents a dll exporting functions using the
        Windows stdcall calling convention, and returning HRESULT.
        HRESULT error values are automatically raised as OSError
        exceptions.
        """
        _func_flags_ = _FUNCFLAG_STDCALL
        _func_restype_ = HRESULT

class LibraryLoader(object):
    def __init__(self, dlltype):
        self._dlltype = dlltype

    def __getattr__(self, name):
        if name[0] == '_':
            raise AttributeError(name)
        dll = self._dlltype(name)
        setattr(self, name, dll)
        return dll

    def __getitem__(self, name):
        return getattr(self, name)

    def LoadLibrary(self, name):
        return self._dlltype(name)

cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)

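# Illustrative sketch (not part of the upstream module): loading a shared
# library and declaring a function signature.  "libm.so.6" is a Linux-specific
# assumption; other platforms spell the library name differently.
#
#   >>> libm = CDLL("libm.so.6")
#   >>> libm.sqrt.restype = c_double
#   >>> libm.sqrt.argtypes = [c_double]
#   >>> libm.sqrt(2.0)
#   1.4142135623730951
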
if _os.name == "nt":
    pythonapi = PyDLL("python dll", None, _sys.dllhandle)
elif _sys.platform == "cygwin":
    pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
else:
    pythonapi = PyDLL(None)


if _os.name == "nt":
    windll = LibraryLoader(WinDLL)
    oledll = LibraryLoader(OleDLL)

    if _os.name == "nt":
        GetLastError = windll.kernel32.GetLastError
    else:
        GetLastError = windll.coredll.GetLastError
    from _ctypes import get_last_error, set_last_error

    def WinError(code=None, descr=None):
        if code is None:
            code = GetLastError()
        if descr is None:
            descr = FormatError(code).strip()
        return OSError(None, descr, None, code)

if sizeof(c_uint) == sizeof(c_void_p):
    c_size_t = c_uint
    c_ssize_t = c_int
elif sizeof(c_ulong) == sizeof(c_void_p):
    c_size_t = c_ulong
    c_ssize_t = c_long
elif sizeof(c_ulonglong) == sizeof(c_void_p):
    c_size_t = c_ulonglong
    c_ssize_t = c_longlong

# functions

from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr

## void *memmove(void *, const void *, size_t);
memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)

## void *memset(void *, int, size_t)
memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)

def PYFUNCTYPE(restype, *argtypes):
    class CFunctionType(_CFuncPtr):
        _argtypes_ = argtypes
        _restype_ = restype
        _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
    return CFunctionType

_cast = PYFUNCTYPE(py_object, c_void_p, py_object, py_object)(_cast_addr)
def cast(obj, typ):
    return _cast(obj, obj, typ)

_string_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
def string_at(ptr, size=-1):
    """string_at(addr[, size]) -> string

    Return the string at addr."""
    return _string_at(ptr, size)

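# Illustrative sketch (not part of the upstream module): memmove() and
# string_at() operate on raw memory.
#
#   >>> src = create_string_buffer(b"data")
#   >>> dst = create_string_buffer(8)
#   >>> _ = memmove(dst, src, 4)
#   >>> string_at(dst, 4)
#   b'data'
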
try:
    from _ctypes import _wstring_at_addr
except ImportError:
    pass
else:
    _wstring_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
    def wstring_at(ptr, size=-1):
        """wstring_at(addr[, size]) -> string

        Return the string at addr."""
        return _wstring_at(ptr, size)


if _os.name == "nt": # COM stuff
    def DllGetClassObject(rclsid, riid, ppv):
        try:
            ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
        except ImportError:
            return -2147221231 # CLASS_E_CLASSNOTAVAILABLE
        else:
            return ccom.DllGetClassObject(rclsid, riid, ppv)

    def DllCanUnloadNow():
        try:
            ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
        except ImportError:
            return 0 # S_OK
        return ccom.DllCanUnloadNow()

from ctypes._endian import BigEndianStructure, LittleEndianStructure

# Fill in specifically-sized types
c_int8 = c_byte
c_uint8 = c_ubyte
for kind in [c_short, c_int, c_long, c_longlong]:
    if sizeof(kind) == 2: c_int16 = kind
    elif sizeof(kind) == 4: c_int32 = kind
    elif sizeof(kind) == 8: c_int64 = kind
for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
    if sizeof(kind) == 2: c_uint16 = kind
    elif sizeof(kind) == 4: c_uint32 = kind
    elif sizeof(kind) == 8: c_uint64 = kind
del(kind)

_reset_cache()
61
third_party/python/Lib/ctypes/_endian.py
vendored
Normal file
@@ -0,0 +1,61 @@
import sys
from ctypes import *

_array_type = type(Array)

def _other_endian(typ):
    """Return the type with the 'other' byte order.  Simple types like
    c_int and so on already have __ctype_be__ and __ctype_le__
    attributes which contain the types, for more complicated types
    arrays and structures are supported.
    """
    # check _OTHER_ENDIAN attribute (present if typ is primitive type)
    if hasattr(typ, _OTHER_ENDIAN):
        return getattr(typ, _OTHER_ENDIAN)
    # if typ is array
    if isinstance(typ, _array_type):
        return _other_endian(typ._type_) * typ._length_
    # if typ is structure
    if issubclass(typ, Structure):
        return typ
    raise TypeError("This type does not support other endian: %s" % typ)

class _swapped_meta(type(Structure)):
    def __setattr__(self, attrname, value):
        if attrname == "_fields_":
            fields = []
            for desc in value:
                name = desc[0]
                typ = desc[1]
                rest = desc[2:]
                fields.append((name, _other_endian(typ)) + rest)
            value = fields
        super().__setattr__(attrname, value)

################################################################

# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swapped_bytes_ attribute to determine the bit order in
# structures containing bit fields.

if sys.byteorder == "little":
    _OTHER_ENDIAN = "__ctype_be__"

    LittleEndianStructure = Structure

    class BigEndianStructure(Structure, metaclass=_swapped_meta):
        """Structure with big endian byte order"""
        __slots__ = ()
        _swappedbytes_ = None

elif sys.byteorder == "big":
    _OTHER_ENDIAN = "__ctype_le__"

    BigEndianStructure = Structure
    class LittleEndianStructure(Structure, metaclass=_swapped_meta):
        """Structure with little endian byte order"""
        __slots__ = ()
        _swappedbytes_ = None

else:
    raise RuntimeError("Invalid byteorder")
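
# Illustrative sketch (not part of the upstream module): a structure whose
# fields are stored big-endian regardless of the host byte order.
#
#   >>> from ctypes import BigEndianStructure, c_uint16
#   >>> class Header(BigEndianStructure):
#   ...     _fields_ = [("magic", c_uint16), ("length", c_uint16)]
#   >>> bytes(Header(0x1234, 8))
#   b'\x124\x00\x08'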
7
third_party/python/Lib/ctypes/macholib/README.ctypes
vendored
Normal file
@@ -0,0 +1,7 @@
Files in this directory come from Bob Ippolito's py2app.

License: Any components of the py2app suite may be distributed under
the MIT or PSF open source licenses.

This is version 1.0, SVN revision 789, from 2006/01/25.
The main repository is http://svn.red-bean.com/bob/macholib/trunk/macholib/
9
third_party/python/Lib/ctypes/macholib/__init__.py
vendored
Normal file
@@ -0,0 +1,9 @@
"""
|
||||
Enough Mach-O to make your head spin.
|
||||
|
||||
See the relevant header files in /usr/include/mach-o
|
||||
|
||||
And also Apple's documentation.
|
||||
"""
|
||||
|
||||
__version__ = '1.0'
|
159
third_party/python/Lib/ctypes/macholib/dyld.py
vendored
Normal file
@@ -0,0 +1,159 @@
"""
|
||||
dyld emulation
|
||||
"""
|
||||
|
||||
import os
|
||||
from ctypes.macholib.framework import framework_info
|
||||
from ctypes.macholib.dylib import dylib_info
|
||||
from itertools import *
|
||||
|
||||
__all__ = [
|
||||
'dyld_find', 'framework_find',
|
||||
'framework_info', 'dylib_info',
|
||||
]
|
||||
|
||||
# These are the defaults as per man dyld(1)
|
||||
#
|
||||
DEFAULT_FRAMEWORK_FALLBACK = [
|
||||
os.path.expanduser("~/Library/Frameworks"),
|
||||
"/Library/Frameworks",
|
||||
"/Network/Library/Frameworks",
|
||||
"/System/Library/Frameworks",
|
||||
]
|
||||
|
||||
DEFAULT_LIBRARY_FALLBACK = [
|
||||
os.path.expanduser("~/lib"),
|
||||
"/usr/local/lib",
|
||||
"/lib",
|
||||
"/usr/lib",
|
||||
]
|
||||
|
||||
def dyld_env(env, var):
|
||||
if env is None:
|
||||
env = os.environ
|
||||
rval = env.get(var)
|
||||
if rval is None:
|
||||
return []
|
||||
return rval.split(':')
|
||||
|
||||
def dyld_image_suffix(env=None):
|
||||
if env is None:
|
||||
env = os.environ
|
||||
return env.get('DYLD_IMAGE_SUFFIX')
|
||||
|
||||
def dyld_framework_path(env=None):
|
||||
return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
|
||||
|
||||
def dyld_library_path(env=None):
|
||||
return dyld_env(env, 'DYLD_LIBRARY_PATH')
|
||||
|
||||
def dyld_fallback_framework_path(env=None):
|
||||
return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
|
||||
|
||||
def dyld_fallback_library_path(env=None):
|
||||
return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
|
||||
|
||||
def dyld_image_suffix_search(iterator, env=None):
|
||||
"""For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
|
||||
suffix = dyld_image_suffix(env)
|
||||
if suffix is None:
|
||||
return iterator
|
||||
def _inject(iterator=iterator, suffix=suffix):
|
||||
for path in iterator:
|
||||
if path.endswith('.dylib'):
|
||||
yield path[:-len('.dylib')] + suffix + '.dylib'
|
||||
else:
|
||||
yield path + suffix
|
||||
yield path
|
||||
return _inject()
|
||||
|
||||
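# Illustrative sketch (not part of the upstream module): with a suffix set,
# each candidate path is tried with the suffix injected first.
#
#   >>> env = {'DYLD_IMAGE_SUFFIX': '_debug'}
#   >>> list(dyld_image_suffix_search(['/usr/lib/libfoo.dylib'], env))
#   ['/usr/lib/libfoo_debug.dylib', '/usr/lib/libfoo.dylib']
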
def dyld_override_search(name, env=None):
    # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
    # framework name, use the first file that exists in the framework
    # path if any.  If there is none go on to search the DYLD_LIBRARY_PATH
    # if any.

    framework = framework_info(name)

    if framework is not None:
        for path in dyld_framework_path(env):
            yield os.path.join(path, framework['name'])

    # If DYLD_LIBRARY_PATH is set then use the first file that exists
    # in the path.  If none use the original name.
    for path in dyld_library_path(env):
        yield os.path.join(path, os.path.basename(name))

def dyld_executable_path_search(name, executable_path=None):
    # If we haven't done any searching and found a library and the
    # dylib_name starts with "@executable_path/" then construct the
    # library name.
    if name.startswith('@executable_path/') and executable_path is not None:
        yield os.path.join(executable_path, name[len('@executable_path/'):])

def dyld_default_search(name, env=None):
    yield name

    framework = framework_info(name)

    if framework is not None:
        fallback_framework_path = dyld_fallback_framework_path(env)
        for path in fallback_framework_path:
            yield os.path.join(path, framework['name'])

    fallback_library_path = dyld_fallback_library_path(env)
    for path in fallback_library_path:
        yield os.path.join(path, os.path.basename(name))

    if framework is not None and not fallback_framework_path:
        for path in DEFAULT_FRAMEWORK_FALLBACK:
            yield os.path.join(path, framework['name'])

    if not fallback_library_path:
        for path in DEFAULT_LIBRARY_FALLBACK:
            yield os.path.join(path, os.path.basename(name))

def dyld_find(name, executable_path=None, env=None):
    """
    Find a library or framework using dyld semantics
    """
    for path in dyld_image_suffix_search(chain(
                dyld_override_search(name, env),
                dyld_executable_path_search(name, executable_path),
                dyld_default_search(name, env),
            ), env):
        if os.path.isfile(path):
            return path
    raise ValueError("dylib %s could not be found" % (name,))

def framework_find(fn, executable_path=None, env=None):
    """
    Find a framework using dyld semantics in a very loose manner.

    Will take input such as:
        Python
        Python.framework
        Python.framework/Versions/Current
    """
    error = None
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError as e:
        error = e
    fmwk_index = fn.rfind('.framework')
    if fmwk_index == -1:
        fmwk_index = len(fn)
        fn += '.framework'
    fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError:
        raise error

def test_dyld_find():
    env = {}
    assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
    assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'

if __name__ == '__main__':
    test_dyld_find()
63
third_party/python/Lib/ctypes/macholib/dylib.py
vendored
Normal file
@@ -0,0 +1,63 @@
"""
|
||||
Generic dylib path manipulation
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
__all__ = ['dylib_info']
|
||||
|
||||
DYLIB_RE = re.compile(r"""(?x)
|
||||
(?P<location>^.*)(?:^|/)
|
||||
(?P<name>
|
||||
(?P<shortname>\w+?)
|
||||
(?:\.(?P<version>[^._]+))?
|
||||
(?:_(?P<suffix>[^._]+))?
|
||||
\.dylib$
|
||||
)
|
||||
""")
|
||||
|
||||
def dylib_info(filename):
|
||||
"""
|
||||
A dylib name can take one of the following four forms:
|
||||
Location/Name.SomeVersion_Suffix.dylib
|
||||
Location/Name.SomeVersion.dylib
|
||||
Location/Name_Suffix.dylib
|
||||
Location/Name.dylib
|
||||
|
||||
returns None if not found or a mapping equivalent to:
|
||||
dict(
|
||||
location='Location',
|
||||
name='Name.SomeVersion_Suffix.dylib',
|
||||
shortname='Name',
|
||||
version='SomeVersion',
|
||||
suffix='Suffix',
|
||||
)
|
||||
|
||||
Note that SomeVersion and Suffix are optional and may be None
|
||||
if not present.
|
||||
"""
|
||||
is_dylib = DYLIB_RE.match(filename)
|
||||
if not is_dylib:
|
||||
return None
|
||||
return is_dylib.groupdict()
|
||||
|
||||
|
||||
def test_dylib_info():
|
||||
def d(location=None, name=None, shortname=None, version=None, suffix=None):
|
||||
return dict(
|
||||
location=location,
|
||||
name=name,
|
||||
shortname=shortname,
|
||||
version=version,
|
||||
suffix=suffix
|
||||
)
|
||||
assert dylib_info('completely/invalid') is None
|
||||
assert dylib_info('completely/invalide_debug') is None
|
||||
assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
|
||||
assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
|
||||
assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
|
||||
assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
|
||||
assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_dylib_info()
|
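For reference, here is how the returned mapping looks for one of the four documented forms. A sketch: the path is invented for illustration.

from ctypes.macholib.dylib import dylib_info

info = dylib_info('usr/lib/Foo.A_debug.dylib')
# DYLIB_RE captures each named group of the name:
# {'location': 'usr/lib', 'name': 'Foo.A_debug.dylib',
#  'shortname': 'Foo', 'version': 'A', 'suffix': 'debug'}
print(info)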
2
third_party/python/Lib/ctypes/macholib/fetch_macholib
vendored
Executable file
@@ -0,0 +1,2 @@
#!/bin/sh
svn export --force http://svn.red-bean.com/bob/macholib/trunk/macholib/ .
1
third_party/python/Lib/ctypes/macholib/fetch_macholib.bat
vendored
Normal file
@@ -0,0 +1 @@
svn export --force http://svn.red-bean.com/bob/macholib/trunk/macholib/ .
65
third_party/python/Lib/ctypes/macholib/framework.py
vendored
Normal file
@@ -0,0 +1,65 @@
"""
|
||||
Generic framework path manipulation
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
__all__ = ['framework_info']
|
||||
|
||||
STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
|
||||
(?P<location>^.*)(?:^|/)
|
||||
(?P<name>
|
||||
(?P<shortname>\w+).framework/
|
||||
(?:Versions/(?P<version>[^/]+)/)?
|
||||
(?P=shortname)
|
||||
(?:_(?P<suffix>[^_]+))?
|
||||
)$
|
||||
""")
|
||||
|
||||
def framework_info(filename):
|
||||
"""
|
||||
A framework name can take one of the following four forms:
|
||||
Location/Name.framework/Versions/SomeVersion/Name_Suffix
|
||||
Location/Name.framework/Versions/SomeVersion/Name
|
||||
Location/Name.framework/Name_Suffix
|
||||
Location/Name.framework/Name
|
||||
|
||||
returns None if not found, or a mapping equivalent to:
|
||||
dict(
|
||||
location='Location',
|
||||
name='Name.framework/Versions/SomeVersion/Name_Suffix',
|
||||
shortname='Name',
|
||||
version='SomeVersion',
|
||||
suffix='Suffix',
|
||||
)
|
||||
|
||||
Note that SomeVersion and Suffix are optional and may be None
|
||||
if not present
|
||||
"""
|
||||
is_framework = STRICT_FRAMEWORK_RE.match(filename)
|
||||
if not is_framework:
|
||||
return None
|
||||
return is_framework.groupdict()
|
||||
|
||||
def test_framework_info():
|
||||
def d(location=None, name=None, shortname=None, version=None, suffix=None):
|
||||
return dict(
|
||||
location=location,
|
||||
name=name,
|
||||
shortname=shortname,
|
||||
version=version,
|
||||
suffix=suffix
|
||||
)
|
||||
assert framework_info('completely/invalid') is None
|
||||
assert framework_info('completely/invalid/_debug') is None
|
||||
assert framework_info('P/F.framework') is None
|
||||
assert framework_info('P/F.framework/_debug') is None
|
||||
assert framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F')
|
||||
assert framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug')
|
||||
assert framework_info('P/F.framework/Versions') is None
|
||||
assert framework_info('P/F.framework/Versions/A') is None
|
||||
assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A')
|
||||
assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug')
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_framework_info()
|
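The same idea for frameworks; a sketch with an invented path.

from ctypes.macholib.framework import framework_info

info = framework_info('Sys/Python.framework/Versions/3.6/Python_debug')
# STRICT_FRAMEWORK_RE requires the trailing path component to repeat the
# short name, which is why 'P/F.framework/Versions/A' above returns None.
# {'location': 'Sys', 'name': 'Python.framework/Versions/3.6/Python_debug',
#  'shortname': 'Python', 'version': '3.6', 'suffix': 'debug'}
print(info)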
14
third_party/python/Lib/ctypes/test/__init__.py
vendored
Normal file
@@ -0,0 +1,14 @@
import os
import unittest
from test import support

# skip tests if _ctypes was not built
ctypes = support.import_module('ctypes')
ctypes_symbols = dir(ctypes)

def need_symbol(name):
    return unittest.skipUnless(name in ctypes_symbols,
                               '{!r} is required'.format(name))

def load_tests(*args):
    return support.load_package_tests(os.path.dirname(__file__), *args)
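A short sketch of how the need_symbol() helper above is used throughout the test files that follow (the test class here is invented for illustration):

import unittest
from ctypes.test import need_symbol

class ExampleTest(unittest.TestCase):
    @need_symbol('c_wchar')   # skipped when this build of ctypes lacks c_wchar
    def test_wide_chars(self):
        from ctypes import c_wchar
        self.assertEqual(c_wchar('x').value, 'x')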
4
third_party/python/Lib/ctypes/test/__main__.py
vendored
Normal file
@@ -0,0 +1,4 @@
from ctypes.test import load_tests
import unittest

unittest.main()
73
third_party/python/Lib/ctypes/test/test_anon.py
vendored
Normal file
@@ -0,0 +1,73 @@
import unittest
import test.support
from ctypes import *

class AnonTest(unittest.TestCase):

    def test_anon(self):
        class ANON(Union):
            _fields_ = [("a", c_int),
                        ("b", c_int)]

        class Y(Structure):
            _fields_ = [("x", c_int),
                        ("_", ANON),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        self.assertEqual(Y.a.offset, sizeof(c_int))
        self.assertEqual(Y.b.offset, sizeof(c_int))

        self.assertEqual(ANON.a.offset, 0)
        self.assertEqual(ANON.b.offset, 0)

    def test_anon_nonseq(self):
        # TypeError: _anonymous_ must be a sequence
        self.assertRaises(TypeError,
                          lambda: type(Structure)("Name",
                                                  (Structure,),
                                                  {"_fields_": [], "_anonymous_": 42}))

    def test_anon_nonmember(self):
        # AttributeError: type object 'Name' has no attribute 'x'
        self.assertRaises(AttributeError,
                          lambda: type(Structure)("Name",
                                                  (Structure,),
                                                  {"_fields_": [],
                                                   "_anonymous_": ["x"]}))

    @test.support.cpython_only
    def test_issue31490(self):
        # There shouldn't be an assertion failure in case the class has an
        # attribute whose name is specified in _anonymous_ but not in _fields_.

        # AttributeError: 'x' is specified in _anonymous_ but not in _fields_
        with self.assertRaises(AttributeError):
            class Name(Structure):
                _fields_ = []
                _anonymous_ = ["x"]
                x = 42

    def test_nested(self):
        class ANON_S(Structure):
            _fields_ = [("a", c_int)]

        class ANON_U(Union):
            _fields_ = [("_", ANON_S),
                        ("b", c_int)]
            _anonymous_ = ["_"]

        class Y(Structure):
            _fields_ = [("x", c_int),
                        ("_", ANON_U),
                        ("y", c_int)]
            _anonymous_ = ["_"]

        self.assertEqual(Y.x.offset, 0)
        self.assertEqual(Y.a.offset, sizeof(c_int))
        self.assertEqual(Y.b.offset, sizeof(c_int))
        self.assertEqual(Y._.offset, sizeof(c_int))
        self.assertEqual(Y.y.offset, sizeof(c_int) * 2)

if __name__ == "__main__":
    unittest.main()
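Outside the test harness, the _anonymous_ machinery these tests exercise looks like this (a sketch with invented names):

from ctypes import Structure, Union, c_int

class _U(Union):
    _fields_ = [("as_int", c_int), ("as_other", c_int)]

class Outer(Structure):
    _anonymous_ = ["u"]                   # lift _U's fields into Outer
    _fields_ = [("tag", c_int), ("u", _U)]

o = Outer()
o.as_int = 7              # reaches through the anonymous union member
assert o.u.as_other == 7  # both union fields alias the same int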
64
third_party/python/Lib/ctypes/test/test_array_in_pointer.py
vendored
Normal file
@@ -0,0 +1,64 @@
import unittest
from ctypes import *
from binascii import hexlify
import re

def dump(obj):
    # helper function to dump memory contents in hex, with a hyphen
    # between the bytes.
    h = hexlify(memoryview(obj)).decode()
    return re.sub(r"(..)", r"\1-", h)[:-1]


class Value(Structure):
    _fields_ = [("val", c_byte)]

class Container(Structure):
    _fields_ = [("pvalues", POINTER(Value))]

class Test(unittest.TestCase):
    def test(self):
        # create an array of 4 values
        val_array = (Value * 4)()

        # create a container, which holds a pointer to the pvalues array.
        c = Container()
        c.pvalues = val_array

        # memory contains 4 NUL bytes now, that's correct
        self.assertEqual("00-00-00-00", dump(val_array))

        # set the values of the array through the pointer:
        for i in range(4):
            c.pvalues[i].val = i + 1

        values = [c.pvalues[i].val for i in range(4)]

        # These are the expected results: here's the bug!
        self.assertEqual(
            (values, dump(val_array)),
            ([1, 2, 3, 4], "01-02-03-04")
        )

    def test_2(self):

        val_array = (Value * 4)()

        # memory contains 4 NUL bytes now, that's correct
        self.assertEqual("00-00-00-00", dump(val_array))

        ptr = cast(val_array, POINTER(Value))
        # set the values of the array through the pointer:
        for i in range(4):
            ptr[i].val = i + 1

        values = [ptr[i].val for i in range(4)]

        # These are the expected results: here's the bug!
        self.assertEqual(
            (values, dump(val_array)),
            ([1, 2, 3, 4], "01-02-03-04")
        )

if __name__ == "__main__":
    unittest.main()
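What the dump() helper above prints for a small object, shown standalone (a sketch):

from binascii import hexlify
from ctypes import c_uint16
import re

def dump(obj):
    # hex bytes joined by hyphens, e.g. '02-01'
    h = hexlify(memoryview(obj)).decode()
    return re.sub(r"(..)", r"\1-", h)[:-1]

print(dump(c_uint16(0x0102)))  # '02-01' on little-endian, '01-02' on big-endian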
192
third_party/python/Lib/ctypes/test/test_arrays.py
vendored
Normal file
@@ -0,0 +1,192 @@
import unittest
from test.support import bigmemtest, _2G
import sys
from ctypes import *

from ctypes.test import need_symbol

formats = "bBhHiIlLqQfd"

formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
          c_long, c_ulonglong, c_float, c_double, c_longdouble

class ArrayTestCase(unittest.TestCase):
    def test_simple(self):
        # create classes holding simple numeric types, and check
        # various properties.

        init = list(range(15, 25))

        for fmt in formats:
            alen = len(init)
            int_array = ARRAY(fmt, alen)

            ia = int_array(*init)
            # length of instance ok?
            self.assertEqual(len(ia), alen)

            # slot values ok?
            values = [ia[i] for i in range(alen)]
            self.assertEqual(values, init)

            # out-of-bounds accesses should be caught
            with self.assertRaises(IndexError): ia[alen]
            with self.assertRaises(IndexError): ia[-alen-1]

            # change the items
            from operator import setitem
            new_values = list(range(42, 42+alen))
            [setitem(ia, n, new_values[n]) for n in range(alen)]
            values = [ia[i] for i in range(alen)]
            self.assertEqual(values, new_values)

            # are the items initialized to 0?
            ia = int_array()
            values = [ia[i] for i in range(alen)]
            self.assertEqual(values, [0] * alen)

            # Too many initializers should be caught
            self.assertRaises(IndexError, int_array, *range(alen*2))

        CharArray = ARRAY(c_char, 3)

        ca = CharArray(b"a", b"b", b"c")

        # Should this work? It doesn't:
        # CharArray("abc")
        self.assertRaises(TypeError, CharArray, "abc")

        self.assertEqual(ca[0], b"a")
        self.assertEqual(ca[1], b"b")
        self.assertEqual(ca[2], b"c")
        self.assertEqual(ca[-3], b"a")
        self.assertEqual(ca[-2], b"b")
        self.assertEqual(ca[-1], b"c")

        self.assertEqual(len(ca), 3)

        # cannot delete items
        from operator import delitem
        self.assertRaises(TypeError, delitem, ca, 0)

    def test_numeric_arrays(self):

        alen = 5

        numarray = ARRAY(c_int, alen)

        na = numarray()
        values = [na[i] for i in range(alen)]
        self.assertEqual(values, [0] * alen)

        na = numarray(*[c_int()] * alen)
        values = [na[i] for i in range(alen)]
        self.assertEqual(values, [0]*alen)

        na = numarray(1, 2, 3, 4, 5)
        values = [i for i in na]
        self.assertEqual(values, [1, 2, 3, 4, 5])

        na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
        values = [i for i in na]
        self.assertEqual(values, [1, 2, 3, 4, 5])

    def test_classcache(self):
        self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4))
        self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3))

    def test_from_address(self):
        # Failed with 0.9.8, reported by JUrner
        p = create_string_buffer(b"foo")
        sz = (c_char * 3).from_address(addressof(p))
        self.assertEqual(sz[:], b"foo")
        self.assertEqual(sz[::], b"foo")
        self.assertEqual(sz[::-1], b"oof")
        self.assertEqual(sz[::3], b"f")
        self.assertEqual(sz[1:4:2], b"o")
        self.assertEqual(sz.value, b"foo")

    @need_symbol('create_unicode_buffer')
    def test_from_addressW(self):
        p = create_unicode_buffer("foo")
        sz = (c_wchar * 3).from_address(addressof(p))
        self.assertEqual(sz[:], "foo")
        self.assertEqual(sz[::], "foo")
        self.assertEqual(sz[::-1], "oof")
        self.assertEqual(sz[::3], "f")
        self.assertEqual(sz[1:4:2], "o")
        self.assertEqual(sz.value, "foo")

    def test_cache(self):
        # Array types are cached internally in the _ctypes extension,
        # in a WeakValueDictionary.  Make sure the array type is
        # removed from the cache when the itemtype goes away.  This
        # test will not fail, but will show a leak in the testsuite.

        # Create a new type:
        class my_int(c_int):
            pass
        # Create a new array type based on it:
        t1 = my_int * 1
        t2 = my_int * 1
        self.assertIs(t1, t2)

    def test_subclass(self):
        class T(Array):
            _type_ = c_int
            _length_ = 13
        class U(T):
            pass
        class V(U):
            pass
        class W(V):
            pass
        class X(T):
            _type_ = c_short
        class Y(T):
            _length_ = 187

        for c in [T, U, V, W]:
            self.assertEqual(c._type_, c_int)
            self.assertEqual(c._length_, 13)
            self.assertEqual(c()._type_, c_int)
            self.assertEqual(c()._length_, 13)

        self.assertEqual(X._type_, c_short)
        self.assertEqual(X._length_, 13)
        self.assertEqual(X()._type_, c_short)
        self.assertEqual(X()._length_, 13)

        self.assertEqual(Y._type_, c_int)
        self.assertEqual(Y._length_, 187)
        self.assertEqual(Y()._type_, c_int)
        self.assertEqual(Y()._length_, 187)

    def test_bad_subclass(self):
        import sys

        with self.assertRaises(AttributeError):
            class T(Array):
                pass
        with self.assertRaises(AttributeError):
            class T(Array):
                _type_ = c_int
        with self.assertRaises(AttributeError):
            class T(Array):
                _length_ = 13
        with self.assertRaises(OverflowError):
            class T(Array):
                _type_ = c_int
                _length_ = sys.maxsize * 2
        with self.assertRaises(AttributeError):
            class T(Array):
                _type_ = c_int
                _length_ = 1.87

    @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
    @bigmemtest(size=_2G, memuse=1, dry_run=False)
    def test_large_array(self, size):
        c_char * size

if __name__ == '__main__':
    unittest.main()
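The core array mechanics the file above exercises, reduced to a few lines (a minimal sketch):

from ctypes import ARRAY, c_int, sizeof

IntArray5 = ARRAY(c_int, 5)        # equivalent to writing c_int * 5
arr = IntArray5(1, 2, 3, 4, 5)
assert list(arr) == [1, 2, 3, 4, 5]
assert sizeof(arr) == 5 * sizeof(c_int)
assert (c_int * 5) is IntArray5    # array types are cached, per test_classcache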
230
third_party/python/Lib/ctypes/test/test_as_parameter.py
vendored
Normal file
@@ -0,0 +1,230 @@
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test

dll = CDLL(_ctypes_test.__file__)

try:
    CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
    # fake to enable this test on Linux
    CALLBACK_FUNCTYPE = CFUNCTYPE

class POINT(Structure):
    _fields_ = [("x", c_int), ("y", c_int)]

class BasicWrapTestCase(unittest.TestCase):
    def wrap(self, param):
        return param

    @need_symbol('c_wchar')
    def test_wchar_parm(self):
        f = dll._testfunc_i_bhilfd
        f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
        result = f(self.wrap(1), self.wrap("x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
        self.assertEqual(result, 139)
        self.assertIs(type(result), int)

    def test_pointers(self):
        f = dll._testfunc_p_p
        f.restype = POINTER(c_int)
        f.argtypes = [POINTER(c_int)]

        # This only works if the value c_int(42) passed to the
        # function is still alive while the pointer (the result) is
        # used.

        v = c_int(42)

        self.assertEqual(pointer(v).contents.value, 42)
        result = f(self.wrap(pointer(v)))
        self.assertEqual(type(result), POINTER(c_int))
        self.assertEqual(result.contents.value, 42)

        # This one works...
        result = f(self.wrap(pointer(v)))
        self.assertEqual(result.contents.value, v.value)

        p = pointer(c_int(99))
        result = f(self.wrap(p))
        self.assertEqual(result.contents.value, 99)

    def test_shorts(self):
        f = dll._testfunc_callback_i_if

        args = []
        expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
                    1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]

        def callback(v):
            args.append(v)
            return v

        CallBack = CFUNCTYPE(c_int, c_int)

        cb = CallBack(callback)
        f(self.wrap(2**18), self.wrap(cb))
        self.assertEqual(args, expected)

    ################################################################

    def test_callbacks(self):
        f = dll._testfunc_callback_i_if
        f.restype = c_int
        f.argtypes = None

        MyCallback = CFUNCTYPE(c_int, c_int)

        def callback(value):
            #print "called back with", value
            return value

        cb = MyCallback(callback)

        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)

        # test with prototype
        f.argtypes = [c_int, MyCallback]
        cb = MyCallback(callback)

        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)

        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)

        AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)

        # check that the prototype works: we call f with wrong
        # argument types
        cb = AnotherCallback(callback)
        self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))

    def test_callbacks_2(self):
        # Can also use simple datatypes as argument type specifiers
        # for the callback function.
        # In this case the call receives an instance of that type
        f = dll._testfunc_callback_i_if
        f.restype = c_int

        MyCallback = CFUNCTYPE(c_int, c_int)

        f.argtypes = [c_int, MyCallback]

        def callback(value):
            #print "called back with", value
            self.assertEqual(type(value), int)
            return value

        cb = MyCallback(callback)
        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)

    def test_longlong_callbacks(self):

        f = dll._testfunc_callback_q_qf
        f.restype = c_longlong

        MyCallback = CFUNCTYPE(c_longlong, c_longlong)

        f.argtypes = [c_longlong, MyCallback]

        def callback(value):
            self.assertIsInstance(value, int)
            return value & 0x7FFFFFFF

        cb = MyCallback(callback)

        self.assertEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))

    def test_byval(self):
        # without prototype
        ptin = POINT(1, 2)
        ptout = POINT()
        # EXPORT int _testfunc_byval(point in, point *pout)
        result = dll._testfunc_byval(ptin, byref(ptout))
        got = result, ptout.x, ptout.y
        expected = 3, 1, 2
        self.assertEqual(got, expected)

        # with prototype
        ptin = POINT(101, 102)
        ptout = POINT()
        dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
        dll._testfunc_byval.restype = c_int
        result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
        got = result, ptout.x, ptout.y
        expected = 203, 101, 102
        self.assertEqual(got, expected)

    def test_struct_return_2H(self):
        class S2H(Structure):
            _fields_ = [("x", c_short),
                        ("y", c_short)]
        dll.ret_2h_func.restype = S2H
        dll.ret_2h_func.argtypes = [S2H]
        inp = S2H(99, 88)
        s2h = dll.ret_2h_func(self.wrap(inp))
        self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))

        # Test also that the original struct was unmodified (i.e. was passed by
        # value)
        self.assertEqual((inp.x, inp.y), (99, 88))

    def test_struct_return_8H(self):
        class S8I(Structure):
            _fields_ = [("a", c_int),
                        ("b", c_int),
                        ("c", c_int),
                        ("d", c_int),
                        ("e", c_int),
                        ("f", c_int),
                        ("g", c_int),
                        ("h", c_int)]
        dll.ret_8i_func.restype = S8I
        dll.ret_8i_func.argtypes = [S8I]
        inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
        s8i = dll.ret_8i_func(self.wrap(inp))
        self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
                         (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))

    def test_recursive_as_param(self):
        from ctypes import c_int

        class A(object):
            pass

        a = A()
        a._as_parameter_ = a
        with self.assertRaises(RecursionError):
            c_int.from_param(a)


#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

class AsParamWrapper(object):
    def __init__(self, param):
        self._as_parameter_ = param

class AsParamWrapperTestCase(BasicWrapTestCase):
    wrap = AsParamWrapper

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

class AsParamPropertyWrapper(object):
    def __init__(self, param):
        self._param = param

    def getParameter(self):
        return self._param
    _as_parameter_ = property(getParameter)

class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
    wrap = AsParamPropertyWrapper

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if __name__ == '__main__':
    unittest.main()
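In isolation, the protocol these wrapper classes test is small: any object exposing an _as_parameter_ attribute can stand in for a ctypes argument (a sketch; the class name is invented):

from ctypes import c_int

class Wrapped:
    def __init__(self, value):
        self._as_parameter_ = value

# from_param() unwraps _as_parameter_ until it reaches something it can
# convert, here a plain Python int; an object that points at itself, as
# in test_recursive_as_param, recurses until RecursionError.
param = c_int.from_param(Wrapped(42))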
290
third_party/python/Lib/ctypes/test/test_bitfields.py
vendored
Normal file
@@ -0,0 +1,290 @@
from ctypes import *
from ctypes.test import need_symbol
import unittest
import os

import _ctypes_test

class BITS(Structure):
    _fields_ = [("A", c_int, 1),
                ("B", c_int, 2),
                ("C", c_int, 3),
                ("D", c_int, 4),
                ("E", c_int, 5),
                ("F", c_int, 6),
                ("G", c_int, 7),
                ("H", c_int, 8),
                ("I", c_int, 9),

                ("M", c_short, 1),
                ("N", c_short, 2),
                ("O", c_short, 3),
                ("P", c_short, 4),
                ("Q", c_short, 5),
                ("R", c_short, 6),
                ("S", c_short, 7)]

func = CDLL(_ctypes_test.__file__).unpack_bitfields
func.argtypes = POINTER(BITS), c_char

##for n in "ABCDEFGHIMNOPQRS":
##    print n, hex(getattr(BITS, n).size), getattr(BITS, n).offset

class C_Test(unittest.TestCase):

    def test_ints(self):
        for i in range(512):
            for name in "ABCDEFGHI":
                b = BITS()
                setattr(b, name, i)
                self.assertEqual(getattr(b, name), func(byref(b), name.encode('ascii')))

    def test_shorts(self):
        for i in range(256):
            for name in "MNOPQRS":
                b = BITS()
                setattr(b, name, i)
                self.assertEqual(getattr(b, name), func(byref(b), name.encode('ascii')))

signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types

class BitFieldTest(unittest.TestCase):

    def test_longlong(self):
        class X(Structure):
            _fields_ = [("a", c_longlong, 1),
                        ("b", c_longlong, 62),
                        ("c", c_longlong, 1)]

        self.assertEqual(sizeof(X), sizeof(c_longlong))
        x = X()
        x.a, x.b, x.c = -1, 7, -1
        self.assertEqual((x.a, x.b, x.c), (-1, 7, -1))

    def test_ulonglong(self):
        class X(Structure):
            _fields_ = [("a", c_ulonglong, 1),
                        ("b", c_ulonglong, 62),
                        ("c", c_ulonglong, 1)]

        self.assertEqual(sizeof(X), sizeof(c_longlong))
        x = X()
        self.assertEqual((x.a, x.b, x.c), (0, 0, 0))
        x.a, x.b, x.c = 7, 7, 7
        self.assertEqual((x.a, x.b, x.c), (1, 7, 1))

    def test_signed(self):
        for c_typ in signed_int_types:
            class X(Structure):
                _fields_ = [("dummy", c_typ),
                            ("a", c_typ, 3),
                            ("b", c_typ, 3),
                            ("c", c_typ, 1)]
            self.assertEqual(sizeof(X), sizeof(c_typ)*2)

            x = X()
            self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
            x.a = -1
            self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, -1, 0, 0))
            x.a, x.b = 0, -1
            self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, -1, 0))


    def test_unsigned(self):
        for c_typ in unsigned_int_types:
            class X(Structure):
                _fields_ = [("a", c_typ, 3),
                            ("b", c_typ, 3),
                            ("c", c_typ, 1)]
            self.assertEqual(sizeof(X), sizeof(c_typ))

            x = X()
            self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 0, 0))
            x.a = -1
            self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 7, 0, 0))
            x.a, x.b = 0, -1
            self.assertEqual((c_typ, x.a, x.b, x.c), (c_typ, 0, 7, 0))


    def fail_fields(self, *fields):
        return self.get_except(type(Structure), "X", (),
                               {"_fields_": fields})

    def test_nonint_types(self):
        # bit fields are not allowed on non-integer types.
        result = self.fail_fields(("a", c_char_p, 1))
        self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char_p'))

        result = self.fail_fields(("a", c_void_p, 1))
        self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_void_p'))

        if c_int != c_long:
            result = self.fail_fields(("a", POINTER(c_int), 1))
            self.assertEqual(result, (TypeError, 'bit fields not allowed for type LP_c_int'))

        result = self.fail_fields(("a", c_char, 1))
        self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char'))

        class Dummy(Structure):
            _fields_ = []

        result = self.fail_fields(("a", Dummy, 1))
        self.assertEqual(result, (TypeError, 'bit fields not allowed for type Dummy'))

    @need_symbol('c_wchar')
    def test_c_wchar(self):
        result = self.fail_fields(("a", c_wchar, 1))
        self.assertEqual(result,
                         (TypeError, 'bit fields not allowed for type c_wchar'))

    def test_single_bitfield_size(self):
        for c_typ in int_types:
            result = self.fail_fields(("a", c_typ, -1))
            self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))

            result = self.fail_fields(("a", c_typ, 0))
            self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))

            class X(Structure):
                _fields_ = [("a", c_typ, 1)]
            self.assertEqual(sizeof(X), sizeof(c_typ))

            class X(Structure):
                _fields_ = [("a", c_typ, sizeof(c_typ)*8)]
            self.assertEqual(sizeof(X), sizeof(c_typ))

            result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
            self.assertEqual(result, (ValueError, 'number of bits invalid for bit field'))

    def test_multi_bitfields_size(self):
        class X(Structure):
            _fields_ = [("a", c_short, 1),
                        ("b", c_short, 14),
                        ("c", c_short, 1)]
        self.assertEqual(sizeof(X), sizeof(c_short))

        class X(Structure):
            _fields_ = [("a", c_short, 1),
                        ("a1", c_short),
                        ("b", c_short, 14),
                        ("c", c_short, 1)]
        self.assertEqual(sizeof(X), sizeof(c_short)*3)
        self.assertEqual(X.a.offset, 0)
        self.assertEqual(X.a1.offset, sizeof(c_short))
        self.assertEqual(X.b.offset, sizeof(c_short)*2)
        self.assertEqual(X.c.offset, sizeof(c_short)*2)

        class X(Structure):
            _fields_ = [("a", c_short, 3),
                        ("b", c_short, 14),
                        ("c", c_short, 14)]
        self.assertEqual(sizeof(X), sizeof(c_short)*3)
        self.assertEqual(X.a.offset, sizeof(c_short)*0)
        self.assertEqual(X.b.offset, sizeof(c_short)*1)
        self.assertEqual(X.c.offset, sizeof(c_short)*2)


    def get_except(self, func, *args, **kw):
        try:
            func(*args, **kw)
        except Exception as detail:
            return detail.__class__, str(detail)

    def test_mixed_1(self):
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_int, 4)]
        if os.name == "nt":
            self.assertEqual(sizeof(X), sizeof(c_int)*2)
        else:
            self.assertEqual(sizeof(X), sizeof(c_int))

    def test_mixed_2(self):
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_int, 32)]
        self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int))

    def test_mixed_3(self):
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_ubyte, 4)]
        self.assertEqual(sizeof(X), sizeof(c_byte))

    def test_mixed_4(self):
        class X(Structure):
            _fields_ = [("a", c_short, 4),
                        ("b", c_short, 4),
                        ("c", c_int, 24),
                        ("d", c_short, 4),
                        ("e", c_short, 4),
                        ("f", c_int, 24)]
        # MSVC does NOT combine c_short and c_int into one field, GCC
        # does (unless GCC is run with '-mms-bitfields' which
        # produces code compatible with MSVC).
        if os.name == "nt":
            self.assertEqual(sizeof(X), sizeof(c_int) * 4)
        else:
            self.assertEqual(sizeof(X), sizeof(c_int) * 2)

    def test_anon_bitfields(self):
        # anonymous bit-fields gave a strange error message
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_ubyte, 4)]
        class Y(Structure):
            _anonymous_ = ["_"]
            _fields_ = [("_", X)]

    @need_symbol('c_uint32')
    def test_uint32(self):
        class X(Structure):
            _fields_ = [("a", c_uint32, 32)]
        x = X()
        x.a = 10
        self.assertEqual(x.a, 10)
        x.a = 0xFDCBA987
        self.assertEqual(x.a, 0xFDCBA987)

    @need_symbol('c_uint64')
    def test_uint64(self):
        class X(Structure):
            _fields_ = [("a", c_uint64, 64)]
        x = X()
        x.a = 10
        self.assertEqual(x.a, 10)
        x.a = 0xFEDCBA9876543211
        self.assertEqual(x.a, 0xFEDCBA9876543211)

    @need_symbol('c_uint32')
    def test_uint32_swap_little_endian(self):
        # Issue #23319
        class Little(LittleEndianStructure):
            _fields_ = [("a", c_uint32, 24),
                        ("b", c_uint32, 4),
                        ("c", c_uint32, 4)]
        b = bytearray(4)
        x = Little.from_buffer(b)
        x.a = 0xabcdef
        x.b = 1
        x.c = 2
        self.assertEqual(b, b'\xef\xcd\xab\x21')

    @need_symbol('c_uint32')
    def test_uint32_swap_big_endian(self):
        # Issue #23319
        class Big(BigEndianStructure):
            _fields_ = [("a", c_uint32, 24),
                        ("b", c_uint32, 4),
                        ("c", c_uint32, 4)]
        b = bytearray(4)
        x = Big.from_buffer(b)
        x.a = 0xabcdef
        x.b = 1
        x.c = 2
        self.assertEqual(b, b'\xab\xcd\xef\x12')

if __name__ == "__main__":
    unittest.main()
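The bit-field declaration syntax these tests revolve around, in a minimal form (a sketch):

from ctypes import Structure, c_uint8, sizeof

class Flags(Structure):
    _fields_ = [("ready", c_uint8, 1),   # third tuple item = width in bits
                ("mode",  c_uint8, 3),
                ("level", c_uint8, 4)]

f = Flags()
f.mode = 5
assert sizeof(Flags) == 1   # all three fields pack into a single byte
assert f.mode == 5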
64
third_party/python/Lib/ctypes/test/test_buffers.py
vendored
Normal file
@@ -0,0 +1,64 @@
from ctypes import *
from ctypes.test import need_symbol
import unittest

class StringBufferTestCase(unittest.TestCase):

    def test_buffer(self):
        b = create_string_buffer(32)
        self.assertEqual(len(b), 32)
        self.assertEqual(sizeof(b), 32 * sizeof(c_char))
        self.assertIs(type(b[0]), bytes)

        b = create_string_buffer(b"abc")
        self.assertEqual(len(b), 4) # trailing nul char
        self.assertEqual(sizeof(b), 4 * sizeof(c_char))
        self.assertIs(type(b[0]), bytes)
        self.assertEqual(b[0], b"a")
        self.assertEqual(b[:], b"abc\0")
        self.assertEqual(b[::], b"abc\0")
        self.assertEqual(b[::-1], b"\0cba")
        self.assertEqual(b[::2], b"ac")
        self.assertEqual(b[::5], b"a")

        self.assertRaises(TypeError, create_string_buffer, "abc")

    def test_buffer_interface(self):
        self.assertEqual(len(bytearray(create_string_buffer(0))), 0)
        self.assertEqual(len(bytearray(create_string_buffer(1))), 1)

    @need_symbol('c_wchar')
    def test_unicode_buffer(self):
        b = create_unicode_buffer(32)
        self.assertEqual(len(b), 32)
        self.assertEqual(sizeof(b), 32 * sizeof(c_wchar))
        self.assertIs(type(b[0]), str)

        b = create_unicode_buffer("abc")
        self.assertEqual(len(b), 4) # trailing nul char
        self.assertEqual(sizeof(b), 4 * sizeof(c_wchar))
        self.assertIs(type(b[0]), str)
        self.assertEqual(b[0], "a")
        self.assertEqual(b[:], "abc\0")
        self.assertEqual(b[::], "abc\0")
        self.assertEqual(b[::-1], "\0cba")
        self.assertEqual(b[::2], "ac")
        self.assertEqual(b[::5], "a")

        self.assertRaises(TypeError, create_unicode_buffer, b"abc")

    @need_symbol('c_wchar')
    def test_unicode_conversion(self):
        b = create_unicode_buffer("abc")
        self.assertEqual(len(b), 4) # trailing nul char
        self.assertEqual(sizeof(b), 4 * sizeof(c_wchar))
        self.assertIs(type(b[0]), str)
        self.assertEqual(b[0], "a")
        self.assertEqual(b[:], "abc\0")
        self.assertEqual(b[::], "abc\0")
        self.assertEqual(b[::-1], "\0cba")
        self.assertEqual(b[::2], "ac")
        self.assertEqual(b[::5], "a")

if __name__ == "__main__":
    unittest.main()
66
third_party/python/Lib/ctypes/test/test_bytes.py
vendored
Normal file
@@ -0,0 +1,66 @@
"""Test where byte objects are accepted"""
|
||||
import unittest
|
||||
import sys
|
||||
from ctypes import *
|
||||
|
||||
class BytesTest(unittest.TestCase):
|
||||
def test_c_char(self):
|
||||
x = c_char(b"x")
|
||||
self.assertRaises(TypeError, c_char, "x")
|
||||
x.value = b"y"
|
||||
with self.assertRaises(TypeError):
|
||||
x.value = "y"
|
||||
c_char.from_param(b"x")
|
||||
self.assertRaises(TypeError, c_char.from_param, "x")
|
||||
self.assertIn('xbd', repr(c_char.from_param(b"\xbd")))
|
||||
(c_char * 3)(b"a", b"b", b"c")
|
||||
self.assertRaises(TypeError, c_char * 3, "a", "b", "c")
|
||||
|
||||
def test_c_wchar(self):
|
||||
x = c_wchar("x")
|
||||
self.assertRaises(TypeError, c_wchar, b"x")
|
||||
x.value = "y"
|
||||
with self.assertRaises(TypeError):
|
||||
x.value = b"y"
|
||||
c_wchar.from_param("x")
|
||||
self.assertRaises(TypeError, c_wchar.from_param, b"x")
|
||||
(c_wchar * 3)("a", "b", "c")
|
||||
self.assertRaises(TypeError, c_wchar * 3, b"a", b"b", b"c")
|
||||
|
||||
def test_c_char_p(self):
|
||||
c_char_p(b"foo bar")
|
||||
self.assertRaises(TypeError, c_char_p, "foo bar")
|
||||
|
||||
def test_c_wchar_p(self):
|
||||
c_wchar_p("foo bar")
|
||||
self.assertRaises(TypeError, c_wchar_p, b"foo bar")
|
||||
|
||||
def test_struct(self):
|
||||
class X(Structure):
|
||||
_fields_ = [("a", c_char * 3)]
|
||||
|
||||
x = X(b"abc")
|
||||
self.assertRaises(TypeError, X, "abc")
|
||||
self.assertEqual(x.a, b"abc")
|
||||
self.assertEqual(type(x.a), bytes)
|
||||
|
||||
def test_struct_W(self):
|
||||
class X(Structure):
|
||||
_fields_ = [("a", c_wchar * 3)]
|
||||
|
||||
x = X("abc")
|
||||
self.assertRaises(TypeError, X, b"abc")
|
||||
self.assertEqual(x.a, "abc")
|
||||
self.assertEqual(type(x.a), str)
|
||||
|
||||
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
|
||||
def test_BSTR(self):
|
||||
from _ctypes import _SimpleCData
|
||||
class BSTR(_SimpleCData):
|
||||
_type_ = "X"
|
||||
|
||||
BSTR("abc")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
315
third_party/python/Lib/ctypes/test/test_byteswap.py
vendored
Normal file
@@ -0,0 +1,315 @@
import sys, unittest, struct, math, ctypes
from binascii import hexlify

from ctypes import *

def bin(s):
    return hexlify(memoryview(s)).decode().upper()

# Each *simple* type that supports different byte orders has an
# __ctype_be__ attribute that specifies the same type in BIG ENDIAN
# byte order, and a __ctype_le__ attribute that is the same type in
# LITTLE ENDIAN byte order.
#
# For Structures and Unions, these types are created on demand.

class Test(unittest.TestCase):
    @unittest.skip('test disabled')
    def test_X(self):
        print(sys.byteorder, file=sys.stderr)
        for i in range(32):
            bits = BITS()
            setattr(bits, "i%s" % i, 1)
            dump(bits)

    def test_slots(self):
        class BigPoint(BigEndianStructure):
            __slots__ = ()
            _fields_ = [("x", c_int), ("y", c_int)]

        class LowPoint(LittleEndianStructure):
            __slots__ = ()
            _fields_ = [("x", c_int), ("y", c_int)]

        big = BigPoint()
        little = LowPoint()
        big.x = 4
        big.y = 2
        little.x = 2
        little.y = 4
        with self.assertRaises(AttributeError):
            big.z = 42
        with self.assertRaises(AttributeError):
            little.z = 24

    def test_endian_short(self):
        if sys.byteorder == "little":
            self.assertIs(c_short.__ctype_le__, c_short)
            self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
        else:
            self.assertIs(c_short.__ctype_be__, c_short)
            self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
        s = c_short.__ctype_be__(0x1234)
        self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
        self.assertEqual(bin(s), "1234")
        self.assertEqual(s.value, 0x1234)

        s = c_short.__ctype_le__(0x1234)
        self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
        self.assertEqual(bin(s), "3412")
        self.assertEqual(s.value, 0x1234)

        s = c_ushort.__ctype_be__(0x1234)
        self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
        self.assertEqual(bin(s), "1234")
        self.assertEqual(s.value, 0x1234)

        s = c_ushort.__ctype_le__(0x1234)
        self.assertEqual(bin(struct.pack("<h", 0x1234)), "3412")
        self.assertEqual(bin(s), "3412")
        self.assertEqual(s.value, 0x1234)

    def test_endian_int(self):
        if sys.byteorder == "little":
            self.assertIs(c_int.__ctype_le__, c_int)
            self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
        else:
            self.assertIs(c_int.__ctype_be__, c_int)
            self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)

        s = c_int.__ctype_be__(0x12345678)
        self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
        self.assertEqual(bin(s), "12345678")
        self.assertEqual(s.value, 0x12345678)

        s = c_int.__ctype_le__(0x12345678)
        self.assertEqual(bin(struct.pack("<i", 0x12345678)), "78563412")
        self.assertEqual(bin(s), "78563412")
        self.assertEqual(s.value, 0x12345678)

        s = c_uint.__ctype_be__(0x12345678)
        self.assertEqual(bin(struct.pack(">I", 0x12345678)), "12345678")
        self.assertEqual(bin(s), "12345678")
        self.assertEqual(s.value, 0x12345678)

        s = c_uint.__ctype_le__(0x12345678)
        self.assertEqual(bin(struct.pack("<I", 0x12345678)), "78563412")
        self.assertEqual(bin(s), "78563412")
        self.assertEqual(s.value, 0x12345678)

    def test_endian_longlong(self):
        if sys.byteorder == "little":
            self.assertIs(c_longlong.__ctype_le__, c_longlong)
            self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
        else:
            self.assertIs(c_longlong.__ctype_be__, c_longlong)
            self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)

        s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
        self.assertEqual(bin(s), "1234567890ABCDEF")
        self.assertEqual(s.value, 0x1234567890ABCDEF)

        s = c_longlong.__ctype_le__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack("<q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
        self.assertEqual(bin(s), "EFCDAB9078563412")
        self.assertEqual(s.value, 0x1234567890ABCDEF)

        s = c_ulonglong.__ctype_be__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack(">Q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
        self.assertEqual(bin(s), "1234567890ABCDEF")
        self.assertEqual(s.value, 0x1234567890ABCDEF)

        s = c_ulonglong.__ctype_le__(0x1234567890ABCDEF)
        self.assertEqual(bin(struct.pack("<Q", 0x1234567890ABCDEF)), "EFCDAB9078563412")
        self.assertEqual(bin(s), "EFCDAB9078563412")
        self.assertEqual(s.value, 0x1234567890ABCDEF)

    def test_endian_float(self):
        if sys.byteorder == "little":
            self.assertIs(c_float.__ctype_le__, c_float)
            self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
        else:
            self.assertIs(c_float.__ctype_be__, c_float)
            self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
        s = c_float(math.pi)
        self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
        # Hm, what's the precision of a float compared to a double?
        self.assertAlmostEqual(s.value, math.pi, places=6)
        s = c_float.__ctype_le__(math.pi)
        self.assertAlmostEqual(s.value, math.pi, places=6)
        self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
        s = c_float.__ctype_be__(math.pi)
        self.assertAlmostEqual(s.value, math.pi, places=6)
        self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))

    def test_endian_double(self):
        if sys.byteorder == "little":
            self.assertIs(c_double.__ctype_le__, c_double)
            self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
        else:
            self.assertIs(c_double.__ctype_be__, c_double)
            self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
        s = c_double(math.pi)
        self.assertEqual(s.value, math.pi)
        self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
        s = c_double.__ctype_le__(math.pi)
        self.assertEqual(s.value, math.pi)
        self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
        s = c_double.__ctype_be__(math.pi)
        self.assertEqual(s.value, math.pi)
        self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))

    def test_endian_other(self):
        self.assertIs(c_byte.__ctype_le__, c_byte)
        self.assertIs(c_byte.__ctype_be__, c_byte)

        self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
        self.assertIs(c_ubyte.__ctype_be__, c_ubyte)

        self.assertIs(c_char.__ctype_le__, c_char)
        self.assertIs(c_char.__ctype_be__, c_char)

    def test_struct_fields_1(self):
        if sys.byteorder == "little":
            base = BigEndianStructure
        else:
            base = LittleEndianStructure

        class T(base):
            pass
        _fields_ = [("a", c_ubyte),
                    ("b", c_byte),
                    ("c", c_short),
                    ("d", c_ushort),
                    ("e", c_int),
                    ("f", c_uint),
                    ("g", c_long),
                    ("h", c_ulong),
                    ("i", c_longlong),
                    ("k", c_ulonglong),
                    ("l", c_float),
                    ("m", c_double),
                    ("n", c_char),

                    ("b1", c_byte, 3),
                    ("b2", c_byte, 3),
                    ("b3", c_byte, 2),
                    ("a", c_int * 3 * 3 * 3)]
        T._fields_ = _fields_

        # these fields do not support different byte order:
        for typ in c_wchar, c_void_p, POINTER(c_int):
            _fields_.append(("x", typ))
            class T(base):
                pass
            self.assertRaises(TypeError, setattr, T, "_fields_", [("x", typ)])

    def test_struct_struct(self):
        # nested structures with different byteorders

        # create nested structures with given byteorders and set memory to data

        for nested, data in (
            (BigEndianStructure, b'\0\0\0\1\0\0\0\2'),
            (LittleEndianStructure, b'\1\0\0\0\2\0\0\0'),
        ):
            for parent in (
                BigEndianStructure,
                LittleEndianStructure,
                Structure,
            ):
                class NestedStructure(nested):
                    _fields_ = [("x", c_uint32),
                                ("y", c_uint32)]

                class TestStructure(parent):
                    _fields_ = [("point", NestedStructure)]

                self.assertEqual(len(data), sizeof(TestStructure))
                ptr = POINTER(TestStructure)
                s = cast(data, ptr)[0]
                del ctypes._pointer_type_cache[TestStructure]
                self.assertEqual(s.point.x, 1)
                self.assertEqual(s.point.y, 2)

    def test_struct_fields_2(self):
        # standard packing in struct uses no alignment.
        # So, we have to align using pad bytes.
        #
        # Unaligned accesses will crash Python (on those platforms that
        # don't allow it, like sparc solaris).
        if sys.byteorder == "little":
            base = BigEndianStructure
            fmt = ">bxhid"
        else:
            base = LittleEndianStructure
            fmt = "<bxhid"

        class S(base):
            _fields_ = [("b", c_byte),
                        ("h", c_short),
                        ("i", c_int),
                        ("d", c_double)]

        s1 = S(0x12, 0x1234, 0x12345678, 3.14)
        s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
        self.assertEqual(bin(s1), bin(s2))

    def test_unaligned_nonnative_struct_fields(self):
        if sys.byteorder == "little":
            base = BigEndianStructure
            fmt = ">b h xi xd"
        else:
            base = LittleEndianStructure
            fmt = "<b h xi xd"

        class S(base):
            _pack_ = 1
            _fields_ = [("b", c_byte),

                        ("h", c_short),

                        ("_1", c_byte),
                        ("i", c_int),

                        ("_2", c_byte),
                        ("d", c_double)]

        s1 = S()
        s1.b = 0x12
        s1.h = 0x1234
        s1.i = 0x12345678
        s1.d = 3.14
        s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
        self.assertEqual(bin(s1), bin(s2))

    def test_unaligned_native_struct_fields(self):
        if sys.byteorder == "little":
            fmt = "<b h xi xd"
        else:
            base = LittleEndianStructure
            fmt = ">b h xi xd"

        class S(Structure):
            _pack_ = 1
            _fields_ = [("b", c_byte),

                        ("h", c_short),

                        ("_1", c_byte),
                        ("i", c_int),

                        ("_2", c_byte),
                        ("d", c_double)]

        s1 = S()
        s1.b = 0x12
        s1.h = 0x1234
        s1.i = 0x12345678
        s1.d = 3.14
        s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
        self.assertEqual(bin(s1), bin(s2))

if __name__ == "__main__":
    unittest.main()
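What the __ctype_be__/__ctype_le__ machinery gives you at the structure level, in a minimal sketch (the Header layout is invented for illustration):

import struct
from ctypes import BigEndianStructure, c_uint16, c_uint32

class Header(BigEndianStructure):
    _pack_ = 1
    _fields_ = [("magic", c_uint32), ("count", c_uint16)]

h = Header(0x89ABCDEF, 2)
# Every field is stored big-endian regardless of the host CPU:
assert bytes(h) == struct.pack(">IH", 0x89ABCDEF, 2)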
293
third_party/python/Lib/ctypes/test/test_callbacks.py
vendored
Normal file
@@ -0,0 +1,293 @@
import functools
|
||||
import unittest
|
||||
from ctypes import *
|
||||
from ctypes.test import need_symbol
|
||||
import _ctypes_test
|
||||
|
||||
class Callbacks(unittest.TestCase):
|
||||
functype = CFUNCTYPE
|
||||
|
||||
## def tearDown(self):
|
||||
## import gc
|
||||
## gc.collect()
|
||||
|
||||
def callback(self, *args):
|
||||
self.got_args = args
|
||||
return args[-1]
|
||||
|
||||
def check_type(self, typ, arg):
|
||||
PROTO = self.functype.__func__(typ, typ)
|
||||
result = PROTO(self.callback)(arg)
|
||||
if typ == c_float:
|
||||
self.assertAlmostEqual(result, arg, places=5)
|
||||
else:
|
||||
self.assertEqual(self.got_args, (arg,))
|
||||
self.assertEqual(result, arg)
|
||||
|
||||
PROTO = self.functype.__func__(typ, c_byte, typ)
|
||||
result = PROTO(self.callback)(-3, arg)
|
||||
if typ == c_float:
|
||||
self.assertAlmostEqual(result, arg, places=5)
|
||||
else:
|
||||
self.assertEqual(self.got_args, (-3, arg))
|
||||
self.assertEqual(result, arg)
|
||||
|
||||
################
|
||||
|
||||
def test_byte(self):
|
||||
self.check_type(c_byte, 42)
|
||||
self.check_type(c_byte, -42)
|
||||
|
||||
def test_ubyte(self):
|
||||
self.check_type(c_ubyte, 42)
|
||||
|
||||
def test_short(self):
|
||||
self.check_type(c_short, 42)
|
||||
self.check_type(c_short, -42)
|
||||
|
||||
def test_ushort(self):
|
||||
self.check_type(c_ushort, 42)
|
||||
|
||||
def test_int(self):
|
||||
self.check_type(c_int, 42)
|
||||
self.check_type(c_int, -42)
|
||||
|
||||
def test_uint(self):
|
||||
self.check_type(c_uint, 42)
|
||||
|
||||
def test_long(self):
|
||||
self.check_type(c_long, 42)
|
||||
self.check_type(c_long, -42)
|
||||
|
||||
def test_ulong(self):
|
||||
self.check_type(c_ulong, 42)
|
||||
|
||||
def test_longlong(self):
|
||||
self.check_type(c_longlong, 42)
|
||||
self.check_type(c_longlong, -42)
|
||||
|
||||
def test_ulonglong(self):
|
||||
self.check_type(c_ulonglong, 42)
|
||||
|
||||
def test_float(self):
|
||||
# only almost equal: double -> float -> double
|
||||
import math
|
||||
self.check_type(c_float, math.e)
|
||||
self.check_type(c_float, -math.e)
|
||||
|
||||
def test_double(self):
|
||||
self.check_type(c_double, 3.14)
|
||||
self.check_type(c_double, -3.14)
|
||||
|
||||
def test_longdouble(self):
|
||||
self.check_type(c_longdouble, 3.14)
|
||||
self.check_type(c_longdouble, -3.14)
|
||||
|
||||
def test_char(self):
|
||||
self.check_type(c_char, b"x")
|
||||
self.check_type(c_char, b"a")
|
||||
|
||||
# disabled: would now (correctly) raise a RuntimeWarning about
|
||||
# a memory leak. A callback function cannot return a non-integral
|
||||
# C type without causing a memory leak.
|
||||
@unittest.skip('test disabled')
|
||||
def test_char_p(self):
|
||||
self.check_type(c_char_p, "abc")
|
||||
self.check_type(c_char_p, "def")
|
||||
|
||||
def test_pyobject(self):
|
||||
o = ()
|
||||
from sys import getrefcount as grc
|
||||
for o in (), [], object():
|
||||
initial = grc(o)
|
||||
# This call leaks a reference to 'o'...
|
||||
self.check_type(py_object, o)
|
||||
before = grc(o)
|
||||
# ...but this call doesn't leak any more. Where is the refcount?
|
||||
self.check_type(py_object, o)
|
||||
after = grc(o)
|
||||
self.assertEqual((after, o), (before, o))
|
||||
|
||||
def test_unsupported_restype_1(self):
|
||||
# Only "fundamental" result types are supported for callback
|
||||
# functions, the type must have a non-NULL stgdict->setfunc.
|
||||
# POINTER(c_double), for example, is not supported.
|
||||
|
||||
prototype = self.functype.__func__(POINTER(c_double))
|
||||
# The type is checked when the prototype is called
|
||||
self.assertRaises(TypeError, prototype, lambda: None)
|
||||
|
||||
def test_unsupported_restype_2(self):
|
||||
prototype = self.functype.__func__(object)
|
||||
self.assertRaises(TypeError, prototype, lambda: None)
|
||||
|
||||
def test_issue_7959(self):
|
||||
proto = self.functype.__func__(None)
|
||||
|
||||
class X(object):
|
||||
def func(self): pass
|
||||
def __init__(self):
|
||||
self.v = proto(self.func)
|
||||
|
||||
import gc
|
||||
for i in range(32):
|
||||
X()
|
||||
gc.collect()
|
||||
live = [x for x in gc.get_objects()
|
||||
if isinstance(x, X)]
|
||||
self.assertEqual(len(live), 0)
|
||||
|
||||
def test_issue12483(self):
|
||||
import gc
|
||||
class Nasty:
|
||||
def __del__(self):
|
||||
gc.collect()
|
||||
CFUNCTYPE(None)(lambda x=Nasty(): None)
|
||||
|
||||
|
||||
@need_symbol('WINFUNCTYPE')
|
||||
class StdcallCallbacks(Callbacks):
|
||||
try:
|
||||
functype = WINFUNCTYPE
|
||||
except NameError:
|
||||
pass
|
||||
|
||||
################################################################
|
||||
|
||||
class SampleCallbacksTestCase(unittest.TestCase):
|
||||
|
||||
def test_integrate(self):
|
||||
        # Derived from some then non-working code, posted by David Foster
        dll = CDLL(_ctypes_test.__file__)

        # The function prototype called by 'integrate': double func(double);
        CALLBACK = CFUNCTYPE(c_double, c_double)

        # The integrate function itself, exposed from the _ctypes_test dll
        integrate = dll.integrate
        integrate.argtypes = (c_double, c_double, CALLBACK, c_long)
        integrate.restype = c_double

        def func(x):
            return x**2

        result = integrate(0.0, 1.0, CALLBACK(func), 10)
        diff = abs(result - 1./3.)

        self.assertLess(diff, 0.01, "%s not less than 0.01" % diff)

    def test_issue_8959_a(self):
        from ctypes.util import find_library
        libc_path = find_library("c")
        if not libc_path:
            self.skipTest('could not find libc')
        libc = CDLL(libc_path)

        @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
        def cmp_func(a, b):
            return a[0] - b[0]

        array = (c_int * 5)(5, 1, 99, 7, 33)

        libc.qsort(array, len(array), sizeof(c_int), cmp_func)
        self.assertEqual(array[:], [1, 5, 7, 33, 99])

    @need_symbol('WINFUNCTYPE')
    def test_issue_8959_b(self):
        from ctypes.wintypes import BOOL, HWND, LPARAM
        global windowCount
        windowCount = 0

        @WINFUNCTYPE(BOOL, HWND, LPARAM)
        def EnumWindowsCallbackFunc(hwnd, lParam):
            global windowCount
            windowCount += 1
            return True  # allow windows to keep enumerating

        windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)

    def test_callback_register_int(self):
        # Issue #8275: buggy handling of callback args under Win64
        # NOTE: should be run on release builds as well
        dll = CDLL(_ctypes_test.__file__)
        CALLBACK = CFUNCTYPE(c_int, c_int, c_int, c_int, c_int, c_int)
        # All this function does is call the callback with its args squared
        func = dll._testfunc_cbk_reg_int
        func.argtypes = (c_int, c_int, c_int, c_int, c_int, CALLBACK)
        func.restype = c_int

        def callback(a, b, c, d, e):
            return a + b + c + d + e

        result = func(2, 3, 4, 5, 6, CALLBACK(callback))
        self.assertEqual(result, callback(2*2, 3*3, 4*4, 5*5, 6*6))

    def test_callback_register_double(self):
        # Issue #8275: buggy handling of callback args under Win64
        # NOTE: should be run on release builds as well
        dll = CDLL(_ctypes_test.__file__)
        CALLBACK = CFUNCTYPE(c_double, c_double, c_double, c_double,
                             c_double, c_double)
        # All this function does is call the callback with its args squared
        func = dll._testfunc_cbk_reg_double
        func.argtypes = (c_double, c_double, c_double,
                         c_double, c_double, CALLBACK)
        func.restype = c_double

        def callback(a, b, c, d, e):
            return a + b + c + d + e

        result = func(1.1, 2.2, 3.3, 4.4, 5.5, CALLBACK(callback))
        self.assertEqual(result,
                         callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5))

    def test_callback_large_struct(self):
        class Check: pass

        # This should mirror the structure in Modules/_ctypes/_ctypes_test.c
        class X(Structure):
            _fields_ = [
                ('first', c_ulong),
                ('second', c_ulong),
                ('third', c_ulong),
            ]

        def callback(check, s):
            check.first = s.first
            check.second = s.second
            check.third = s.third
            # See issue #29565.
            # The structure should be passed by value, so
            # any changes to it should not be reflected in
            # the value passed.
            s.first = s.second = s.third = 0x0badf00d

        check = Check()
        s = X()
        s.first = 0xdeadbeef
        s.second = 0xcafebabe
        s.third = 0x0bad1dea

        CALLBACK = CFUNCTYPE(None, X)
        dll = CDLL(_ctypes_test.__file__)
        func = dll._testfunc_cbk_large_struct
        func.argtypes = (X, CALLBACK)
        func.restype = None
        # the function just calls the callback with the passed structure
        func(s, CALLBACK(functools.partial(callback, check)))
        self.assertEqual(check.first, s.first)
        self.assertEqual(check.second, s.second)
        self.assertEqual(check.third, s.third)
        self.assertEqual(check.first, 0xdeadbeef)
        self.assertEqual(check.second, 0xcafebabe)
        self.assertEqual(check.third, 0x0bad1dea)
        # See issue #29565.
        # Ensure that the original struct is unchanged.
        self.assertEqual(s.first, check.first)
        self.assertEqual(s.second, check.second)
        self.assertEqual(s.third, check.third)

################################################################

if __name__ == '__main__':
    unittest.main()
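The callback tests above all route a Python function through a CFUNCTYPE wrapper before handing it to C. A minimal standalone sketch of the same qsort pattern, assuming only that ctypes.util.find_library("c") resolves on the host (it is not part of the test file):

# Standalone sketch of the CFUNCTYPE/qsort pattern exercised above.
# Assumes a platform where ctypes.util.find_library("c") locates libc.
from ctypes import CDLL, CFUNCTYPE, POINTER, c_int, sizeof
from ctypes.util import find_library

libc = CDLL(find_library("c"))

# The wrapper object must stay referenced while C code may call it;
# binding it to a name (rather than passing a temporary) does that here.
CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
cmp_ints = CMPFUNC(lambda a, b: a[0] - b[0])

values = (c_int * 4)(3, 1, 4, 1)
libc.qsort(values, len(values), sizeof(c_int), cmp_ints)
print(values[:])  # -> [1, 1, 3, 4]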
99
third_party/python/Lib/ctypes/test/test_cast.py
vendored
Normal file
@ -0,0 +1,99 @@
from ctypes import *
from ctypes.test import need_symbol
import unittest
import sys

class Test(unittest.TestCase):

    def test_array2pointer(self):
        array = (c_int * 3)(42, 17, 2)

        # casting an array to a pointer works.
        ptr = cast(array, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

        if 2*sizeof(c_short) == sizeof(c_int):
            ptr = cast(array, POINTER(c_short))
            if sys.byteorder == "little":
                self.assertEqual([ptr[i] for i in range(6)],
                                 [42, 0, 17, 0, 2, 0])
            else:
                self.assertEqual([ptr[i] for i in range(6)],
                                 [0, 42, 0, 17, 0, 2])

    def test_address2pointer(self):
        array = (c_int * 3)(42, 17, 2)

        address = addressof(array)
        ptr = cast(c_void_p(address), POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

        ptr = cast(address, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

    def test_p2a_objects(self):
        array = (c_char_p * 5)()
        self.assertEqual(array._objects, None)
        array[0] = b"foo bar"
        self.assertEqual(array._objects, {'0': b"foo bar"})

        p = cast(array, POINTER(c_char_p))
        # array and p share a common _objects attribute
        self.assertIs(p._objects, array._objects)
        self.assertEqual(array._objects, {'0': b"foo bar", id(array): array})
        p[0] = b"spam spam"
        self.assertEqual(p._objects, {'0': b"spam spam", id(array): array})
        self.assertIs(array._objects, p._objects)
        p[1] = b"foo bar"
        self.assertEqual(p._objects, {'1': b'foo bar', '0': b"spam spam", id(array): array})
        self.assertIs(array._objects, p._objects)

    def test_other(self):
        p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
        self.assertEqual(p[:4], [1, 2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()
        self.assertEqual(p[:4], [1, 2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        p[2] = 96
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])

    def test_char_p(self):
        # This didn't work: bad argument to internal function
        s = c_char_p(b"hiho")
        self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
                         b"hiho")

    @need_symbol('c_wchar_p')
    def test_wchar_p(self):
        s = c_wchar_p("hiho")
        self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
                         "hiho")

    def test_bad_type_arg(self):
        # The type argument must be a ctypes pointer type.
        array_type = c_byte * sizeof(c_int)
        array = array_type()
        self.assertRaises(TypeError, cast, array, None)
        self.assertRaises(TypeError, cast, array, array_type)
        class Struct(Structure):
            _fields_ = [("a", c_int)]
        self.assertRaises(TypeError, cast, array, Struct)
        class MyUnion(Union):
            _fields_ = [("a", c_int)]
        self.assertRaises(TypeError, cast, array, MyUnion)

if __name__ == "__main__":
    unittest.main()
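test_p2a_objects above relies on cast() sharing the keep-alive bookkeeping (_objects) between the source object and the resulting pointer. A short sketch of the practical consequence; note that _objects is a CPython implementation detail, so this is illustration rather than a guaranteed API:

# Sketch: cast() keeps the source object alive via the result's
# _objects bookkeeping (a CPython implementation detail).
from ctypes import cast, POINTER, c_int

def make_pointer():
    array = (c_int * 3)(10, 20, 30)
    # The returned pointer records `array` in its _objects, so the
    # buffer is not freed when this frame exits.
    return cast(array, POINTER(c_int))

ptr = make_pointer()
print(ptr[:3])  # -> [10, 20, 30]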
212
third_party/python/Lib/ctypes/test/test_cfuncs.py
vendored
Normal file
@ -0,0 +1,212 @@
# A lot of failures in these tests on Mac OS X.
# Byte order related?

import unittest
from ctypes import *
from ctypes.test import need_symbol

import _ctypes_test

class CFunctions(unittest.TestCase):
    _dll = CDLL(_ctypes_test.__file__)

    def S(self):
        return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
    def U(self):
        return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value

    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        self.assertEqual(self._dll.tf_b(-126), -42)
        self.assertEqual(self.S(), -126)

    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        self.assertEqual(self._dll.tf_bb(0, -126), -42)
        self.assertEqual(self.S(), -126)

    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        self.assertEqual(self._dll.tf_B(255), 85)
        self.assertEqual(self.U(), 255)

    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        self.assertEqual(self._dll.tf_bB(0, 255), 85)
        self.assertEqual(self.U(), 255)

    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        self.assertEqual(self._dll.tf_h(-32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        self.assertEqual(self._dll.tf_H(65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_ulong_plus(self):
        self._dll.tf_bL.restype = c_ulong
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong,)
        self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong,)
        self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_float(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        self.assertEqual(self._dll.tf_f(-42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        self.assertEqual(self._dll.tf_d(42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_longdouble(self):
        self._dll.tf_D.restype = c_longdouble
        self._dll.tf_D.argtypes = (c_longdouble,)
        self.assertEqual(self._dll.tf_D(42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_longdouble_plus(self):
        self._dll.tf_bD.restype = c_longdouble
        self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
        self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_callwithresult(self):
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(42), 28)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tf_i(-42), -28)
        self.assertEqual(self.S(), -42)

    def test_void(self):
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tv_i(42), None)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tv_i(-42), None)
        self.assertEqual(self.S(), -42)

# The following repeats the above tests with stdcall functions (where
# they are available)
try:
    WinDLL
except NameError:
    def stdcall_dll(*_): pass
else:
    class stdcall_dll(WinDLL):
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            func = self._FuncPtr(("s_" + name, self))
            setattr(self, name, func)
            return func

@need_symbol('WinDLL')
class stdcallCFunctions(CFunctions):
    _dll = stdcall_dll(_ctypes_test.__file__)

if __name__ == '__main__':
    unittest.main()
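test_callwithresult above shows that restype may be a plain Python callable rather than a ctypes type; ctypes then invokes it with the raw integer return value. A minimal sketch of the same idea against libc's abs(), assuming find_library("c") resolves (the doubling lambda is illustrative, not part of the tests):

# Sketch of restype as a post-processing callable (see test_callwithresult).
from ctypes import CDLL, c_int
from ctypes.util import find_library

libc = CDLL(find_library("c"))  # assumption: libc is locatable

libc.abs.argtypes = (c_int,)
libc.abs.restype = lambda raw: raw * 2  # called with the raw int result
print(libc.abs(-21))  # -> 42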
36
third_party/python/Lib/ctypes/test/test_checkretval.py
vendored
Normal file
@ -0,0 +1,36 @@
import unittest

from ctypes import *
from ctypes.test import need_symbol

class CHECKED(c_int):
    def _check_retval_(value):
        # Receives a CHECKED instance.
        return str(value.value)
    _check_retval_ = staticmethod(_check_retval_)

class Test(unittest.TestCase):

    def test_checkretval(self):

        import _ctypes_test
        dll = CDLL(_ctypes_test.__file__)
        self.assertEqual(42, dll._testfunc_p_p(42))

        dll._testfunc_p_p.restype = CHECKED
        self.assertEqual("42", dll._testfunc_p_p(42))

        dll._testfunc_p_p.restype = None
        self.assertEqual(None, dll._testfunc_p_p(42))

        del dll._testfunc_p_p.restype
        self.assertEqual(42, dll._testfunc_p_p(42))

    @need_symbol('oledll')
    def test_oledll(self):
        self.assertRaises(OSError,
                          oledll.oleaut32.CreateTypeLib2,
                          0, None, None)

if __name__ == "__main__":
    unittest.main()
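_check_retval_ is the hook CHECKED exercises above: when restype is a ctypes subclass defining it, each return value is passed through it. The errcheck attribute is the documented general-purpose counterpart; a sketch, where ensure_nonnegative is a hypothetical validator and libc is again assumed locatable:

# Sketch of the errcheck protocol, the documented counterpart to
# _check_retval_: it receives (result, func, args) and may validate
# or replace the result.
from ctypes import CDLL, c_int
from ctypes.util import find_library

libc = CDLL(find_library("c"))  # assumption: libc is locatable

def ensure_nonnegative(result, func, args):
    # errcheck hook; raising here propagates out of the foreign call
    if result < 0:
        raise ValueError("unexpected negative result")
    return result

libc.abs.argtypes = (c_int,)
libc.abs.restype = c_int
libc.abs.errcheck = ensure_nonnegative
print(libc.abs(-5))  # -> 5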
21
third_party/python/Lib/ctypes/test/test_delattr.py
vendored
Normal file
@ -0,0 +1,21 @@
import unittest
from ctypes import *

class X(Structure):
    _fields_ = [("foo", c_int)]

class TestCase(unittest.TestCase):
    def test_simple(self):
        self.assertRaises(TypeError,
                          delattr, c_int(42), "value")

    def test_chararray(self):
        self.assertRaises(TypeError,
                          delattr, (c_char * 5)(), "value")

    def test_struct(self):
        self.assertRaises(TypeError,
                          delattr, X(), "foo")

if __name__ == "__main__":
    unittest.main()
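The TypeErrors asserted above arise because ctypes exposes .value and structure fields through data descriptors that support get and set but not delete. A two-line illustration of the same behavior outside the test harness:

# ctypes attributes like .value are data descriptors without a
# deleter, so delattr raises TypeError.
from ctypes import c_int

n = c_int(42)
n.value = 7        # assignment works
try:
    del n.value    # deletion is rejected
except TypeError as exc:
    print("TypeError:", exc)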
Some files were not shown because too many files have changed in this diff.