python-3.6.zip added from Github

README.cosmo contains the necessary links.
ahgamut 2021-08-08 09:38:33 +05:30 committed by Justine Tunney
parent 75fc601ff5
commit 0c4c56ff39
4219 changed files with 1968626 additions and 0 deletions

8100
third_party/python/Python/Python-ast.c generated vendored Normal file

File diff suppressed because it is too large

1
third_party/python/Python/README vendored Normal file

@@ -0,0 +1 @@
Miscellaneous source files for the main Python shared library

1295
third_party/python/Python/_warnings.c vendored Normal file

File diff suppressed because it is too large

64
third_party/python/Python/asdl.c vendored Normal file

@@ -0,0 +1,64 @@
#include "Python.h"
#include "asdl.h"
asdl_seq *
_Py_asdl_seq_new(Py_ssize_t size, PyArena *arena)
{
asdl_seq *seq = NULL;
size_t n;
/* check size is sane */
if (size < 0 ||
(size && (((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))) {
PyErr_NoMemory();
return NULL;
}
n = (size ? (sizeof(void *) * (size - 1)) : 0);
/* check if size can be added safely */
if (n > SIZE_MAX - sizeof(asdl_seq)) {
PyErr_NoMemory();
return NULL;
}
n += sizeof(asdl_seq);
seq = (asdl_seq *)PyArena_Malloc(arena, n);
if (!seq) {
PyErr_NoMemory();
return NULL;
}
memset(seq, 0, n);
seq->size = size;
return seq;
}
asdl_int_seq *
_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena)
{
asdl_int_seq *seq = NULL;
size_t n;
/* check size is sane */
if (size < 0 ||
(size && (((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))) {
PyErr_NoMemory();
return NULL;
}
n = (size ? (sizeof(void *) * (size - 1)) : 0);
/* check if size can be added safely */
if (n > SIZE_MAX - sizeof(asdl_seq)) {
PyErr_NoMemory();
return NULL;
}
n += sizeof(asdl_seq);
seq = (asdl_int_seq *)PyArena_Malloc(arena, n);
if (!seq) {
PyErr_NoMemory();
return NULL;
}
memset(seq, 0, n);
seq->size = size;
return seq;
}
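
As a usage illustration (not part of the vendored file): the sketch below allocates an asdl_int_seq in an arena, fills it, and reads it back. The asdl_seq_SET/GET/LEN macros and the PyArena_New/PyArena_Free calls are assumed from asdl.h and pyarena.h; the helper name is made up.
#include "Python.h"
#include "asdl.h"
/* Hypothetical helper: build a 3-element int sequence in an arena,
   sum it, and release everything through the arena. */
static int
demo_sum_seq(void)
{
    PyArena *arena;
    asdl_int_seq *seq;
    Py_ssize_t i;
    int total = 0;
    arena = PyArena_New();
    if (arena == NULL)
        return -1;
    seq = _Py_asdl_int_seq_new(3, arena);
    if (seq == NULL) {
        PyArena_Free(arena);
        return -1;
    }
    for (i = 0; i < 3; i++)
        asdl_seq_SET(seq, i, (int)(i + 1) * 10);
    for (i = 0; i < asdl_seq_LEN(seq); i++)
        total += asdl_seq_GET(seq, i);
    PyArena_Free(arena);    /* frees seq along with the arena */
    return total;           /* 10 + 20 + 30 = 60 */
}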

5284
third_party/python/Python/ast.c vendored Normal file

File diff suppressed because it is too large

2776
third_party/python/Python/bltinmodule.c vendored Normal file

File diff suppressed because it is too large

5621
third_party/python/Python/ceval.c vendored Normal file

File diff suppressed because it is too large

270
third_party/python/Python/ceval_gil.h vendored Normal file

@@ -0,0 +1,270 @@
/*
* Implementation of the Global Interpreter Lock (GIL).
*/
#include <stdlib.h>
#include <errno.h>
/* First some general settings */
/* microseconds (the Python API uses seconds, though) */
#define DEFAULT_INTERVAL 5000
static unsigned long gil_interval = DEFAULT_INTERVAL;
#define INTERVAL (gil_interval >= 1 ? gil_interval : 1)
/* Enable if you want to force the switching of threads at least every `gil_interval` */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING
/*
Notes about the implementation:
- The GIL is just a boolean variable (gil_locked) whose access is protected
by a mutex (gil_mutex), and whose changes are signalled by a condition
variable (gil_cond). gil_mutex is taken for short periods of time,
and therefore mostly uncontended.
- In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
able to release the GIL on demand by another thread. A volatile boolean
variable (gil_drop_request) is used for that purpose, which is checked
at every turn of the eval loop. That variable is set after a wait of
`interval` microseconds on `gil_cond` has timed out.
[Actually, another volatile boolean variable (eval_breaker) is used
which ORs several conditions into one. Volatile booleans are
sufficient as inter-thread signalling means since Python is run
on cache-coherent architectures only.]
- A thread wanting to take the GIL will first let pass a given amount of
time (`interval` microseconds) before setting gil_drop_request. This
encourages a defined switching period, but doesn't enforce it since
opcodes can take an arbitrary time to execute.
The `interval` value is available for the user to read and modify
using the Python API `sys.{get,set}switchinterval()`.
- When a thread releases the GIL and gil_drop_request is set, that thread
ensures that another GIL-awaiting thread gets scheduled.
It does so by waiting on a condition variable (switch_cond) until
the value of gil_last_holder is changed to something else than its
own thread state pointer, indicating that another thread was able to
take the GIL.
This is meant to prohibit the latency-adverse behaviour on multi-core
machines where one thread would speculatively release the GIL, but still
run and end up being the first to re-acquire it, making the "timeslices"
much longer than expected.
(Note: this mechanism is enabled with FORCE_SWITCHING above)
*/
#include "condvar.h"
#ifndef Py_HAVE_CONDVAR
#error You need either a POSIX-compatible or a Windows system!
#endif
#define MUTEX_T PyMUTEX_T
#define MUTEX_INIT(mut) \
if (PyMUTEX_INIT(&(mut))) { \
Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
if (PyMUTEX_FINI(&(mut))) { \
Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
if (PyMUTEX_LOCK(&(mut))) { \
Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
if (PyMUTEX_UNLOCK(&(mut))) { \
Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };
#define COND_T PyCOND_T
#define COND_INIT(cond) \
if (PyCOND_INIT(&(cond))) { \
Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
if (PyCOND_FINI(&(cond))) { \
Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
if (PyCOND_SIGNAL(&(cond))) { \
Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
if (PyCOND_WAIT(&(cond), &(mut))) { \
Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
{ \
int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
if (r < 0) \
Py_FatalError("PyCOND_WAIT(" #cond ") failed"); \
if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
timeout_result = 1; \
else \
timeout_result = 0; \
} \
/* Whether the GIL is already taken (-1 if uninitialized). This is atomic
because it can be read without any lock taken in ceval.c. */
static _Py_atomic_int gil_locked = {-1};
/* Number of GIL switches since the beginning. */
static unsigned long gil_switch_number = 0;
/* Last PyThreadState holding / having held the GIL. This helps us know
whether anyone else was scheduled after we dropped the GIL. */
static _Py_atomic_address gil_last_holder = {0};
/* This condition variable allows one or several threads to wait until
the GIL is released. In addition, the mutex also protects the above
variables. */
static COND_T gil_cond;
static MUTEX_T gil_mutex;
#ifdef FORCE_SWITCHING
/* This condition variable helps the GIL-releasing thread wait for
a GIL-awaiting thread to be scheduled and take the GIL. */
static COND_T switch_cond;
static MUTEX_T switch_mutex;
#endif
static int gil_created(void)
{
return _Py_atomic_load_explicit(&gil_locked, _Py_memory_order_acquire) >= 0;
}
static void create_gil(void)
{
MUTEX_INIT(gil_mutex);
#ifdef FORCE_SWITCHING
MUTEX_INIT(switch_mutex);
#endif
COND_INIT(gil_cond);
#ifdef FORCE_SWITCHING
COND_INIT(switch_cond);
#endif
_Py_atomic_store_relaxed(&gil_last_holder, 0);
_Py_ANNOTATE_RWLOCK_CREATE(&gil_locked);
_Py_atomic_store_explicit(&gil_locked, 0, _Py_memory_order_release);
}
static void destroy_gil(void)
{
/* some pthread-like implementations tie the mutex to the cond
* and must have the cond destroyed first.
*/
COND_FINI(gil_cond);
MUTEX_FINI(gil_mutex);
#ifdef FORCE_SWITCHING
COND_FINI(switch_cond);
MUTEX_FINI(switch_mutex);
#endif
_Py_atomic_store_explicit(&gil_locked, -1, _Py_memory_order_release);
_Py_ANNOTATE_RWLOCK_DESTROY(&gil_locked);
}
static void recreate_gil(void)
{
_Py_ANNOTATE_RWLOCK_DESTROY(&gil_locked);
/* XXX should we destroy the old OS resources here? */
create_gil();
}
static void drop_gil(PyThreadState *tstate)
{
if (!_Py_atomic_load_relaxed(&gil_locked))
Py_FatalError("drop_gil: GIL is not locked");
/* tstate is allowed to be NULL (early interpreter init) */
if (tstate != NULL) {
/* Sub-interpreter support: threads might have been switched
under our feet using PyThreadState_Swap(). Fix the GIL last
holder variable so that our heuristics work. */
_Py_atomic_store_relaxed(&gil_last_holder, (uintptr_t)tstate);
}
MUTEX_LOCK(gil_mutex);
_Py_ANNOTATE_RWLOCK_RELEASED(&gil_locked, /*is_write=*/1);
_Py_atomic_store_relaxed(&gil_locked, 0);
COND_SIGNAL(gil_cond);
MUTEX_UNLOCK(gil_mutex);
#ifdef FORCE_SWITCHING
if (_Py_atomic_load_relaxed(&gil_drop_request) && tstate != NULL) {
MUTEX_LOCK(switch_mutex);
/* Not switched yet => wait */
if ((PyThreadState*)_Py_atomic_load_relaxed(&gil_last_holder) == tstate) {
RESET_GIL_DROP_REQUEST();
/* NOTE: if COND_WAIT does not atomically start waiting when
releasing the mutex, another thread can run through, take
the GIL and drop it again, and reset the condition
before we even had a chance to wait for it. */
COND_WAIT(switch_cond, switch_mutex);
}
MUTEX_UNLOCK(switch_mutex);
}
#endif
}
static void take_gil(PyThreadState *tstate)
{
int err;
if (tstate == NULL)
Py_FatalError("take_gil: NULL tstate");
err = errno;
MUTEX_LOCK(gil_mutex);
if (!_Py_atomic_load_relaxed(&gil_locked))
goto _ready;
while (_Py_atomic_load_relaxed(&gil_locked)) {
int timed_out = 0;
unsigned long saved_switchnum;
saved_switchnum = gil_switch_number;
COND_TIMED_WAIT(gil_cond, gil_mutex, INTERVAL, timed_out);
/* If we timed out and no switch occurred in the meantime, it is time
to ask the GIL-holding thread to drop it. */
if (timed_out &&
_Py_atomic_load_relaxed(&gil_locked) &&
gil_switch_number == saved_switchnum) {
SET_GIL_DROP_REQUEST();
}
}
_ready:
#ifdef FORCE_SWITCHING
/* This mutex must be taken before modifying gil_last_holder (see drop_gil()). */
MUTEX_LOCK(switch_mutex);
#endif
/* We now hold the GIL */
_Py_atomic_store_relaxed(&gil_locked, 1);
_Py_ANNOTATE_RWLOCK_ACQUIRED(&gil_locked, /*is_write=*/1);
if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil_last_holder)) {
_Py_atomic_store_relaxed(&gil_last_holder, (uintptr_t)tstate);
++gil_switch_number;
}
#ifdef FORCE_SWITCHING
COND_SIGNAL(switch_cond);
MUTEX_UNLOCK(switch_mutex);
#endif
if (_Py_atomic_load_relaxed(&gil_drop_request)) {
RESET_GIL_DROP_REQUEST();
}
if (tstate->async_exc != NULL) {
_PyEval_SignalAsyncExc();
}
MUTEX_UNLOCK(gil_mutex);
errno = err;
}
void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
gil_interval = microseconds;
}
unsigned long _PyEval_GetSwitchInterval()
{
return gil_interval;
}
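
The interval stored above is what sys.{get,set}switchinterval() reads and writes at the Python level (in seconds). A rough sketch of the seconds-to-microseconds mapping follows; it is modeled on how sysmodule.c is expected to call _PyEval_SetSwitchInterval and is illustrative, not code from this commit.
#include "Python.h"
/* Hypothetical wrapper: take the seconds value used by
   sys.setswitchinterval() and store it as the microsecond interval
   consumed by the INTERVAL macro above (which floors it at 1). */
static void
demo_set_switch_interval(double seconds)
{
    _PyEval_SetSwitchInterval((unsigned long)(seconds * 1e6));
}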

679
third_party/python/Python/clinic/bltinmodule.c.h generated vendored Normal file

@@ -0,0 +1,679 @@
/*[clinic input]
preserve
[clinic start generated code]*/
PyDoc_STRVAR(builtin_abs__doc__,
"abs($module, x, /)\n"
"--\n"
"\n"
"Return the absolute value of the argument.");
#define BUILTIN_ABS_METHODDEF \
{"abs", (PyCFunction)builtin_abs, METH_O, builtin_abs__doc__},
PyDoc_STRVAR(builtin_all__doc__,
"all($module, iterable, /)\n"
"--\n"
"\n"
"Return True if bool(x) is True for all values x in the iterable.\n"
"\n"
"If the iterable is empty, return True.");
#define BUILTIN_ALL_METHODDEF \
{"all", (PyCFunction)builtin_all, METH_O, builtin_all__doc__},
PyDoc_STRVAR(builtin_any__doc__,
"any($module, iterable, /)\n"
"--\n"
"\n"
"Return True if bool(x) is True for any x in the iterable.\n"
"\n"
"If the iterable is empty, return False.");
#define BUILTIN_ANY_METHODDEF \
{"any", (PyCFunction)builtin_any, METH_O, builtin_any__doc__},
PyDoc_STRVAR(builtin_ascii__doc__,
"ascii($module, obj, /)\n"
"--\n"
"\n"
"Return an ASCII-only representation of an object.\n"
"\n"
"As repr(), return a string containing a printable representation of an\n"
"object, but escape the non-ASCII characters in the string returned by\n"
"repr() using \\\\x, \\\\u or \\\\U escapes. This generates a string similar\n"
"to that returned by repr() in Python 2.");
#define BUILTIN_ASCII_METHODDEF \
{"ascii", (PyCFunction)builtin_ascii, METH_O, builtin_ascii__doc__},
PyDoc_STRVAR(builtin_bin__doc__,
"bin($module, number, /)\n"
"--\n"
"\n"
"Return the binary representation of an integer.\n"
"\n"
" >>> bin(2796202)\n"
" \'0b1010101010101010101010\'");
#define BUILTIN_BIN_METHODDEF \
{"bin", (PyCFunction)builtin_bin, METH_O, builtin_bin__doc__},
PyDoc_STRVAR(builtin_callable__doc__,
"callable($module, obj, /)\n"
"--\n"
"\n"
"Return whether the object is callable (i.e., some kind of function).\n"
"\n"
"Note that classes are callable, as are instances of classes with a\n"
"__call__() method.");
#define BUILTIN_CALLABLE_METHODDEF \
{"callable", (PyCFunction)builtin_callable, METH_O, builtin_callable__doc__},
PyDoc_STRVAR(builtin_format__doc__,
"format($module, value, format_spec=\'\', /)\n"
"--\n"
"\n"
"Return value.__format__(format_spec)\n"
"\n"
"format_spec defaults to the empty string.\n"
"See the Format Specification Mini-Language section of help(\'FORMATTING\') for\n"
"details.");
#define BUILTIN_FORMAT_METHODDEF \
{"format", (PyCFunction)builtin_format, METH_VARARGS, builtin_format__doc__},
static PyObject *
builtin_format_impl(PyObject *module, PyObject *value, PyObject *format_spec);
static PyObject *
builtin_format(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *value;
PyObject *format_spec = NULL;
if (!PyArg_ParseTuple(args, "O|U:format",
&value, &format_spec)) {
goto exit;
}
return_value = builtin_format_impl(module, value, format_spec);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_chr__doc__,
"chr($module, i, /)\n"
"--\n"
"\n"
"Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff.");
#define BUILTIN_CHR_METHODDEF \
{"chr", (PyCFunction)builtin_chr, METH_O, builtin_chr__doc__},
static PyObject *
builtin_chr_impl(PyObject *module, int i);
static PyObject *
builtin_chr(PyObject *module, PyObject *arg)
{
PyObject *return_value = NULL;
int i;
if (!PyArg_Parse(arg, "i:chr", &i)) {
goto exit;
}
return_value = builtin_chr_impl(module, i);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_compile__doc__,
"compile($module, /, source, filename, mode, flags=0,\n"
" dont_inherit=False, optimize=-1)\n"
"--\n"
"\n"
"Compile source into a code object that can be executed by exec() or eval().\n"
"\n"
"The source code may represent a Python module, statement or expression.\n"
"The filename will be used for run-time error messages.\n"
"The mode must be \'exec\' to compile a module, \'single\' to compile a\n"
"single (interactive) statement, or \'eval\' to compile an expression.\n"
"The flags argument, if present, controls which future statements influence\n"
"the compilation of the code.\n"
"The dont_inherit argument, if true, stops the compilation inheriting\n"
"the effects of any future statements in effect in the code calling\n"
"compile; if absent or false these statements do influence the compilation,\n"
"in addition to any features explicitly specified.");
#define BUILTIN_COMPILE_METHODDEF \
{"compile", (PyCFunction)builtin_compile, METH_FASTCALL, builtin_compile__doc__},
static PyObject *
builtin_compile_impl(PyObject *module, PyObject *source, PyObject *filename,
const char *mode, int flags, int dont_inherit,
int optimize);
static PyObject *
builtin_compile(PyObject *module, PyObject **args, Py_ssize_t nargs, PyObject *kwnames)
{
PyObject *return_value = NULL;
static const char * const _keywords[] = {"source", "filename", "mode", "flags", "dont_inherit", "optimize", NULL};
static _PyArg_Parser _parser = {"OO&s|iii:compile", _keywords, 0};
PyObject *source;
PyObject *filename;
const char *mode;
int flags = 0;
int dont_inherit = 0;
int optimize = -1;
if (!_PyArg_ParseStack(args, nargs, kwnames, &_parser,
&source, PyUnicode_FSDecoder, &filename, &mode, &flags, &dont_inherit, &optimize)) {
goto exit;
}
return_value = builtin_compile_impl(module, source, filename, mode, flags, dont_inherit, optimize);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_divmod__doc__,
"divmod($module, x, y, /)\n"
"--\n"
"\n"
"Return the tuple (x//y, x%y). Invariant: div*y + mod == x.");
#define BUILTIN_DIVMOD_METHODDEF \
{"divmod", (PyCFunction)builtin_divmod, METH_VARARGS, builtin_divmod__doc__},
static PyObject *
builtin_divmod_impl(PyObject *module, PyObject *x, PyObject *y);
static PyObject *
builtin_divmod(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *x;
PyObject *y;
if (!PyArg_UnpackTuple(args, "divmod",
2, 2,
&x, &y)) {
goto exit;
}
return_value = builtin_divmod_impl(module, x, y);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_eval__doc__,
"eval($module, source, globals=None, locals=None, /)\n"
"--\n"
"\n"
"Evaluate the given source in the context of globals and locals.\n"
"\n"
"The source may be a string representing a Python expression\n"
"or a code object as returned by compile().\n"
"The globals must be a dictionary and locals can be any mapping,\n"
"defaulting to the current globals and locals.\n"
"If only globals is given, locals defaults to it.");
#define BUILTIN_EVAL_METHODDEF \
{"eval", (PyCFunction)builtin_eval, METH_VARARGS, builtin_eval__doc__},
static PyObject *
builtin_eval_impl(PyObject *module, PyObject *source, PyObject *globals,
PyObject *locals);
static PyObject *
builtin_eval(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *source;
PyObject *globals = Py_None;
PyObject *locals = Py_None;
if (!PyArg_UnpackTuple(args, "eval",
1, 3,
&source, &globals, &locals)) {
goto exit;
}
return_value = builtin_eval_impl(module, source, globals, locals);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_exec__doc__,
"exec($module, source, globals=None, locals=None, /)\n"
"--\n"
"\n"
"Execute the given source in the context of globals and locals.\n"
"\n"
"The source may be a string representing one or more Python statements\n"
"or a code object as returned by compile().\n"
"The globals must be a dictionary and locals can be any mapping,\n"
"defaulting to the current globals and locals.\n"
"If only globals is given, locals defaults to it.");
#define BUILTIN_EXEC_METHODDEF \
{"exec", (PyCFunction)builtin_exec, METH_VARARGS, builtin_exec__doc__},
static PyObject *
builtin_exec_impl(PyObject *module, PyObject *source, PyObject *globals,
PyObject *locals);
static PyObject *
builtin_exec(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *source;
PyObject *globals = Py_None;
PyObject *locals = Py_None;
if (!PyArg_UnpackTuple(args, "exec",
1, 3,
&source, &globals, &locals)) {
goto exit;
}
return_value = builtin_exec_impl(module, source, globals, locals);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_globals__doc__,
"globals($module, /)\n"
"--\n"
"\n"
"Return the dictionary containing the current scope\'s global variables.\n"
"\n"
"NOTE: Updates to this dictionary *will* affect name lookups in the current\n"
"global scope and vice-versa.");
#define BUILTIN_GLOBALS_METHODDEF \
{"globals", (PyCFunction)builtin_globals, METH_NOARGS, builtin_globals__doc__},
static PyObject *
builtin_globals_impl(PyObject *module);
static PyObject *
builtin_globals(PyObject *module, PyObject *Py_UNUSED(ignored))
{
return builtin_globals_impl(module);
}
PyDoc_STRVAR(builtin_hasattr__doc__,
"hasattr($module, obj, name, /)\n"
"--\n"
"\n"
"Return whether the object has an attribute with the given name.\n"
"\n"
"This is done by calling getattr(obj, name) and catching AttributeError.");
#define BUILTIN_HASATTR_METHODDEF \
{"hasattr", (PyCFunction)builtin_hasattr, METH_VARARGS, builtin_hasattr__doc__},
static PyObject *
builtin_hasattr_impl(PyObject *module, PyObject *obj, PyObject *name);
static PyObject *
builtin_hasattr(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *obj;
PyObject *name;
if (!PyArg_UnpackTuple(args, "hasattr",
2, 2,
&obj, &name)) {
goto exit;
}
return_value = builtin_hasattr_impl(module, obj, name);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_id__doc__,
"id($module, obj, /)\n"
"--\n"
"\n"
"Return the identity of an object.\n"
"\n"
"This is guaranteed to be unique among simultaneously existing objects.\n"
"(CPython uses the object\'s memory address.)");
#define BUILTIN_ID_METHODDEF \
{"id", (PyCFunction)builtin_id, METH_O, builtin_id__doc__},
PyDoc_STRVAR(builtin_setattr__doc__,
"setattr($module, obj, name, value, /)\n"
"--\n"
"\n"
"Sets the named attribute on the given object to the specified value.\n"
"\n"
"setattr(x, \'y\', v) is equivalent to ``x.y = v\'\'");
#define BUILTIN_SETATTR_METHODDEF \
{"setattr", (PyCFunction)builtin_setattr, METH_VARARGS, builtin_setattr__doc__},
static PyObject *
builtin_setattr_impl(PyObject *module, PyObject *obj, PyObject *name,
PyObject *value);
static PyObject *
builtin_setattr(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *obj;
PyObject *name;
PyObject *value;
if (!PyArg_UnpackTuple(args, "setattr",
3, 3,
&obj, &name, &value)) {
goto exit;
}
return_value = builtin_setattr_impl(module, obj, name, value);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_delattr__doc__,
"delattr($module, obj, name, /)\n"
"--\n"
"\n"
"Deletes the named attribute from the given object.\n"
"\n"
"delattr(x, \'y\') is equivalent to ``del x.y\'\'");
#define BUILTIN_DELATTR_METHODDEF \
{"delattr", (PyCFunction)builtin_delattr, METH_VARARGS, builtin_delattr__doc__},
static PyObject *
builtin_delattr_impl(PyObject *module, PyObject *obj, PyObject *name);
static PyObject *
builtin_delattr(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *obj;
PyObject *name;
if (!PyArg_UnpackTuple(args, "delattr",
2, 2,
&obj, &name)) {
goto exit;
}
return_value = builtin_delattr_impl(module, obj, name);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_hash__doc__,
"hash($module, obj, /)\n"
"--\n"
"\n"
"Return the hash value for the given object.\n"
"\n"
"Two objects that compare equal must also have the same hash value, but the\n"
"reverse is not necessarily true.");
#define BUILTIN_HASH_METHODDEF \
{"hash", (PyCFunction)builtin_hash, METH_O, builtin_hash__doc__},
PyDoc_STRVAR(builtin_hex__doc__,
"hex($module, number, /)\n"
"--\n"
"\n"
"Return the hexadecimal representation of an integer.\n"
"\n"
" >>> hex(12648430)\n"
" \'0xc0ffee\'");
#define BUILTIN_HEX_METHODDEF \
{"hex", (PyCFunction)builtin_hex, METH_O, builtin_hex__doc__},
PyDoc_STRVAR(builtin_len__doc__,
"len($module, obj, /)\n"
"--\n"
"\n"
"Return the number of items in a container.");
#define BUILTIN_LEN_METHODDEF \
{"len", (PyCFunction)builtin_len, METH_O, builtin_len__doc__},
PyDoc_STRVAR(builtin_locals__doc__,
"locals($module, /)\n"
"--\n"
"\n"
"Return a dictionary containing the current scope\'s local variables.\n"
"\n"
"NOTE: Whether or not updates to this dictionary will affect name lookups in\n"
"the local scope and vice-versa is *implementation dependent* and not\n"
"covered by any backwards compatibility guarantees.");
#define BUILTIN_LOCALS_METHODDEF \
{"locals", (PyCFunction)builtin_locals, METH_NOARGS, builtin_locals__doc__},
static PyObject *
builtin_locals_impl(PyObject *module);
static PyObject *
builtin_locals(PyObject *module, PyObject *Py_UNUSED(ignored))
{
return builtin_locals_impl(module);
}
PyDoc_STRVAR(builtin_oct__doc__,
"oct($module, number, /)\n"
"--\n"
"\n"
"Return the octal representation of an integer.\n"
"\n"
" >>> oct(342391)\n"
" \'0o1234567\'");
#define BUILTIN_OCT_METHODDEF \
{"oct", (PyCFunction)builtin_oct, METH_O, builtin_oct__doc__},
PyDoc_STRVAR(builtin_ord__doc__,
"ord($module, c, /)\n"
"--\n"
"\n"
"Return the Unicode code point for a one-character string.");
#define BUILTIN_ORD_METHODDEF \
{"ord", (PyCFunction)builtin_ord, METH_O, builtin_ord__doc__},
PyDoc_STRVAR(builtin_pow__doc__,
"pow($module, x, y, z=None, /)\n"
"--\n"
"\n"
"Equivalent to x**y (with two arguments) or x**y % z (with three arguments)\n"
"\n"
"Some types, such as ints, are able to use a more efficient algorithm when\n"
"invoked using the three argument form.");
#define BUILTIN_POW_METHODDEF \
{"pow", (PyCFunction)builtin_pow, METH_VARARGS, builtin_pow__doc__},
static PyObject *
builtin_pow_impl(PyObject *module, PyObject *x, PyObject *y, PyObject *z);
static PyObject *
builtin_pow(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *x;
PyObject *y;
PyObject *z = Py_None;
if (!PyArg_UnpackTuple(args, "pow",
2, 3,
&x, &y, &z)) {
goto exit;
}
return_value = builtin_pow_impl(module, x, y, z);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_input__doc__,
"input($module, prompt=None, /)\n"
"--\n"
"\n"
"Read a string from standard input. The trailing newline is stripped.\n"
"\n"
"The prompt string, if given, is printed to standard output without a\n"
"trailing newline before reading input.\n"
"\n"
"If the user hits EOF (*nix: Ctrl-D, Windows: Ctrl-Z+Return), raise EOFError.\n"
"On *nix systems, readline is used if available.");
#define BUILTIN_INPUT_METHODDEF \
{"input", (PyCFunction)builtin_input, METH_VARARGS, builtin_input__doc__},
static PyObject *
builtin_input_impl(PyObject *module, PyObject *prompt);
static PyObject *
builtin_input(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *prompt = NULL;
if (!PyArg_UnpackTuple(args, "input",
0, 1,
&prompt)) {
goto exit;
}
return_value = builtin_input_impl(module, prompt);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_repr__doc__,
"repr($module, obj, /)\n"
"--\n"
"\n"
"Return the canonical string representation of the object.\n"
"\n"
"For many object types, including most builtins, eval(repr(obj)) == obj.");
#define BUILTIN_REPR_METHODDEF \
{"repr", (PyCFunction)builtin_repr, METH_O, builtin_repr__doc__},
PyDoc_STRVAR(builtin_sum__doc__,
"sum($module, iterable, start=0, /)\n"
"--\n"
"\n"
"Return the sum of a \'start\' value (default: 0) plus an iterable of numbers\n"
"\n"
"When the iterable is empty, return the start value.\n"
"This function is intended specifically for use with numeric values and may\n"
"reject non-numeric types.");
#define BUILTIN_SUM_METHODDEF \
{"sum", (PyCFunction)builtin_sum, METH_VARARGS, builtin_sum__doc__},
static PyObject *
builtin_sum_impl(PyObject *module, PyObject *iterable, PyObject *start);
static PyObject *
builtin_sum(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *iterable;
PyObject *start = NULL;
if (!PyArg_UnpackTuple(args, "sum",
1, 2,
&iterable, &start)) {
goto exit;
}
return_value = builtin_sum_impl(module, iterable, start);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_isinstance__doc__,
"isinstance($module, obj, class_or_tuple, /)\n"
"--\n"
"\n"
"Return whether an object is an instance of a class or of a subclass thereof.\n"
"\n"
"A tuple, as in ``isinstance(x, (A, B, ...))``, may be given as the target to\n"
"check against. This is equivalent to ``isinstance(x, A) or isinstance(x, B)\n"
"or ...`` etc.");
#define BUILTIN_ISINSTANCE_METHODDEF \
{"isinstance", (PyCFunction)builtin_isinstance, METH_VARARGS, builtin_isinstance__doc__},
static PyObject *
builtin_isinstance_impl(PyObject *module, PyObject *obj,
PyObject *class_or_tuple);
static PyObject *
builtin_isinstance(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *obj;
PyObject *class_or_tuple;
if (!PyArg_UnpackTuple(args, "isinstance",
2, 2,
&obj, &class_or_tuple)) {
goto exit;
}
return_value = builtin_isinstance_impl(module, obj, class_or_tuple);
exit:
return return_value;
}
PyDoc_STRVAR(builtin_issubclass__doc__,
"issubclass($module, cls, class_or_tuple, /)\n"
"--\n"
"\n"
"Return whether \'cls\' is a derived from another class or is the same class.\n"
"\n"
"A tuple, as in ``issubclass(x, (A, B, ...))``, may be given as the target to\n"
"check against. This is equivalent to ``issubclass(x, A) or issubclass(x, B)\n"
"or ...`` etc.");
#define BUILTIN_ISSUBCLASS_METHODDEF \
{"issubclass", (PyCFunction)builtin_issubclass, METH_VARARGS, builtin_issubclass__doc__},
static PyObject *
builtin_issubclass_impl(PyObject *module, PyObject *cls,
PyObject *class_or_tuple);
static PyObject *
builtin_issubclass(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *cls;
PyObject *class_or_tuple;
if (!PyArg_UnpackTuple(args, "issubclass",
2, 2,
&cls, &class_or_tuple)) {
goto exit;
}
return_value = builtin_issubclass_impl(module, cls, class_or_tuple);
exit:
return return_value;
}
/*[clinic end generated code: output=2ef82846acdfa0f5 input=a9049054013a1b77]*/
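
Every entry in this generated header follows the same Argument Clinic shape: a PyDoc_STRVAR docstring, a METHODDEF table entry, a parsing wrapper, and a hand-written *_impl function that the wrapper calls. The sketch below applies that shape to a made-up greet() builtin; the demo_* names are hypothetical and not part of the vendored header.
#include "Python.h"
PyDoc_STRVAR(demo_greet__doc__,
"greet($module, name, /)\n"
"--\n"
"\n"
"Return a greeting for name.");
#define DEMO_GREET_METHODDEF \
    {"greet", (PyCFunction)demo_greet, METH_O, demo_greet__doc__},
static PyObject *
demo_greet_impl(PyObject *module, PyObject *name);
static PyObject *
demo_greet(PyObject *module, PyObject *arg)
{
    PyObject *return_value = NULL;
    PyObject *name;
    if (!PyArg_Parse(arg, "U:greet", &name)) {
        goto exit;
    }
    return_value = demo_greet_impl(module, name);
exit:
    return return_value;
}
/* The hand-written part: the generated header only declares this. */
static PyObject *
demo_greet_impl(PyObject *module, PyObject *name)
{
    return PyUnicode_FromFormat("Hello, %U!", name);
}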

364
third_party/python/Python/clinic/import.c.h generated vendored Normal file

@@ -0,0 +1,364 @@
/*[clinic input]
preserve
[clinic start generated code]*/
PyDoc_STRVAR(_imp_lock_held__doc__,
"lock_held($module, /)\n"
"--\n"
"\n"
"Return True if the import lock is currently held, else False.\n"
"\n"
"On platforms without threads, return False.");
#define _IMP_LOCK_HELD_METHODDEF \
{"lock_held", (PyCFunction)_imp_lock_held, METH_NOARGS, _imp_lock_held__doc__},
static PyObject *
_imp_lock_held_impl(PyObject *module);
static PyObject *
_imp_lock_held(PyObject *module, PyObject *Py_UNUSED(ignored))
{
return _imp_lock_held_impl(module);
}
PyDoc_STRVAR(_imp_acquire_lock__doc__,
"acquire_lock($module, /)\n"
"--\n"
"\n"
"Acquires the interpreter\'s import lock for the current thread.\n"
"\n"
"This lock should be used by import hooks to ensure thread-safety when importing\n"
"modules. On platforms without threads, this function does nothing.");
#define _IMP_ACQUIRE_LOCK_METHODDEF \
{"acquire_lock", (PyCFunction)_imp_acquire_lock, METH_NOARGS, _imp_acquire_lock__doc__},
static PyObject *
_imp_acquire_lock_impl(PyObject *module);
static PyObject *
_imp_acquire_lock(PyObject *module, PyObject *Py_UNUSED(ignored))
{
return _imp_acquire_lock_impl(module);
}
PyDoc_STRVAR(_imp_release_lock__doc__,
"release_lock($module, /)\n"
"--\n"
"\n"
"Release the interpreter\'s import lock.\n"
"\n"
"On platforms without threads, this function does nothing.");
#define _IMP_RELEASE_LOCK_METHODDEF \
{"release_lock", (PyCFunction)_imp_release_lock, METH_NOARGS, _imp_release_lock__doc__},
static PyObject *
_imp_release_lock_impl(PyObject *module);
static PyObject *
_imp_release_lock(PyObject *module, PyObject *Py_UNUSED(ignored))
{
return _imp_release_lock_impl(module);
}
PyDoc_STRVAR(_imp__fix_co_filename__doc__,
"_fix_co_filename($module, code, path, /)\n"
"--\n"
"\n"
"Changes code.co_filename to specify the passed-in file path.\n"
"\n"
" code\n"
" Code object to change.\n"
" path\n"
" File path to use.");
#define _IMP__FIX_CO_FILENAME_METHODDEF \
{"_fix_co_filename", (PyCFunction)_imp__fix_co_filename, METH_VARARGS, _imp__fix_co_filename__doc__},
static PyObject *
_imp__fix_co_filename_impl(PyObject *module, PyCodeObject *code,
PyObject *path);
static PyObject *
_imp__fix_co_filename(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyCodeObject *code;
PyObject *path;
if (!PyArg_ParseTuple(args, "O!U:_fix_co_filename",
&PyCode_Type, &code, &path)) {
goto exit;
}
return_value = _imp__fix_co_filename_impl(module, code, path);
exit:
return return_value;
}
PyDoc_STRVAR(_imp_create_builtin__doc__,
"create_builtin($module, spec, /)\n"
"--\n"
"\n"
"Create an extension module.");
#define _IMP_CREATE_BUILTIN_METHODDEF \
{"create_builtin", (PyCFunction)_imp_create_builtin, METH_O, _imp_create_builtin__doc__},
PyDoc_STRVAR(_imp_extension_suffixes__doc__,
"extension_suffixes($module, /)\n"
"--\n"
"\n"
"Returns the list of file suffixes used to identify extension modules.");
#define _IMP_EXTENSION_SUFFIXES_METHODDEF \
{"extension_suffixes", (PyCFunction)_imp_extension_suffixes, METH_NOARGS, _imp_extension_suffixes__doc__},
static PyObject *
_imp_extension_suffixes_impl(PyObject *module);
static PyObject *
_imp_extension_suffixes(PyObject *module, PyObject *Py_UNUSED(ignored))
{
return _imp_extension_suffixes_impl(module);
}
PyDoc_STRVAR(_imp_init_frozen__doc__,
"init_frozen($module, name, /)\n"
"--\n"
"\n"
"Initializes a frozen module.");
#define _IMP_INIT_FROZEN_METHODDEF \
{"init_frozen", (PyCFunction)_imp_init_frozen, METH_O, _imp_init_frozen__doc__},
static PyObject *
_imp_init_frozen_impl(PyObject *module, PyObject *name);
static PyObject *
_imp_init_frozen(PyObject *module, PyObject *arg)
{
PyObject *return_value = NULL;
PyObject *name;
if (!PyArg_Parse(arg, "U:init_frozen", &name)) {
goto exit;
}
return_value = _imp_init_frozen_impl(module, name);
exit:
return return_value;
}
PyDoc_STRVAR(_imp_get_frozen_object__doc__,
"get_frozen_object($module, name, /)\n"
"--\n"
"\n"
"Create a code object for a frozen module.");
#define _IMP_GET_FROZEN_OBJECT_METHODDEF \
{"get_frozen_object", (PyCFunction)_imp_get_frozen_object, METH_O, _imp_get_frozen_object__doc__},
static PyObject *
_imp_get_frozen_object_impl(PyObject *module, PyObject *name);
static PyObject *
_imp_get_frozen_object(PyObject *module, PyObject *arg)
{
PyObject *return_value = NULL;
PyObject *name;
if (!PyArg_Parse(arg, "U:get_frozen_object", &name)) {
goto exit;
}
return_value = _imp_get_frozen_object_impl(module, name);
exit:
return return_value;
}
PyDoc_STRVAR(_imp_is_frozen_package__doc__,
"is_frozen_package($module, name, /)\n"
"--\n"
"\n"
"Returns True if the module name is of a frozen package.");
#define _IMP_IS_FROZEN_PACKAGE_METHODDEF \
{"is_frozen_package", (PyCFunction)_imp_is_frozen_package, METH_O, _imp_is_frozen_package__doc__},
static PyObject *
_imp_is_frozen_package_impl(PyObject *module, PyObject *name);
static PyObject *
_imp_is_frozen_package(PyObject *module, PyObject *arg)
{
PyObject *return_value = NULL;
PyObject *name;
if (!PyArg_Parse(arg, "U:is_frozen_package", &name)) {
goto exit;
}
return_value = _imp_is_frozen_package_impl(module, name);
exit:
return return_value;
}
PyDoc_STRVAR(_imp_is_builtin__doc__,
"is_builtin($module, name, /)\n"
"--\n"
"\n"
"Returns True if the module name corresponds to a built-in module.");
#define _IMP_IS_BUILTIN_METHODDEF \
{"is_builtin", (PyCFunction)_imp_is_builtin, METH_O, _imp_is_builtin__doc__},
static PyObject *
_imp_is_builtin_impl(PyObject *module, PyObject *name);
static PyObject *
_imp_is_builtin(PyObject *module, PyObject *arg)
{
PyObject *return_value = NULL;
PyObject *name;
if (!PyArg_Parse(arg, "U:is_builtin", &name)) {
goto exit;
}
return_value = _imp_is_builtin_impl(module, name);
exit:
return return_value;
}
PyDoc_STRVAR(_imp_is_frozen__doc__,
"is_frozen($module, name, /)\n"
"--\n"
"\n"
"Returns True if the module name corresponds to a frozen module.");
#define _IMP_IS_FROZEN_METHODDEF \
{"is_frozen", (PyCFunction)_imp_is_frozen, METH_O, _imp_is_frozen__doc__},
static PyObject *
_imp_is_frozen_impl(PyObject *module, PyObject *name);
static PyObject *
_imp_is_frozen(PyObject *module, PyObject *arg)
{
PyObject *return_value = NULL;
PyObject *name;
if (!PyArg_Parse(arg, "U:is_frozen", &name)) {
goto exit;
}
return_value = _imp_is_frozen_impl(module, name);
exit:
return return_value;
}
#if defined(HAVE_DYNAMIC_LOADING)
PyDoc_STRVAR(_imp_create_dynamic__doc__,
"create_dynamic($module, spec, file=None, /)\n"
"--\n"
"\n"
"Create an extension module.");
#define _IMP_CREATE_DYNAMIC_METHODDEF \
{"create_dynamic", (PyCFunction)_imp_create_dynamic, METH_VARARGS, _imp_create_dynamic__doc__},
static PyObject *
_imp_create_dynamic_impl(PyObject *module, PyObject *spec, PyObject *file);
static PyObject *
_imp_create_dynamic(PyObject *module, PyObject *args)
{
PyObject *return_value = NULL;
PyObject *spec;
PyObject *file = NULL;
if (!PyArg_UnpackTuple(args, "create_dynamic",
1, 2,
&spec, &file)) {
goto exit;
}
return_value = _imp_create_dynamic_impl(module, spec, file);
exit:
return return_value;
}
#endif /* defined(HAVE_DYNAMIC_LOADING) */
#if defined(HAVE_DYNAMIC_LOADING)
PyDoc_STRVAR(_imp_exec_dynamic__doc__,
"exec_dynamic($module, mod, /)\n"
"--\n"
"\n"
"Initialize an extension module.");
#define _IMP_EXEC_DYNAMIC_METHODDEF \
{"exec_dynamic", (PyCFunction)_imp_exec_dynamic, METH_O, _imp_exec_dynamic__doc__},
static int
_imp_exec_dynamic_impl(PyObject *module, PyObject *mod);
static PyObject *
_imp_exec_dynamic(PyObject *module, PyObject *mod)
{
PyObject *return_value = NULL;
int _return_value;
_return_value = _imp_exec_dynamic_impl(module, mod);
if ((_return_value == -1) && PyErr_Occurred()) {
goto exit;
}
return_value = PyLong_FromLong((long)_return_value);
exit:
return return_value;
}
#endif /* defined(HAVE_DYNAMIC_LOADING) */
PyDoc_STRVAR(_imp_exec_builtin__doc__,
"exec_builtin($module, mod, /)\n"
"--\n"
"\n"
"Initialize a built-in module.");
#define _IMP_EXEC_BUILTIN_METHODDEF \
{"exec_builtin", (PyCFunction)_imp_exec_builtin, METH_O, _imp_exec_builtin__doc__},
static int
_imp_exec_builtin_impl(PyObject *module, PyObject *mod);
static PyObject *
_imp_exec_builtin(PyObject *module, PyObject *mod)
{
PyObject *return_value = NULL;
int _return_value;
_return_value = _imp_exec_builtin_impl(module, mod);
if ((_return_value == -1) && PyErr_Occurred()) {
goto exit;
}
return_value = PyLong_FromLong((long)_return_value);
exit:
return return_value;
}
#ifndef _IMP_CREATE_DYNAMIC_METHODDEF
#define _IMP_CREATE_DYNAMIC_METHODDEF
#endif /* !defined(_IMP_CREATE_DYNAMIC_METHODDEF) */
#ifndef _IMP_EXEC_DYNAMIC_METHODDEF
#define _IMP_EXEC_DYNAMIC_METHODDEF
#endif /* !defined(_IMP_EXEC_DYNAMIC_METHODDEF) */
/*[clinic end generated code: output=d24d7f73702a907f input=a9049054013a1b77]*/
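
For illustration, the helpers above are also reachable from C through the _imp module object itself; a sketch follows, assuming an initialized interpreter. The demo_module_is_builtin() helper is hypothetical, not part of the vendored code.
#include "Python.h"
/* Hypothetical helper: ask _imp whether a module name is a built-in by
   calling the Python-visible method rather than the C impl directly. */
static int
demo_module_is_builtin(const char *name)
{
    PyObject *imp, *res;
    int ret;
    imp = PyImport_ImportModule("_imp");
    if (imp == NULL)
        return -1;
    res = PyObject_CallMethod(imp, "is_builtin", "s", name);
    Py_DECREF(imp);
    if (res == NULL)
        return -1;
    ret = PyObject_IsTrue(res);
    Py_DECREF(res);
    return ret;
}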

1547
third_party/python/Python/codecs.c vendored Normal file

File diff suppressed because it is too large

5330
third_party/python/Python/compile.c vendored Normal file

File diff suppressed because it is too large

390
third_party/python/Python/condvar.h vendored Normal file

@@ -0,0 +1,390 @@
/*
* Portable condition variable support for windows and pthreads.
* Everything is inline, this header can be included where needed.
*
* APIs generally return 0 on success and non-zero on error,
* and the caller needs to use its platform's error mechanism to
* discover the error (errno, or GetLastError())
*
* Note that some implementations cannot distinguish between a
* condition variable wait time-out and successful wait. Most often
* the difference is moot anyway since the wait condition must be
* re-checked.
* PyCOND_TIMEDWAIT, in addition to returning negative on error,
* thus returns 0 on regular success, 1 on timeout
* or 2 if it can't tell.
*
* There are at least two caveats with using these condition variables,
* due to the fact that they may be emulated with Semaphores on
* Windows:
* 1) While PyCOND_SIGNAL() will wake up at least one thread, we
* cannot currently guarantee that it will be one of the threads
* already waiting in a PyCOND_WAIT() call. It _could_ cause
* the wakeup of a subsequent thread to try a PyCOND_WAIT(),
* including the thread doing the PyCOND_SIGNAL() itself.
* The same applies to PyCOND_BROADCAST(), if N threads are waiting
* then at least N threads will be woken up, but not necessarily
* those already waiting.
* For this reason, don't make the scheduling assumption that a
* specific other thread will get the wakeup signal
* 2) The _mutex_ must be held when calling PyCOND_SIGNAL() and
* PyCOND_BROADCAST().
* While e.g. the posix standard strongly recommends that the mutex
* associated with the condition variable is held when a
* pthread_cond_signal() call is made, this is not a hard requirement,
* although scheduling will not be "reliable" if it isn't. Here
* the mutex is used for internal synchronization of the emulated
* Condition Variable.
*/
#ifndef _CONDVAR_H_
#define _CONDVAR_H_
#include "Python.h"
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
not present in unistd.h. But they still can be implemented as an external
library (e.g. gnu pth in pthread emulation) */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif
#ifdef _POSIX_THREADS
/*
* POSIX support
*/
#define Py_HAVE_CONDVAR
#include <pthread.h>
#define PyCOND_ADD_MICROSECONDS(tv, interval) \
do { /* TODO: add overflow and truncation checks */ \
tv.tv_usec += (long) interval; \
tv.tv_sec += tv.tv_usec / 1000000; \
tv.tv_usec %= 1000000; \
} while (0)
/* We assume all modern POSIX systems have gettimeofday() */
#ifdef GETTIMEOFDAY_NO_TZ
#define PyCOND_GETTIMEOFDAY(ptv) gettimeofday(ptv)
#else
#define PyCOND_GETTIMEOFDAY(ptv) gettimeofday(ptv, (struct timezone *)NULL)
#endif
/* The following functions return 0 on success, nonzero on error */
#define PyMUTEX_T pthread_mutex_t
#define PyMUTEX_INIT(mut) pthread_mutex_init((mut), NULL)
#define PyMUTEX_FINI(mut) pthread_mutex_destroy(mut)
#define PyMUTEX_LOCK(mut) pthread_mutex_lock(mut)
#define PyMUTEX_UNLOCK(mut) pthread_mutex_unlock(mut)
#define PyCOND_T pthread_cond_t
#define PyCOND_INIT(cond) pthread_cond_init((cond), NULL)
#define PyCOND_FINI(cond) pthread_cond_destroy(cond)
#define PyCOND_SIGNAL(cond) pthread_cond_signal(cond)
#define PyCOND_BROADCAST(cond) pthread_cond_broadcast(cond)
#define PyCOND_WAIT(cond, mut) pthread_cond_wait((cond), (mut))
/* return 0 for success, 1 on timeout, -1 on error */
Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cond, PyMUTEX_T *mut, long long us)
{
int r;
struct timespec ts;
struct timeval deadline;
PyCOND_GETTIMEOFDAY(&deadline);
PyCOND_ADD_MICROSECONDS(deadline, us);
ts.tv_sec = deadline.tv_sec;
ts.tv_nsec = deadline.tv_usec * 1000;
r = pthread_cond_timedwait((cond), (mut), &ts);
if (r == ETIMEDOUT)
return 1;
else if (r)
return -1;
else
return 0;
}
#elif defined(NT_THREADS)
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work with XP and later, plus
* example native support on VISTA and onwards.
*/
#define Py_HAVE_CONDVAR
/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif
/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif
#if _PY_EMULATED_WIN_CV
/* The mutex is a CriticalSection object and
The condition variables is emulated with the help of a semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a Semaphore rather than an auto-reset event, because although
an auto-reset event might appear to solve the lost-wakeup bug (race
condition between releasing the outer lock and waiting) because it
maintains state even though a wait hasn't happened, there is still
a lost wakeup problem if more than one thread is interrupted in the
critical place. A semaphore solves that, because its state is counted,
not Boolean.
Because it is ok to signal a condition variable with no one
waiting, we need to keep track of the number of
waiting threads. Otherwise, the semaphore's state could rise
without bound. This also helps reduce the number of "spurious wakeups"
that would otherwise happen.
This implementation still has the problem that the threads woken
with a "signal" aren't necessarily those that are already
waiting. It corresponds to listing 2 in:
http://birrell.org/andrew/papers/ImplementingCVs.pdf
Generic emulations of the pthread_cond_* API using
earlier Win32 functions can be found on the Web.
The following read can give background information on these issues,
but the implementations are all broken in some way.
http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
*/
typedef CRITICAL_SECTION PyMUTEX_T;
Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
{
InitializeCriticalSection(cs);
return 0;
}
Py_LOCAL_INLINE(int)
PyMUTEX_FINI(PyMUTEX_T *cs)
{
DeleteCriticalSection(cs);
return 0;
}
Py_LOCAL_INLINE(int)
PyMUTEX_LOCK(PyMUTEX_T *cs)
{
EnterCriticalSection(cs);
return 0;
}
Py_LOCAL_INLINE(int)
PyMUTEX_UNLOCK(PyMUTEX_T *cs)
{
LeaveCriticalSection(cs);
return 0;
}
/* The ConditionVariable object. From XP onwards it is easily emulated with
* a Semaphore
*/
typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;
Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
{
/* A semaphore with a "large" max value. The positive value
* is only needed to catch those "lost wakeup" events and
* race conditions when a timed wait elapses.
*/
cv->sem = CreateSemaphore(NULL, 0, 100000, NULL);
if (cv->sem==NULL)
return -1;
cv->waiting = 0;
return 0;
}
Py_LOCAL_INLINE(int)
PyCOND_FINI(PyCOND_T *cv)
{
return CloseHandle(cv->sem) ? 0 : -1;
}
/* this implementation can detect a timeout. Returns 1 on timeout,
* 0 otherwise (and -1 on error)
*/
Py_LOCAL_INLINE(int)
_PyCOND_WAIT_MS(PyCOND_T *cv, PyMUTEX_T *cs, DWORD ms)
{
DWORD wait;
cv->waiting++;
PyMUTEX_UNLOCK(cs);
/* "lost wakeup bug" would occur if the caller were interrupted here,
* but we are safe because we are using a semaphore which has an internal
* count.
*/
wait = WaitForSingleObjectEx(cv->sem, ms, FALSE);
PyMUTEX_LOCK(cs);
if (wait != WAIT_OBJECT_0)
--cv->waiting;
/* Here we have a benign race condition with PyCOND_SIGNAL.
* When failure occurs or timeout, it is possible that
* PyCOND_SIGNAL also decrements this value
* and signals releases the mutex. This is benign because it
* just means an extra spurious wakeup for a waiting thread.
* ('waiting' corresponds to the semaphore's "negative" count and
* we may end up with e.g. (waiting == -1 && sem.count == 1). When
* a new thread comes along, it will pass right through, having
* adjusted it to (waiting == 0 && sem.count == 0).
*/
if (wait == WAIT_FAILED)
return -1;
/* return 0 on success, 1 on timeout */
return wait != WAIT_OBJECT_0;
}
Py_LOCAL_INLINE(int)
PyCOND_WAIT(PyCOND_T *cv, PyMUTEX_T *cs)
{
int result = _PyCOND_WAIT_MS(cv, cs, INFINITE);
return result >= 0 ? 0 : result;
}
Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cv, PyMUTEX_T *cs, long long us)
{
return _PyCOND_WAIT_MS(cv, cs, (DWORD)(us/1000));
}
Py_LOCAL_INLINE(int)
PyCOND_SIGNAL(PyCOND_T *cv)
{
/* this test allows PyCOND_SIGNAL to be a no-op unless required
* to wake someone up, thus preventing an unbounded increase of
* the semaphore's internal counter.
*/
if (cv->waiting > 0) {
/* notifying thread decreases the cv->waiting count so that
* a delay between notify and actual wakeup of the target thread
* doesn't cause a number of extra ReleaseSemaphore calls.
*/
cv->waiting--;
return ReleaseSemaphore(cv->sem, 1, NULL) ? 0 : -1;
}
return 0;
}
Py_LOCAL_INLINE(int)
PyCOND_BROADCAST(PyCOND_T *cv)
{
int waiting = cv->waiting;
if (waiting > 0) {
cv->waiting = 0;
return ReleaseSemaphore(cv->sem, waiting, NULL) ? 0 : -1;
}
return 0;
}
#else
/* Use native Win7 primitives if build target is Win7 or higher */
/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;
Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
{
InitializeSRWLock(cs);
return 0;
}
Py_LOCAL_INLINE(int)
PyMUTEX_FINI(PyMUTEX_T *cs)
{
return 0;
}
Py_LOCAL_INLINE(int)
PyMUTEX_LOCK(PyMUTEX_T *cs)
{
AcquireSRWLockExclusive(cs);
return 0;
}
Py_LOCAL_INLINE(int)
PyMUTEX_UNLOCK(PyMUTEX_T *cs)
{
ReleaseSRWLockExclusive(cs);
return 0;
}
typedef CONDITION_VARIABLE PyCOND_T;
Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
{
InitializeConditionVariable(cv);
return 0;
}
Py_LOCAL_INLINE(int)
PyCOND_FINI(PyCOND_T *cv)
{
return 0;
}
Py_LOCAL_INLINE(int)
PyCOND_WAIT(PyCOND_T *cv, PyMUTEX_T *cs)
{
return SleepConditionVariableSRW(cv, cs, INFINITE, 0) ? 0 : -1;
}
/* This implementation makes no distinction about timeouts. Signal
* 2 to indicate that we don't know.
*/
Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cv, PyMUTEX_T *cs, long long us)
{
return SleepConditionVariableSRW(cv, cs, (DWORD)(us/1000), 0) ? 2 : -1;
}
Py_LOCAL_INLINE(int)
PyCOND_SIGNAL(PyCOND_T *cv)
{
WakeConditionVariable(cv);
return 0;
}
Py_LOCAL_INLINE(int)
PyCOND_BROADCAST(PyCOND_T *cv)
{
WakeAllConditionVariable(cv);
return 0;
}
#endif /* _PY_EMULATED_WIN_CV */
#endif /* _POSIX_THREADS, NT_THREADS */
#endif /* _CONDVAR_H_ */
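
As the caveats at the top of this header stress, the wait condition must be re-checked after every wakeup. A sketch of the canonical predicate loop these wrappers expect follows; the demo_* names are hypothetical, and the PyMUTEX_INIT/PyCOND_INIT setup plus error handling of the int return values are omitted.
#include "condvar.h"
static PyMUTEX_T demo_mutex;    /* assume PyMUTEX_INIT was called */
static PyCOND_T demo_cond;      /* assume PyCOND_INIT was called */
static int demo_ready;          /* predicate guarded by demo_mutex */
static void
demo_wait_until_ready(void)
{
    PyMUTEX_LOCK(&demo_mutex);
    while (!demo_ready) {       /* re-check: spurious wakeups happen */
        PyCOND_WAIT(&demo_cond, &demo_mutex);
    }
    PyMUTEX_UNLOCK(&demo_mutex);
}
static void
demo_signal_ready(void)
{
    PyMUTEX_LOCK(&demo_mutex);  /* hold the mutex while signalling */
    demo_ready = 1;
    PyCOND_SIGNAL(&demo_cond);
    PyMUTEX_UNLOCK(&demo_mutex);
}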

2876
third_party/python/Python/dtoa.c vendored Normal file

File diff suppressed because it is too large

31
third_party/python/Python/dup2.c vendored Normal file

@@ -0,0 +1,31 @@
/*
* Public domain dup2() lookalike
* by Curtis Jackson @ AT&T Technologies, Burlington, NC
* electronic address: burl!rcj
*
* dup2 performs the following functions:
*
* Check to make sure that fd1 is a valid open file descriptor.
* Check to see if fd2 is already open; if so, close it.
* Duplicate fd1 onto fd2; checking to make sure fd2 is a valid fd.
* Return fd2 if all went well; return BADEXIT otherwise.
*/
#include <fcntl.h>
#include <unistd.h>
#define BADEXIT -1
int
dup2(int fd1, int fd2)
{
if (fd1 != fd2) {
if (fcntl(fd1, F_GETFL) < 0)
return BADEXIT;
if (fcntl(fd2, F_GETFL) >= 0)
close(fd2);
if (fcntl(fd1, F_DUPFD, fd2) < 0)
return BADEXIT;
}
return fd2;
}
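
A usage sketch of dup2() (whether this fallback or the system call): point stdout at a log file. The redirect helper, path handling, and mode bits are illustrative only.
#include <fcntl.h>
#include <unistd.h>
/* Hypothetical helper: redirect stdout to the given file. */
static int
demo_redirect_stdout(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0644);
    if (fd < 0)
        return -1;
    if (dup2(fd, STDOUT_FILENO) < 0) {
        close(fd);
        return -1;
    }
    close(fd);                  /* stdout now refers to the file */
    return 0;
}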

154
third_party/python/Python/dynamic_annotations.c vendored Normal file

@@ -0,0 +1,154 @@
/* Copyright (c) 2008-2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
#ifdef _MSC_VER
# include <windows.h>
#endif
#ifdef __cplusplus
# error "This file should be built as pure C to avoid name mangling"
#endif
#include <stdlib.h>
#include <string.h>
#include "dynamic_annotations.h"
/* Each function is empty and called (via a macro) only in debug mode.
The arguments are captured by dynamic tools at runtime. */
#if DYNAMIC_ANNOTATIONS_ENABLED == 1
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock){}
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w){}
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed) {}
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier) {}
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier) {}
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock){}
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv){}
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv){}
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size){}
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq){}
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq){}
void AnnotateNewMemory(const char *file, int line,
const volatile void *mem,
long size){}
void AnnotateExpectRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRace(const char *file, int line,
const volatile void *mem,
const char *description){}
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *mem,
long size,
const char *description) {}
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu){}
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg){}
void AnnotateThreadName(const char *file, int line,
const char *name){}
void AnnotateIgnoreReadsBegin(const char *file, int line){}
void AnnotateIgnoreReadsEnd(const char *file, int line){}
void AnnotateIgnoreWritesBegin(const char *file, int line){}
void AnnotateIgnoreWritesEnd(const char *file, int line){}
void AnnotateIgnoreSyncBegin(const char *file, int line){}
void AnnotateIgnoreSyncEnd(const char *file, int line){}
void AnnotateEnableRaceDetection(const char *file, int line, int enable){}
void AnnotateNoOp(const char *file, int line,
const volatile void *arg){}
void AnnotateFlushState(const char *file, int line){}
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
if (RUNNING_ON_VALGRIND) return 1;
#endif
#ifndef _MSC_VER
char *running_on_valgrind_str = getenv("RUNNING_ON_VALGRIND");
if (running_on_valgrind_str) {
return strcmp(running_on_valgrind_str, "0") != 0;
}
#else
/* Visual Studio issues warnings if we use getenv,
* so we use GetEnvironmentVariableA instead.
*/
char value[100] = "1";
int res = GetEnvironmentVariableA("RUNNING_ON_VALGRIND",
value, sizeof(value));
/* value will remain "1" if res == 0 or res >= sizeof(value). The latter
* can happen only if the given value is long, in this case it can't be "0".
*/
if (res > 0 && strcmp(value, "0") != 0)
return 1;
#endif
return 0;
}
/* See the comments in dynamic_annotations.h */
int RunningOnValgrind(void) {
static volatile int running_on_valgrind = -1;
/* C doesn't have thread-safe initialization of statics, and we
don't want to depend on pthread_once here, so hack it. */
int local_running_on_valgrind = running_on_valgrind;
if (local_running_on_valgrind == -1)
running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
return local_running_on_valgrind;
}
#endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1 */
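
RunningOnValgrind() exists so that tests and benchmarks can scale themselves down when the process runs under Valgrind. A sketch of that typical use follows; demo_choose_iterations() and its counts are made up, and note that the definition above is only compiled when DYNAMIC_ANNOTATIONS_ENABLED == 1.
#include "dynamic_annotations.h"
/* Hypothetical helper: shrink a benchmark's workload under Valgrind,
   where execution is far slower than native. */
static long
demo_choose_iterations(void)
{
    return RunningOnValgrind() ? 1000L : 1000000L;
}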

184
third_party/python/Python/dynload_aix.c vendored Normal file

@@ -0,0 +1,184 @@
/* Support for dynamic loading of extension modules */
#include "Python.h"
#include "importdl.h"
#include <errno.h> /* for global errno */
#include <string.h> /* for strerror() */
#include <stdlib.h> /* for malloc(), free() */
#include <sys/ldr.h>
#ifdef AIX_GENUINE_CPLUSPLUS
#include <load.h>
#define aix_load loadAndInit
#else
#define aix_load load
#endif
extern char *Py_GetProgramName(void);
typedef struct Module {
struct Module *next;
void *entry;
} Module, *ModulePtr;
const char *_PyImport_DynLoadFiletab[] = {".so", NULL};
static int
aix_getoldmodules(void **modlistptr)
{
ModulePtr modptr, prevmodptr;
struct ld_info *ldiptr;
char *ldibuf;
int errflag, bufsize = 1024;
unsigned int offset;
char *progname = Py_GetProgramName();
/*
-- Get the list of loaded modules into ld_info structures.
*/
if ((ldibuf = malloc(bufsize)) == NULL) {
PyErr_SetString(PyExc_ImportError, strerror(errno));
return -1;
}
while ((errflag = loadquery(L_GETINFO, ldibuf, bufsize)) == -1
&& errno == ENOMEM) {
free(ldibuf);
bufsize += 1024;
if ((ldibuf = malloc(bufsize)) == NULL) {
PyErr_SetString(PyExc_ImportError, strerror(errno));
return -1;
}
}
if (errflag == -1) {
PyErr_SetString(PyExc_ImportError, strerror(errno));
return -1;
}
/*
-- Make the modules list from the ld_info structures.
*/
ldiptr = (struct ld_info *)ldibuf;
prevmodptr = NULL;
do {
if (strstr(progname, ldiptr->ldinfo_filename) == NULL &&
strstr(ldiptr->ldinfo_filename, "python") == NULL) {
/*
-- Extract only the modules belonging to the main
-- executable + those containing "python" as a
-- substring (like the "python[version]" binary or
-- "libpython[version].a" in case it's a shared lib).
*/
offset = (unsigned int)ldiptr->ldinfo_next;
ldiptr = (struct ld_info *)((char*)ldiptr + offset);
continue;
}
if ((modptr = (ModulePtr)malloc(sizeof(Module))) == NULL) {
PyErr_SetString(PyExc_ImportError, strerror(errno));
while (*modlistptr) {
modptr = (ModulePtr)*modlistptr;
*modlistptr = (void *)modptr->next;
free(modptr);
}
return -1;
}
modptr->entry = ldiptr->ldinfo_dataorg;
modptr->next = NULL;
if (prevmodptr == NULL)
*modlistptr = (void *)modptr;
else
prevmodptr->next = modptr;
prevmodptr = modptr;
offset = (unsigned int)ldiptr->ldinfo_next;
ldiptr = (struct ld_info *)((char*)ldiptr + offset);
} while (offset);
free(ldibuf);
return 0;
}
static void
aix_loaderror(const char *pathname)
{
char *message[1024], errbuf[1024];
PyObject *pathname_ob = NULL;
PyObject *errbuf_ob = NULL;
int i,j;
struct errtab {
int errNo;
char *errstr;
} load_errtab[] = {
{L_ERROR_TOOMANY, "too many errors, rest skipped."},
{L_ERROR_NOLIB, "can't load library:"},
{L_ERROR_UNDEF, "can't find symbol in library:"},
{L_ERROR_RLDBAD,
"RLD index out of range or bad relocation type:"},
{L_ERROR_FORMAT, "not a valid, executable xcoff file:"},
{L_ERROR_MEMBER,
"file not an archive or does not contain requested member:"},
{L_ERROR_TYPE, "symbol table mismatch:"},
{L_ERROR_ALIGN, "text alignment in file is wrong."},
{L_ERROR_SYSTEM, "System error:"},
{L_ERROR_ERRNO, NULL}
};
#define ERRBUF_APPEND(s) strncat(errbuf, s, sizeof(errbuf)-strlen(errbuf)-1)
PyOS_snprintf(errbuf, sizeof(errbuf), "from module %.200s ", pathname);
if (!loadquery(L_GETMESSAGES, &message[0], sizeof(message))) {
ERRBUF_APPEND(strerror(errno));
ERRBUF_APPEND("\n");
}
for(i = 0; message[i] && *message[i]; i++) {
int nerr = atoi(message[i]);
for (j=0; j < Py_ARRAY_LENGTH(load_errtab); j++) {
if (nerr == load_errtab[j].errNo && load_errtab[j].errstr)
ERRBUF_APPEND(load_errtab[j].errstr);
}
while (Py_ISDIGIT(Py_CHARMASK(*message[i]))) message[i]++ ;
ERRBUF_APPEND(message[i]);
ERRBUF_APPEND("\n");
}
errbuf[strlen(errbuf)-1] = '\0'; /* trim off last newline */
pathname_ob = PyUnicode_FromString(pathname);
errbuf_ob = PyUnicode_FromString(errbuf);
PyErr_SetImportError(errbuf_ob, NULL, pathname_ob);
Py_DECREF(pathname_ob);
Py_DECREF(errbuf_ob);
return;
}
dl_funcptr _PyImport_FindSharedFuncptr(const char *prefix,
const char *shortname,
const char *pathname, FILE *fp)
{
dl_funcptr p;
/*
-- Invoke load() with L_NOAUTODEFER leaving the imported symbols
-- of the shared module unresolved. Thus we have to resolve them
-- explicitly with loadbind. The new module is loaded, then we
-- resolve its symbols using the list of already loaded modules
-- (only those that belong to the python executable). Get these
-- with loadquery(L_GETINFO).
*/
static void *staticmodlistptr = NULL;
if (!staticmodlistptr)
if (aix_getoldmodules(&staticmodlistptr) == -1)
return NULL;
p = (dl_funcptr) aix_load((char *)pathname, L_NOAUTODEFER, 0);
if (p == NULL) {
aix_loaderror(pathname);
return NULL;
}
return p;
}

23
third_party/python/Python/dynload_dl.c vendored Normal file
View file

@@ -0,0 +1,23 @@
/* Support for dynamic loading of extension modules */
#include "dl.h"
#include "Python.h"
#include "importdl.h"
extern char *Py_GetProgramName(void);
const char *_PyImport_DynLoadFiletab[] = {".o", NULL};
dl_funcptr _PyImport_FindSharedFuncptr(const char *prefix,
const char *shortname,
const char *pathname, FILE *fp)
{
char funcname[258];
PyOS_snprintf(funcname, sizeof(funcname), "%.20s_%.200s", prefix, shortname);
return dl_loadmod(Py_GetProgramName(), pathname, funcname);
}

View file

@@ -0,0 +1,66 @@
/* Support for dynamic loading of extension modules */
#include "dl.h"
#include <errno.h>
#include "Python.h"
#include "importdl.h"
#if defined(__hp9000s300)
#define FUNCNAME_PATTERN "_%.20s_%.200s"
#else
#define FUNCNAME_PATTERN "%.20s_%.200s"
#endif
const char *_PyImport_DynLoadFiletab[] = {SHLIB_EXT, NULL};
dl_funcptr _PyImport_FindSharedFuncptr(const char *prefix,
const char *shortname,
const char *pathname, FILE *fp)
{
dl_funcptr p;
shl_t lib;
int flags;
char funcname[258];
flags = BIND_FIRST | BIND_DEFERRED;
if (Py_VerboseFlag) {
flags = BIND_FIRST | BIND_IMMEDIATE |
BIND_NONFATAL | BIND_VERBOSE;
printf("shl_load %s\n",pathname);
}
lib = shl_load(pathname, flags, 0);
/* XXX Chuck Blake once wrote that 0 should be BIND_NOSTART? */
if (lib == NULL) {
char buf[256];
PyObject *pathname_ob = NULL;
PyObject *buf_ob = NULL;
PyObject *shortname_ob = NULL;
if (Py_VerboseFlag)
perror(pathname);
PyOS_snprintf(buf, sizeof(buf), "Failed to load %.200s",
pathname);
buf_ob = PyUnicode_FromString(buf);
shortname_ob = PyUnicode_FromString(shortname);
pathname_ob = PyUnicode_FromString(pathname);
PyErr_SetImportError(buf_ob, shortname_ob, pathname_ob);
Py_DECREF(buf_ob);
Py_DECREF(shortname_ob);
Py_DECREF(pathname_ob);
return NULL;
}
PyOS_snprintf(funcname, sizeof(funcname), FUNCNAME_PATTERN,
prefix, shortname);
if (Py_VerboseFlag)
printf("shl_findsym %s\n", funcname);
if (shl_findsym(&lib, funcname, TYPE_UNDEFINED, (void *) &p) == -1) {
shl_unload(lib);
p = NULL;
}
if (p == NULL && Py_VerboseFlag)
perror(funcname);
return p;
}

111
third_party/python/Python/dynload_next.c vendored Normal file
View file

@@ -0,0 +1,111 @@
/* Support for dynamic loading of extension modules on Mac OS X
** All references to "NeXT" are for historical reasons.
*/
#include "Python.h"
#include "importdl.h"
#include <mach-o/dyld.h>
const char *_PyImport_DynLoadFiletab[] = {".so", NULL};
/*
** Python modules are Mach-O MH_BUNDLE files. The best way to load these
** is each in a private namespace, so you can load, say, a module bar and a
** module foo.bar. If we load everything in the global namespace the two
** initbar() symbols will conflict.
** However, it seems some extension packages depend upon being able to access
** each others' global symbols. There seems to be no way to eat our cake and
** have it, so the USE_DYLD_GLOBAL_NAMESPACE define determines which behaviour
** you get.
*/
#ifdef USE_DYLD_GLOBAL_NAMESPACE
#define LINKOPTIONS NSLINKMODULE_OPTION_BINDNOW|NSLINKMODULE_OPTION_RETURN_ON_ERROR
#else
#define LINKOPTIONS NSLINKMODULE_OPTION_BINDNOW| \
NSLINKMODULE_OPTION_RETURN_ON_ERROR|NSLINKMODULE_OPTION_PRIVATE
#endif
dl_funcptr _PyImport_FindSharedFuncptr(const char *prefix,
const char *shortname,
const char *pathname, FILE *fp)
{
dl_funcptr p = NULL;
char funcname[258];
NSObjectFileImageReturnCode rc;
NSObjectFileImage image;
NSModule newModule;
NSSymbol theSym;
const char *errString;
char errBuf[512];
PyOS_snprintf(funcname, sizeof(funcname), "_%.20s_%.200s", prefix, shortname);
#ifdef USE_DYLD_GLOBAL_NAMESPACE
if (NSIsSymbolNameDefined(funcname)) {
theSym = NSLookupAndBindSymbol(funcname);
p = (dl_funcptr)NSAddressOfSymbol(theSym);
return p;
}
#endif
rc = NSCreateObjectFileImageFromFile(pathname, &image);
switch(rc) {
default:
case NSObjectFileImageFailure:
case NSObjectFileImageFormat:
/* for these a message is printed on stderr by dyld */
errString = "Can't create object file image";
break;
case NSObjectFileImageSuccess:
errString = NULL;
break;
case NSObjectFileImageInappropriateFile:
errString = "Inappropriate file type for dynamic loading";
break;
case NSObjectFileImageArch:
errString = "Wrong CPU type in object file";
break;
case NSObjectFileImageAccess:
errString = "Can't read object file (no access)";
break;
}
if (errString == NULL) {
newModule = NSLinkModule(image, pathname, LINKOPTIONS);
if (newModule == NULL) {
int errNo;
const char *fileName, *moreErrorStr;
NSLinkEditErrors c;
NSLinkEditError( &c, &errNo, &fileName, &moreErrorStr );
PyOS_snprintf(errBuf, 512, "Failure linking new module: %s: %s",
fileName, moreErrorStr);
errString = errBuf;
}
}
if (errString != NULL) {
PyErr_SetString(PyExc_ImportError, errString);
return NULL;
}
#ifdef USE_DYLD_GLOBAL_NAMESPACE
if (!NSIsSymbolNameDefined(funcname)) {
/* UnlinkModule() isn't implemented in current versions, but calling it does no harm */
/* NSUnLinkModule(newModule, FALSE); removed: causes problems for ObjC code */
PyErr_Format(PyExc_ImportError,
"Loaded module does not contain symbol %.200s",
funcname);
return NULL;
}
theSym = NSLookupAndBindSymbol(funcname);
#else
theSym = NSLookupSymbolInModule(newModule, funcname);
if ( theSym == NULL ) {
/* NSUnLinkModule(newModule, FALSE); removed: causes problems for ObjC code */
PyErr_Format(PyExc_ImportError,
"Loaded module does not contain symbol %.200s",
funcname);
return NULL;
}
#endif
p = (dl_funcptr)NSAddressOfSymbol(theSym);
return p;
}

View file

@@ -0,0 +1,128 @@
/* Support for dynamic loading of extension modules */
#include "Python.h"
#include "importdl.h"
#include <sys/types.h>
#include <sys/stat.h>
#if defined(__NetBSD__)
#include <sys/param.h>
#if (NetBSD < 199712)
#include <nlist.h>
#include <link.h>
#define dlerror() "error in dynamic linking"
#endif
#endif /* NetBSD */
#ifdef HAVE_DLFCN_H
#include <dlfcn.h>
#endif
#if (defined(__OpenBSD__) || defined(__NetBSD__)) && !defined(__ELF__)
#define LEAD_UNDERSCORE "_"
#else
#define LEAD_UNDERSCORE ""
#endif
/* The .so extension module ABI tag, supplied by the Makefile via
Makefile.pre.in and configure. This is used to discriminate between
incompatible .so files so that extensions for different Python builds can
live in the same directory. E.g. foomodule.cpython-32.so
*/
const char *_PyImport_DynLoadFiletab[] = {
#ifdef __CYGWIN__
".dll",
#else /* !__CYGWIN__ */
"." SOABI ".so",
".abi" PYTHON_ABI_STRING ".so",
".so",
#endif /* __CYGWIN__ */
NULL,
};
static struct {
dev_t dev;
ino_t ino;
void *handle;
} handles[128];
static int nhandles = 0;
dl_funcptr
_PyImport_FindSharedFuncptr(const char *prefix,
const char *shortname,
const char *pathname, FILE *fp)
{
dl_funcptr p;
void *handle;
char funcname[258];
char pathbuf[260];
int dlopenflags=0;
if (strchr(pathname, '/') == NULL) {
/* Prefix bare filename with "./" */
PyOS_snprintf(pathbuf, sizeof(pathbuf), "./%-.255s", pathname);
pathname = pathbuf;
}
PyOS_snprintf(funcname, sizeof(funcname),
LEAD_UNDERSCORE "%.20s_%.200s", prefix, shortname);
if (fp != NULL) {
int i;
struct _Py_stat_struct status;
if (_Py_fstat(fileno(fp), &status) == -1)
return NULL;
for (i = 0; i < nhandles; i++) {
if (status.st_dev == handles[i].dev &&
status.st_ino == handles[i].ino) {
p = (dl_funcptr) dlsym(handles[i].handle,
funcname);
return p;
}
}
if (nhandles < 128) {
handles[nhandles].dev = status.st_dev;
handles[nhandles].ino = status.st_ino;
}
}
dlopenflags = PyThreadState_GET()->interp->dlopenflags;
handle = dlopen(pathname, dlopenflags);
if (handle == NULL) {
PyObject *mod_name;
PyObject *path;
PyObject *error_ob;
const char *error = dlerror();
if (error == NULL)
error = "unknown dlopen() error";
error_ob = PyUnicode_FromString(error);
if (error_ob == NULL)
return NULL;
mod_name = PyUnicode_FromString(shortname);
if (mod_name == NULL) {
Py_DECREF(error_ob);
return NULL;
}
path = PyUnicode_FromString(pathname);
if (path == NULL) {
Py_DECREF(error_ob);
Py_DECREF(mod_name);
return NULL;
}
PyErr_SetImportError(error_ob, mod_name, path);
Py_DECREF(error_ob);
Py_DECREF(mod_name);
Py_DECREF(path);
return NULL;
}
if (fp != NULL && nhandles < 128)
handles[nhandles++].handle = handle;
p = (dl_funcptr) dlsym(handle, funcname);
return p;
}
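The loader above reduces to the classic dlopen()/dlsym() pattern keyed on the "PyInit_<shortname>" export, plus the dev/ino handle cache. A minimal standalone sketch of just that pattern follows; the module path and symbol name are hypothetical and not taken from the file (link with -ldl on glibc):

/* sketch only: not part of the vendored file; path and symbol are placeholders */
#include <dlfcn.h>
#include <stdio.h>

typedef void *(*initfunc)(void);

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "./foomodule.so";  /* hypothetical */
    void *handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
    if (handle == NULL) {
        fprintf(stderr, "dlopen: %s\n", dlerror());
        return 1;
    }
    /* CPython asks for "PyInit_" + short module name; "foo" is assumed here */
    initfunc init = (initfunc)dlsym(handle, "PyInit_foo");
    if (init == NULL)
        fprintf(stderr, "dlsym: %s\n", dlerror());
    else
        printf("found PyInit_foo at %p\n", (void *)init);
    dlclose(handle);
    return 0;
}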

View file

@@ -0,0 +1,9 @@
/* This module provides the necessary stubs for when dynamic loading is
not present. */
#include "Python.h"
#include "importdl.h"
const char *_PyImport_DynLoadFiletab[] = {NULL};

307
third_party/python/Python/dynload_win.c vendored Normal file
View file

@@ -0,0 +1,307 @@
/* Support for dynamic loading of extension modules */
#include "Python.h"
#ifdef HAVE_DIRECT_H
#include <direct.h>
#endif
#include <ctype.h>
#include "importdl.h"
#include "patchlevel.h"
#include <windows.h>
// "activation context" magic - see dl_nt.c...
#if HAVE_SXS
extern ULONG_PTR _Py_ActivateActCtx();
void _Py_DeactivateActCtx(ULONG_PTR cookie);
#endif
#ifdef _DEBUG
#define PYD_DEBUG_SUFFIX "_d"
#else
#define PYD_DEBUG_SUFFIX ""
#endif
#ifdef PYD_PLATFORM_TAG
#define PYD_TAGGED_SUFFIX PYD_DEBUG_SUFFIX ".cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) "-" PYD_PLATFORM_TAG ".pyd"
#else
#define PYD_TAGGED_SUFFIX PYD_DEBUG_SUFFIX ".cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) ".pyd"
#endif
#define PYD_UNTAGGED_SUFFIX PYD_DEBUG_SUFFIX ".pyd"
const char *_PyImport_DynLoadFiletab[] = {
PYD_TAGGED_SUFFIX,
PYD_UNTAGGED_SUFFIX,
NULL
};
/* Case insensitive string compare, to avoid any dependencies on particular
C RTL implementations */
static int strcasecmp (const char *string1, const char *string2)
{
int first, second;
do {
first = tolower(*string1);
second = tolower(*string2);
string1++;
string2++;
} while (first && first == second);
return (first - second);
}
/* Function to return the name of the "python" DLL that the supplied module
directly imports. Looks through the list of imported modules and
returns the first entry that starts with "python" (case sensitive) and
is followed by nothing but numbers until the separator (period).
Returns a pointer to the import name, or NULL if no matching name was
located.
This function parses through the PE header for the module as loaded in
memory by the system loader. The PE header is accessed as documented by
Microsoft in the MSDN PE and COFF specification (2/99), and handles
both PE32 and PE32+. It only worries about the direct import table and
not the delay load import table since it's unlikely an extension is
going to be delay loading Python (after all, it's already loaded).
If any magic values are not found (e.g., the PE header or optional
header magic), then this function simply returns NULL. */
#define DWORD_AT(mem) (*(DWORD *)(mem))
#define WORD_AT(mem) (*(WORD *)(mem))
static char *GetPythonImport (HINSTANCE hModule)
{
unsigned char *dllbase, *import_data, *import_name;
DWORD pe_offset, opt_offset;
WORD opt_magic;
int num_dict_off, import_off;
/* Safety check input */
if (hModule == NULL) {
return NULL;
}
/* Module instance is also the base load address. First portion of
memory is the MS-DOS loader, which holds the offset to the PE
header (from the load base) at 0x3C */
dllbase = (unsigned char *)hModule;
pe_offset = DWORD_AT(dllbase + 0x3C);
/* The PE signature must be "PE\0\0" */
if (memcmp(dllbase+pe_offset,"PE\0\0",4)) {
return NULL;
}
/* Following the PE signature is the standard COFF header (20
bytes) and then the optional header. The optional header starts
with a magic value of 0x10B for PE32 or 0x20B for PE32+ (PE32+
uses 64-bits for some fields). It might also be 0x107 for a ROM
image, but we don't process that here.
The optional header ends with a data dictionary that directly
points to certain types of data, among them the import entries
(in the second table entry). Based on the header type, we
determine offsets for the data dictionary count and the entry
within the dictionary pointing to the imports. */
opt_offset = pe_offset + 4 + 20;
opt_magic = WORD_AT(dllbase+opt_offset);
if (opt_magic == 0x10B) {
/* PE32 */
num_dict_off = 92;
import_off = 104;
} else if (opt_magic == 0x20B) {
/* PE32+ */
num_dict_off = 108;
import_off = 120;
} else {
/* Unsupported */
return NULL;
}
/* Now if an import table exists, offset to it and walk the list of
imports. The import table is an array (ending when an entry has
empty values) of structures (20 bytes each), which contains (at
offset 12) a relative address (to the module base) at which a
string constant holding the import name is located. */
if (DWORD_AT(dllbase + opt_offset + num_dict_off) >= 2) {
/* We have at least 2 tables - the import table is the second
one. But still it may be that the table size is zero */
if (0 == DWORD_AT(dllbase + opt_offset + import_off + sizeof(DWORD)))
return NULL;
import_data = dllbase + DWORD_AT(dllbase +
opt_offset +
import_off);
while (DWORD_AT(import_data)) {
import_name = dllbase + DWORD_AT(import_data+12);
if (strlen(import_name) >= 6 &&
!strncmp(import_name,"python",6)) {
char *pch;
#ifndef _DEBUG
/* In a release version, don't claim that python3.dll is
a Python DLL. */
if (strcmp(import_name, "python3.dll") == 0) {
import_data += 20;
continue;
}
#endif
/* Ensure python prefix is followed only
by numbers to the end of the basename */
pch = import_name + 6;
#ifdef _DEBUG
while (*pch && pch[0] != '_' && pch[1] != 'd' && pch[2] != '.') {
#else
while (*pch && *pch != '.') {
#endif
if (*pch >= '0' && *pch <= '9') {
pch++;
} else {
pch = NULL;
break;
}
}
if (pch) {
/* Found it - return the name */
return import_name;
}
}
import_data += 20;
}
}
return NULL;
}
dl_funcptr _PyImport_FindSharedFuncptrWindows(const char *prefix,
const char *shortname,
PyObject *pathname, FILE *fp)
{
dl_funcptr p;
char funcname[258], *import_python;
const wchar_t *wpathname;
_Py_CheckPython3();
wpathname = _PyUnicode_AsUnicode(pathname);
if (wpathname == NULL)
return NULL;
PyOS_snprintf(funcname, sizeof(funcname), "%.20s_%.200s", prefix, shortname);
{
HINSTANCE hDLL = NULL;
unsigned int old_mode;
#if HAVE_SXS
ULONG_PTR cookie = 0;
#endif
/* Don't display a message box when Python can't load a DLL */
old_mode = SetErrorMode(SEM_FAILCRITICALERRORS);
#if HAVE_SXS
cookie = _Py_ActivateActCtx();
#endif
/* We use LoadLibraryEx so Windows looks for dependent DLLs
in directory of pathname first. */
/* XXX This call doesn't exist in Windows CE */
hDLL = LoadLibraryExW(wpathname, NULL,
LOAD_WITH_ALTERED_SEARCH_PATH);
#if HAVE_SXS
_Py_DeactivateActCtx(cookie);
#endif
/* restore old error mode settings */
SetErrorMode(old_mode);
if (hDLL==NULL){
PyObject *message;
unsigned int errorCode;
/* Get an error string from Win32 error code */
wchar_t theInfo[256]; /* Pointer to error text
from system */
int theLength; /* Length of error text */
errorCode = GetLastError();
theLength = FormatMessageW(
FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS, /* flags */
NULL, /* message source */
errorCode, /* the message (error) ID */
MAKELANGID(LANG_NEUTRAL,
SUBLANG_DEFAULT),
/* Default language */
theInfo, /* the buffer */
sizeof(theInfo) / sizeof(wchar_t), /* size in wchars */
NULL); /* no additional format args. */
/* Problem: could not get the error message.
This should not happen if called correctly. */
if (theLength == 0) {
message = PyUnicode_FromFormat(
"DLL load failed with error code %d",
errorCode);
} else {
/* For some reason a \r\n
is appended to the text */
if (theLength >= 2 &&
theInfo[theLength-2] == '\r' &&
theInfo[theLength-1] == '\n') {
theLength -= 2;
theInfo[theLength] = '\0';
}
message = PyUnicode_FromString(
"DLL load failed: ");
PyUnicode_AppendAndDel(&message,
PyUnicode_FromWideChar(
theInfo,
theLength));
}
if (message != NULL) {
PyObject *shortname_obj = PyUnicode_FromString(shortname);
PyErr_SetImportError(message, shortname_obj, pathname);
Py_XDECREF(shortname_obj);
Py_DECREF(message);
}
return NULL;
} else {
char buffer[256];
PyOS_snprintf(buffer, sizeof(buffer),
#ifdef _DEBUG
"python%d%d_d.dll",
#else
"python%d%d.dll",
#endif
PY_MAJOR_VERSION,PY_MINOR_VERSION);
import_python = GetPythonImport(hDLL);
if (import_python &&
strcasecmp(buffer,import_python)) {
PyErr_Format(PyExc_ImportError,
"Module use of %.150s conflicts "
"with this version of Python.",
import_python);
FreeLibrary(hDLL);
return NULL;
}
}
p = GetProcAddress(hDLL, funcname);
}
return p;
}
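GetPythonImport() above walks the PE headers of a module already mapped into memory. As a rough sketch of the same header layout, the fragment below locates the PE signature in a file read from disk; the file name is a placeholder and only the signature check is shown:

/* sketch only: reads a PE file from disk instead of a mapped module */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    const char *path = "example.dll";           /* hypothetical input */
    unsigned char dos[64], sig[4];
    uint32_t pe_offset;
    FILE *f = fopen(path, "rb");
    if (!f || fread(dos, 1, sizeof(dos), f) != sizeof(dos)) {
        fprintf(stderr, "cannot read %s\n", path);
        if (f) fclose(f);
        return 1;
    }
    memcpy(&pe_offset, dos + 0x3C, 4);          /* e_lfanew: offset of "PE\0\0" */
    if (fseek(f, (long)pe_offset, SEEK_SET) == 0 &&
        fread(sig, 1, 4, f) == 4 &&
        memcmp(sig, "PE\0\0", 4) == 0)
        printf("PE header at offset 0x%x\n", (unsigned)pe_offset);
    else
        printf("not a PE image\n");
    fclose(f);
    return 0;
}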

1197
third_party/python/Python/errors.c vendored Normal file

File diff suppressed because it is too large Load diff

1739
third_party/python/Python/fileutils.c vendored Normal file

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

46
third_party/python/Python/frozen.c vendored Normal file
View file

@@ -0,0 +1,46 @@
/* Dummy frozen modules initializer */
#include "Python.h"
#include "importlib.h"
#include "importlib_external.h"
/* In order to test the support for frozen modules, by default we
define a single frozen module, __hello__. Loading it will print
some famous words... */
/* To regenerate this data after the bytecode or marshal format has changed,
go to ../Tools/freeze/ and freeze the flag.py file; then copy and paste
the appropriate bytes from M___main__.c. */
static unsigned char M___hello__[] = {
227,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,
0,64,0,0,0,115,16,0,0,0,100,0,90,0,101,1,
100,1,131,1,1,0,100,2,83,0,41,3,84,122,12,72,
101,108,108,111,32,119,111,114,108,100,33,78,41,2,218,11,
105,110,105,116,105,97,108,105,122,101,100,218,5,112,114,105,
110,116,169,0,114,3,0,0,0,114,3,0,0,0,250,22,
46,47,84,111,111,108,115,47,102,114,101,101,122,101,47,102,
108,97,103,46,112,121,218,8,60,109,111,100,117,108,101,62,
1,0,0,0,115,2,0,0,0,4,1,
};
#define SIZE (int)sizeof(M___hello__)
static const struct _frozen _PyImport_FrozenModules[] = {
/* importlib */
{"_frozen_importlib", _Py_M__importlib, (int)sizeof(_Py_M__importlib)},
{"_frozen_importlib_external", _Py_M__importlib_external,
(int)sizeof(_Py_M__importlib_external)},
/* Test module */
{"__hello__", M___hello__, SIZE},
/* Test package (negative size indicates package-ness) */
{"__phello__", M___hello__, -SIZE},
{"__phello__.spam", M___hello__, SIZE},
{0, 0, 0} /* sentinel */
};
/* Embedding apps may change this pointer to point to their favorite
collection of frozen modules: */
const struct _frozen *PyImport_FrozenModules = _PyImport_FrozenModules;
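Per the closing comment, an embedding application can install its own table before initialization. A hedged sketch of that pattern; the module name is invented and the bytecode array is a placeholder (real bytecode would come from Tools/freeze):

/* sketch of an embedding app overriding the frozen-module table */
#include "Python.h"

static unsigned char M_myapp[] = { 0 };        /* placeholder, not real bytecode */

static const struct _frozen my_frozen[] = {
    {"myapp", M_myapp, (int)sizeof(M_myapp)},  /* hypothetical frozen module */
    {0, 0, 0}                                  /* sentinel */
};

int main(void)
{
    PyImport_FrozenModules = my_frozen;        /* must happen before Py_Initialize */
    Py_Initialize();
    /* ... application code; "import myapp" would now consult the table above ... */
    return Py_FinalizeEx() < 0 ? 120 : 0;
}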

115
third_party/python/Python/frozenmain.c vendored Normal file
View file

@@ -0,0 +1,115 @@
/* Python interpreter main program for frozen scripts */
#include "Python.h"
#include <locale.h>
#ifdef MS_WINDOWS
extern void PyWinFreeze_ExeInit(void);
extern void PyWinFreeze_ExeTerm(void);
extern int PyInitFrozenExtensions(void);
#endif
/* Main program */
int
Py_FrozenMain(int argc, char **argv)
{
char *p;
int i, n, sts = 1;
int inspect = 0;
int unbuffered = 0;
char *oldloc = NULL;
wchar_t **argv_copy = NULL;
/* We need a second copy, as Python might modify the first one. */
wchar_t **argv_copy2 = NULL;
if (argc > 0) {
argv_copy = PyMem_RawMalloc(sizeof(wchar_t*) * argc);
argv_copy2 = PyMem_RawMalloc(sizeof(wchar_t*) * argc);
if (!argv_copy || !argv_copy2) {
fprintf(stderr, "out of memory\n");
goto error;
}
}
Py_FrozenFlag = 1; /* Suppress errors from getpath.c */
if ((p = Py_GETENV("PYTHONINSPECT")) && *p != '\0')
inspect = 1;
if ((p = Py_GETENV("PYTHONUNBUFFERED")) && *p != '\0')
unbuffered = 1;
if (unbuffered) {
setbuf(stdin, (char *)NULL);
setbuf(stdout, (char *)NULL);
setbuf(stderr, (char *)NULL);
}
oldloc = _PyMem_RawStrdup(setlocale(LC_ALL, NULL));
if (!oldloc) {
fprintf(stderr, "out of memory\n");
goto error;
}
setlocale(LC_ALL, "");
for (i = 0; i < argc; i++) {
argv_copy[i] = Py_DecodeLocale(argv[i], NULL);
argv_copy2[i] = argv_copy[i];
if (!argv_copy[i]) {
fprintf(stderr, "Unable to decode the command line argument #%i\n",
i + 1);
argc = i;
goto error;
}
}
setlocale(LC_ALL, oldloc);
PyMem_RawFree(oldloc);
oldloc = NULL;
#ifdef MS_WINDOWS
PyInitFrozenExtensions();
#endif /* MS_WINDOWS */
if (argc >= 1)
Py_SetProgramName(argv_copy[0]);
Py_Initialize();
#ifdef MS_WINDOWS
PyWinFreeze_ExeInit();
#endif
if (Py_VerboseFlag)
fprintf(stderr, "Python %s\n%s\n",
Py_GetVersion(), Py_GetCopyright());
PySys_SetArgv(argc, argv_copy);
n = PyImport_ImportFrozenModule("__main__");
if (n == 0)
Py_FatalError("__main__ not frozen");
if (n < 0) {
PyErr_Print();
sts = 1;
}
else
sts = 0;
if (inspect && isatty((int)fileno(stdin)))
sts = PyRun_AnyFile(stdin, "<stdin>") != 0;
#ifdef MS_WINDOWS
PyWinFreeze_ExeTerm();
#endif
if (Py_FinalizeEx() < 0) {
sts = 120;
}
error:
PyMem_RawFree(argv_copy);
if (argv_copy2) {
for (i = 0; i < argc; i++)
PyMem_RawFree(argv_copy2[i]);
PyMem_RawFree(argv_copy2);
}
PyMem_RawFree(oldloc);
return sts;
}

161
third_party/python/Python/future.c vendored Normal file
View file

@@ -0,0 +1,161 @@
#include "Python.h"
#include "Python-ast.h"
#include "node.h"
#include "token.h"
#include "graminit.h"
#include "code.h"
#include "symtable.h"
#define UNDEFINED_FUTURE_FEATURE "future feature %.100s is not defined"
#define ERR_LATE_FUTURE \
"from __future__ imports must occur at the beginning of the file"
static int
future_check_features(PyFutureFeatures *ff, stmt_ty s, PyObject *filename)
{
int i;
asdl_seq *names;
assert(s->kind == ImportFrom_kind);
names = s->v.ImportFrom.names;
for (i = 0; i < asdl_seq_LEN(names); i++) {
alias_ty name = (alias_ty)asdl_seq_GET(names, i);
const char *feature = PyUnicode_AsUTF8(name->name);
if (!feature)
return 0;
if (strcmp(feature, FUTURE_NESTED_SCOPES) == 0) {
continue;
} else if (strcmp(feature, FUTURE_GENERATORS) == 0) {
continue;
} else if (strcmp(feature, FUTURE_DIVISION) == 0) {
continue;
} else if (strcmp(feature, FUTURE_ABSOLUTE_IMPORT) == 0) {
continue;
} else if (strcmp(feature, FUTURE_WITH_STATEMENT) == 0) {
continue;
} else if (strcmp(feature, FUTURE_PRINT_FUNCTION) == 0) {
continue;
} else if (strcmp(feature, FUTURE_UNICODE_LITERALS) == 0) {
continue;
} else if (strcmp(feature, FUTURE_BARRY_AS_BDFL) == 0) {
ff->ff_features |= CO_FUTURE_BARRY_AS_BDFL;
} else if (strcmp(feature, FUTURE_GENERATOR_STOP) == 0) {
ff->ff_features |= CO_FUTURE_GENERATOR_STOP;
} else if (strcmp(feature, "braces") == 0) {
PyErr_SetString(PyExc_SyntaxError,
"not a chance");
PyErr_SyntaxLocationObject(filename, s->lineno, s->col_offset);
return 0;
} else {
PyErr_Format(PyExc_SyntaxError,
UNDEFINED_FUTURE_FEATURE, feature);
PyErr_SyntaxLocationObject(filename, s->lineno, s->col_offset);
return 0;
}
}
return 1;
}
static int
future_parse(PyFutureFeatures *ff, mod_ty mod, PyObject *filename)
{
int i, done = 0, prev_line = 0;
stmt_ty first;
if (!(mod->kind == Module_kind || mod->kind == Interactive_kind))
return 1;
if (asdl_seq_LEN(mod->v.Module.body) == 0)
return 1;
/* A subsequent pass will detect future imports that don't
appear at the beginning of the file. There's one case,
however, that is easier to handle here: A series of imports
joined by semi-colons, where the first import is a future
statement but some subsequent import has the future form
but is preceded by a regular import.
*/
i = 0;
first = (stmt_ty)asdl_seq_GET(mod->v.Module.body, i);
if (first->kind == Expr_kind
&& (first->v.Expr.value->kind == Str_kind
|| (first->v.Expr.value->kind == Constant_kind
&& PyUnicode_CheckExact(first->v.Expr.value->v.Constant.value))))
i++;
for (; i < asdl_seq_LEN(mod->v.Module.body); i++) {
stmt_ty s = (stmt_ty)asdl_seq_GET(mod->v.Module.body, i);
if (done && s->lineno > prev_line)
return 1;
prev_line = s->lineno;
/* The tests below will return from this function unless it is
still possible to find a future statement. The only things
that can precede a future statement are another future
statement and a doc string.
*/
if (s->kind == ImportFrom_kind) {
identifier modname = s->v.ImportFrom.module;
if (modname &&
_PyUnicode_EqualToASCIIString(modname, "__future__")) {
if (done) {
PyErr_SetString(PyExc_SyntaxError,
ERR_LATE_FUTURE);
PyErr_SyntaxLocationObject(filename, s->lineno, s->col_offset);
return 0;
}
if (!future_check_features(ff, s, filename))
return 0;
ff->ff_lineno = s->lineno;
}
else {
done = 1;
}
}
else {
done = 1;
}
}
return 1;
}
PyFutureFeatures *
PyFuture_FromASTObject(mod_ty mod, PyObject *filename)
{
PyFutureFeatures *ff;
ff = (PyFutureFeatures *)PyObject_Malloc(sizeof(PyFutureFeatures));
if (ff == NULL) {
PyErr_NoMemory();
return NULL;
}
ff->ff_features = 0;
ff->ff_lineno = -1;
if (!future_parse(ff, mod, filename)) {
PyObject_Free(ff);
return NULL;
}
return ff;
}
PyFutureFeatures *
PyFuture_FromAST(mod_ty mod, const char *filename_str)
{
PyFutureFeatures *ff;
PyObject *filename;
filename = PyUnicode_DecodeFSDefault(filename_str);
if (filename == NULL)
return NULL;
ff = PyFuture_FromASTObject(mod, filename);
Py_DECREF(filename);
return ff;
}

2463
third_party/python/Python/getargs.c vendored Normal file

File diff suppressed because it is too large Load diff

28
third_party/python/Python/getcompiler.c vendored Normal file
View file

@@ -0,0 +1,28 @@
/* Return the compiler identification, if possible. */
#include "Python.h"
#ifndef COMPILER
#ifdef __GNUC__
#define COMPILER "\n[GCC " __VERSION__ "]"
#endif
#endif /* !COMPILER */
#ifndef COMPILER
#ifdef __cplusplus
#define COMPILER "[C++]"
#else
#define COMPILER "[C]"
#endif
#endif /* !COMPILER */
const char *
Py_GetCompiler(void)
{
return COMPILER;
}

View file

@@ -0,0 +1,23 @@
/* Return the copyright string. This is updated manually. */
#include "Python.h"
static const char cprt[] =
"\
Copyright (c) 2001-2021 Python Software Foundation.\n\
All Rights Reserved.\n\
\n\
Copyright (c) 2000 BeOpen.com.\n\
All Rights Reserved.\n\
\n\
Copyright (c) 1995-2001 Corporation for National Research Initiatives.\n\
All Rights Reserved.\n\
\n\
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\n\
All Rights Reserved.";
const char *
Py_GetCopyright(void)
{
return cprt;
}

132
third_party/python/Python/getopt.c vendored Normal file
View file

@@ -0,0 +1,132 @@
/*---------------------------------------------------------------------------*
* <RCS keywords>
*
* C++ Library
*
* Copyright 1992-1994, David Gottner
*
* All Rights Reserved
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose and without fee is hereby granted,
* provided that the above copyright notice, this permission notice and
* the following disclaimer notice appear unmodified in all copies.
*
* I DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL I
* BE LIABLE FOR ANY SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA, OR PROFITS, WHETHER
* IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Nevertheless, I would like to know about bugs in this library or
* suggestions for improvment. Send bug reports and feedback to
* davegottner@delphi.com.
*---------------------------------------------------------------------------*/
/* Modified to support --help and --version, as well as /? on Windows
* by Georg Brandl. */
#include <Python.h>
#include <stdio.h>
#include <string.h>
#include <wchar.h>
#include <pygetopt.h>
#ifdef __cplusplus
extern "C" {
#endif
int _PyOS_opterr = 1; /* generate error messages */
int _PyOS_optind = 1; /* index into argv array */
wchar_t *_PyOS_optarg = NULL; /* optional argument */
static wchar_t *opt_ptr = L"";
void _PyOS_ResetGetOpt(void)
{
_PyOS_opterr = 1;
_PyOS_optind = 1;
_PyOS_optarg = NULL;
opt_ptr = L"";
}
int _PyOS_GetOpt(int argc, wchar_t **argv, wchar_t *optstring)
{
wchar_t *ptr;
wchar_t option;
if (*opt_ptr == '\0') {
if (_PyOS_optind >= argc)
return -1;
#ifdef MS_WINDOWS
else if (wcscmp(argv[_PyOS_optind], L"/?") == 0) {
++_PyOS_optind;
return 'h';
}
#endif
else if (argv[_PyOS_optind][0] != L'-' ||
argv[_PyOS_optind][1] == L'\0' /* lone dash */ )
return -1;
else if (wcscmp(argv[_PyOS_optind], L"--") == 0) {
++_PyOS_optind;
return -1;
}
else if (wcscmp(argv[_PyOS_optind], L"--help") == 0) {
++_PyOS_optind;
return 'h';
}
else if (wcscmp(argv[_PyOS_optind], L"--version") == 0) {
++_PyOS_optind;
return 'V';
}
opt_ptr = &argv[_PyOS_optind++][1];
}
if ((option = *opt_ptr++) == L'\0')
return -1;
if (option == 'J') {
if (_PyOS_opterr)
fprintf(stderr, "-J is reserved for Jython\n");
return '_';
}
if ((ptr = wcschr(optstring, option)) == NULL) {
if (_PyOS_opterr)
fprintf(stderr, "Unknown option: -%c\n", (char)option);
return '_';
}
if (*(ptr + 1) == L':') {
if (*opt_ptr != L'\0') {
_PyOS_optarg = opt_ptr;
opt_ptr = L"";
}
else {
if (_PyOS_optind >= argc) {
if (_PyOS_opterr)
fprintf(stderr,
"Argument expected for the -%c option\n", (char)option);
return '_';
}
_PyOS_optarg = argv[_PyOS_optind++];
}
}
return option;
}
#ifdef __cplusplus
}
#endif

12
third_party/python/Python/getplatform.c vendored Normal file
View file

@@ -0,0 +1,12 @@
#include "Python.h"
#ifndef PLATFORM
#define PLATFORM "unknown"
#endif
const char *
Py_GetPlatform(void)
{
return PLATFORM;
}

15
third_party/python/Python/getversion.c vendored Normal file
View file

@@ -0,0 +1,15 @@
/* Return the full version string. */
#include "Python.h"
#include "patchlevel.h"
const char *
Py_GetVersion(void)
{
static char version[250];
PyOS_snprintf(version, sizeof(version), "%.80s (%.80s) %.80s",
PY_VERSION, Py_GetBuildInfo(), Py_GetCompiler());
return version;
}

2263
third_party/python/Python/graminit.c vendored Normal file

File diff suppressed because it is too large Load diff

2151
third_party/python/Python/import.c vendored Normal file

File diff suppressed because it is too large Load diff

240
third_party/python/Python/importdl.c vendored Normal file
View file

@@ -0,0 +1,240 @@
/* Support for dynamic loading of extension modules */
#include "Python.h"
/* ./configure sets HAVE_DYNAMIC_LOADING if dynamic loading of modules is
supported on this platform. configure will then compile and link in one
of the dynload_*.c files, as appropriate. We will call a function in
those modules to get a function pointer to the module's init function.
*/
#ifdef HAVE_DYNAMIC_LOADING
#include "importdl.h"
#ifdef MS_WINDOWS
extern dl_funcptr _PyImport_FindSharedFuncptrWindows(const char *prefix,
const char *shortname,
PyObject *pathname,
FILE *fp);
#else
extern dl_funcptr _PyImport_FindSharedFuncptr(const char *prefix,
const char *shortname,
const char *pathname, FILE *fp);
#endif
static const char * const ascii_only_prefix = "PyInit";
static const char * const nonascii_prefix = "PyInitU";
/* Get the variable part of a module's export symbol name.
* Returns a bytes instance. For non-ASCII-named modules, the name is
* encoded as per PEP 489.
* The hook_prefix pointer is set to either ascii_only_prefix or
* nonascii_prefix, as appropriate.
*/
static PyObject *
get_encoded_name(PyObject *name, const char **hook_prefix) {
PyObject *tmp;
PyObject *encoded = NULL;
PyObject *modname = NULL;
Py_ssize_t name_len, lastdot;
_Py_IDENTIFIER(replace);
/* Get the short name (substring after last dot) */
name_len = PyUnicode_GetLength(name);
lastdot = PyUnicode_FindChar(name, '.', 0, name_len, -1);
if (lastdot < -1) {
return NULL;
} else if (lastdot >= 0) {
tmp = PyUnicode_Substring(name, lastdot + 1, name_len);
if (tmp == NULL)
return NULL;
name = tmp;
/* "name" now holds a new reference to the substring */
} else {
Py_INCREF(name);
}
/* Encode to ASCII or Punycode, as needed */
encoded = PyUnicode_AsEncodedString(name, "ascii", NULL);
if (encoded != NULL) {
*hook_prefix = ascii_only_prefix;
} else {
if (PyErr_ExceptionMatches(PyExc_UnicodeEncodeError)) {
PyErr_Clear();
encoded = PyUnicode_AsEncodedString(name, "punycode", NULL);
if (encoded == NULL) {
goto error;
}
*hook_prefix = nonascii_prefix;
} else {
goto error;
}
}
/* Replace '-' by '_' */
modname = _PyObject_CallMethodId(encoded, &PyId_replace, "cc", '-', '_');
if (modname == NULL)
goto error;
Py_DECREF(name);
Py_DECREF(encoded);
return modname;
error:
Py_DECREF(name);
Py_XDECREF(encoded);
return NULL;
}
PyObject *
_PyImport_LoadDynamicModuleWithSpec(PyObject *spec, FILE *fp)
{
#ifndef MS_WINDOWS
PyObject *pathbytes = NULL;
#endif
PyObject *name_unicode = NULL, *name = NULL, *path = NULL, *m = NULL;
const char *name_buf, *hook_prefix;
char *oldcontext;
dl_funcptr exportfunc;
PyModuleDef *def;
PyObject *(*p0)(void);
name_unicode = PyObject_GetAttrString(spec, "name");
if (name_unicode == NULL) {
return NULL;
}
if (!PyUnicode_Check(name_unicode)) {
PyErr_SetString(PyExc_TypeError,
"spec.name must be a string");
goto error;
}
name = get_encoded_name(name_unicode, &hook_prefix);
if (name == NULL) {
goto error;
}
name_buf = PyBytes_AS_STRING(name);
path = PyObject_GetAttrString(spec, "origin");
if (path == NULL)
goto error;
#ifdef MS_WINDOWS
exportfunc = _PyImport_FindSharedFuncptrWindows(hook_prefix, name_buf,
path, fp);
#else
pathbytes = PyUnicode_EncodeFSDefault(path);
if (pathbytes == NULL)
goto error;
exportfunc = _PyImport_FindSharedFuncptr(hook_prefix, name_buf,
PyBytes_AS_STRING(pathbytes),
fp);
Py_DECREF(pathbytes);
#endif
if (exportfunc == NULL) {
if (!PyErr_Occurred()) {
PyObject *msg;
msg = PyUnicode_FromFormat(
"dynamic module does not define "
"module export function (%s_%s)",
hook_prefix, name_buf);
if (msg == NULL)
goto error;
PyErr_SetImportError(msg, name_unicode, path);
Py_DECREF(msg);
}
goto error;
}
p0 = (PyObject *(*)(void))exportfunc;
/* Package context is needed for single-phase init */
oldcontext = _Py_PackageContext;
_Py_PackageContext = PyUnicode_AsUTF8(name_unicode);
if (_Py_PackageContext == NULL) {
_Py_PackageContext = oldcontext;
goto error;
}
m = p0();
_Py_PackageContext = oldcontext;
if (m == NULL) {
if (!PyErr_Occurred()) {
PyErr_Format(
PyExc_SystemError,
"initialization of %s failed without raising an exception",
name_buf);
}
goto error;
} else if (PyErr_Occurred()) {
PyErr_Clear();
PyErr_Format(
PyExc_SystemError,
"initialization of %s raised unreported exception",
name_buf);
m = NULL;
goto error;
}
if (Py_TYPE(m) == NULL) {
/* This can happen when a PyModuleDef is returned without calling
* PyModuleDef_Init on it
*/
PyErr_Format(PyExc_SystemError,
"init function of %s returned uninitialized object",
name_buf);
m = NULL; /* prevent segfault in DECREF */
goto error;
}
if (PyObject_TypeCheck(m, &PyModuleDef_Type)) {
Py_DECREF(name_unicode);
Py_DECREF(name);
Py_DECREF(path);
return PyModule_FromDefAndSpec((PyModuleDef*)m, spec);
}
/* Fall back to single-phase init mechanism */
if (hook_prefix == nonascii_prefix) {
/* don't allow legacy init for non-ASCII module names */
PyErr_Format(
PyExc_SystemError,
"initialization of * did not return PyModuleDef",
name_buf);
goto error;
}
/* Remember pointer to module init function. */
def = PyModule_GetDef(m);
if (def == NULL) {
PyErr_Format(PyExc_SystemError,
"initialization of %s did not return an extension "
"module", name_buf);
goto error;
}
def->m_base.m_init = p0;
/* Remember the filename as the __file__ attribute */
if (PyModule_AddObject(m, "__file__", path) < 0)
PyErr_Clear(); /* Not important enough to report */
else
Py_INCREF(path);
if (_PyImport_FixupExtensionObject(m, name_unicode, path) < 0)
goto error;
Py_DECREF(name_unicode);
Py_DECREF(name);
Py_DECREF(path);
return m;
error:
Py_DECREF(name_unicode);
Py_XDECREF(name);
Py_XDECREF(path);
Py_XDECREF(m);
return NULL;
}
#endif /* HAVE_DYNAMIC_LOADING */
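For ASCII module names, the export hook that get_encoded_name() and the prefixes above produce is simply "PyInit_" plus the text after the last dot (non-ASCII names take the PyInitU/punycode path, which is omitted here). A small self-contained illustration with a made-up module name:

/* sketch of the ASCII-only hook naming; not the PEP 489 punycode path */
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *fqname = "package.subpackage.foo";    /* hypothetical module */
    const char *shortname = strrchr(fqname, '.');
    shortname = shortname ? shortname + 1 : fqname;
    char hook[258];
    snprintf(hook, sizeof(hook), "PyInit_%s", shortname);
    printf("%s\n", hook);                             /* -> PyInit_foo */
    return 0;
}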

27
third_party/python/Python/importdl.h vendored Normal file
View file

@@ -0,0 +1,27 @@
#ifndef Py_IMPORTDL_H
#define Py_IMPORTDL_H
#ifdef __cplusplus
extern "C" {
#endif
extern const char *_PyImport_DynLoadFiletab[];
extern PyObject *_PyImport_LoadDynamicModuleWithSpec(PyObject *spec, FILE *);
/* Max length of module suffix searched for -- accommodates "module.slb" */
#define MAXSUFFIXSIZE 12
#ifdef MS_WINDOWS
#include <windows.h>
typedef FARPROC dl_funcptr;
#else
typedef void (*dl_funcptr)(void);
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_IMPORTDL_H */

1822
third_party/python/Python/importlib.h generated vendored Normal file

File diff suppressed because it is too large Load diff

2433
third_party/python/Python/importlib_external.h generated vendored Normal file

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,55 @@
#! /usr/bin/env python
"""Generate C code for the jump table of the threaded code interpreter
(for compilers supporting computed gotos or "labels-as-values", such as gcc).
"""
import os
import sys
try:
from importlib.machinery import SourceFileLoader
except ImportError:
import imp
def find_module(modname):
"""Finds and returns a module in the local dist/checkout.
"""
modpath = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "Lib")
return imp.load_module(modname, *imp.find_module(modname, [modpath]))
else:
def find_module(modname):
"""Finds and returns a module in the local dist/checkout.
"""
modpath = os.path.join(
os.path.dirname(os.path.dirname(__file__)), "Lib", modname + ".py")
return SourceFileLoader(modname, modpath).load_module()
def write_contents(f):
"""Write C code contents to the target file object.
"""
opcode = find_module('opcode')
targets = ['_unknown_opcode'] * 256
for opname, op in opcode.opmap.items():
targets[op] = "TARGET_%s" % opname
f.write("static void *opcode_targets[256] = {\n")
f.write(",\n".join([" &&%s" % s for s in targets]))
f.write("\n};\n")
def main():
if len(sys.argv) >= 3:
sys.exit("Too many arguments")
if len(sys.argv) == 2:
target = sys.argv[1]
else:
target = "Python/opcode_targets.h"
with open(target, "w") as f:
write_contents(f)
print("Jump table written into %s" % target)
if __name__ == "__main__":
main()
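The table this script emits only pays off together with the "labels as values" extension it targets. The toy dispatcher below (three invented opcodes, nothing like the real ceval loop) shows why each table entry must be the address of a TARGET_* label:

/* sketch: minimal computed-goto dispatch (GCC/Clang extension) */
#include <stdio.h>

int main(void)
{
    static void *targets[3] = { &&TARGET_PUSH1, &&TARGET_ADD, &&TARGET_HALT };
    unsigned char program[] = { 0, 0, 1, 2 };   /* PUSH1 PUSH1 ADD HALT */
    int stack[16], sp = 0, pc = 0;

#define DISPATCH() goto *targets[program[pc++]]

    DISPATCH();
TARGET_PUSH1:
    stack[sp++] = 1;
    DISPATCH();
TARGET_ADD:
    sp--;
    stack[sp - 1] += stack[sp];
    DISPATCH();
TARGET_HALT:
    printf("result: %d\n", stack[sp - 1]);      /* prints 2 */
    return 0;
}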

1861
third_party/python/Python/marshal.c vendored Normal file

File diff suppressed because it is too large Load diff

591
third_party/python/Python/modsupport.c vendored Normal file
View file

@@ -0,0 +1,591 @@
/* Module support implementation */
#include "Python.h"
#define FLAG_SIZE_T 1
typedef double va_double;
static PyObject *va_build_value(const char *, va_list, int);
/* Package context -- the full module name for package imports */
char *_Py_PackageContext = NULL;
/* Helper for mkvalue() to scan the length of a format */
static int
countformat(const char *format, int endchar)
{
int count = 0;
int level = 0;
while (level > 0 || *format != endchar) {
switch (*format) {
case '\0':
/* Premature end */
PyErr_SetString(PyExc_SystemError,
"unmatched paren in format");
return -1;
case '(':
case '[':
case '{':
if (level == 0)
count++;
level++;
break;
case ')':
case ']':
case '}':
level--;
break;
case '#':
case '&':
case ',':
case ':':
case ' ':
case '\t':
break;
default:
if (level == 0)
count++;
}
format++;
}
return count;
}
/* Generic function to create a value -- the inverse of getargs() */
/* After an original idea and first implementation by Steven Miale */
static PyObject *do_mktuple(const char**, va_list *, int, int, int);
static PyObject *do_mklist(const char**, va_list *, int, int, int);
static PyObject *do_mkdict(const char**, va_list *, int, int, int);
static PyObject *do_mkvalue(const char**, va_list *, int);
static void
do_ignore(const char **p_format, va_list *p_va, int endchar, int n, int flags)
{
PyObject *v;
int i;
assert(PyErr_Occurred());
v = PyTuple_New(n);
for (i = 0; i < n; i++) {
PyObject *exception, *value, *tb, *w;
PyErr_Fetch(&exception, &value, &tb);
w = do_mkvalue(p_format, p_va, flags);
PyErr_Restore(exception, value, tb);
if (w != NULL) {
if (v != NULL) {
PyTuple_SET_ITEM(v, i, w);
}
else {
Py_DECREF(w);
}
}
}
Py_XDECREF(v);
if (**p_format != endchar) {
PyErr_SetString(PyExc_SystemError,
"Unmatched paren in format");
return;
}
if (endchar)
++*p_format;
}
static PyObject *
do_mkdict(const char **p_format, va_list *p_va, int endchar, int n, int flags)
{
PyObject *d;
int i;
if (n < 0)
return NULL;
if (n % 2) {
PyErr_SetString(PyExc_SystemError,
"Bad dict format");
do_ignore(p_format, p_va, endchar, n, flags);
return NULL;
}
/* Note that we can't bail immediately on error as this will leak
refcounts on any 'N' arguments. */
if ((d = PyDict_New()) == NULL) {
do_ignore(p_format, p_va, endchar, n, flags);
return NULL;
}
for (i = 0; i < n; i+= 2) {
PyObject *k, *v;
k = do_mkvalue(p_format, p_va, flags);
if (k == NULL) {
do_ignore(p_format, p_va, endchar, n - i - 1, flags);
Py_DECREF(d);
return NULL;
}
v = do_mkvalue(p_format, p_va, flags);
if (v == NULL || PyDict_SetItem(d, k, v) < 0) {
do_ignore(p_format, p_va, endchar, n - i - 2, flags);
Py_DECREF(k);
Py_XDECREF(v);
Py_DECREF(d);
return NULL;
}
Py_DECREF(k);
Py_DECREF(v);
}
if (**p_format != endchar) {
Py_DECREF(d);
PyErr_SetString(PyExc_SystemError,
"Unmatched paren in format");
return NULL;
}
if (endchar)
++*p_format;
return d;
}
static PyObject *
do_mklist(const char **p_format, va_list *p_va, int endchar, int n, int flags)
{
PyObject *v;
int i;
if (n < 0)
return NULL;
/* Note that we can't bail immediately on error as this will leak
refcounts on any 'N' arguments. */
v = PyList_New(n);
if (v == NULL) {
do_ignore(p_format, p_va, endchar, n, flags);
return NULL;
}
for (i = 0; i < n; i++) {
PyObject *w = do_mkvalue(p_format, p_va, flags);
if (w == NULL) {
do_ignore(p_format, p_va, endchar, n - i - 1, flags);
Py_DECREF(v);
return NULL;
}
PyList_SET_ITEM(v, i, w);
}
if (**p_format != endchar) {
Py_DECREF(v);
PyErr_SetString(PyExc_SystemError,
"Unmatched paren in format");
return NULL;
}
if (endchar)
++*p_format;
return v;
}
static PyObject *
do_mktuple(const char **p_format, va_list *p_va, int endchar, int n, int flags)
{
PyObject *v;
int i;
if (n < 0)
return NULL;
/* Note that we can't bail immediately on error as this will leak
refcounts on any 'N' arguments. */
if ((v = PyTuple_New(n)) == NULL) {
do_ignore(p_format, p_va, endchar, n, flags);
return NULL;
}
for (i = 0; i < n; i++) {
PyObject *w = do_mkvalue(p_format, p_va, flags);
if (w == NULL) {
do_ignore(p_format, p_va, endchar, n - i - 1, flags);
Py_DECREF(v);
return NULL;
}
PyTuple_SET_ITEM(v, i, w);
}
if (**p_format != endchar) {
Py_DECREF(v);
PyErr_SetString(PyExc_SystemError,
"Unmatched paren in format");
return NULL;
}
if (endchar)
++*p_format;
return v;
}
static PyObject *
do_mkvalue(const char **p_format, va_list *p_va, int flags)
{
for (;;) {
switch (*(*p_format)++) {
case '(':
return do_mktuple(p_format, p_va, ')',
countformat(*p_format, ')'), flags);
case '[':
return do_mklist(p_format, p_va, ']',
countformat(*p_format, ']'), flags);
case '{':
return do_mkdict(p_format, p_va, '}',
countformat(*p_format, '}'), flags);
case 'b':
case 'B':
case 'h':
case 'i':
return PyLong_FromLong((long)va_arg(*p_va, int));
case 'H':
return PyLong_FromLong((long)va_arg(*p_va, unsigned int));
case 'I':
{
unsigned int n;
n = va_arg(*p_va, unsigned int);
return PyLong_FromUnsignedLong(n);
}
case 'n':
#if SIZEOF_SIZE_T!=SIZEOF_LONG
return PyLong_FromSsize_t(va_arg(*p_va, Py_ssize_t));
#endif
/* Fall through from 'n' to 'l' if Py_ssize_t is long */
case 'l':
return PyLong_FromLong(va_arg(*p_va, long));
case 'k':
{
unsigned long n;
n = va_arg(*p_va, unsigned long);
return PyLong_FromUnsignedLong(n);
}
case 'L':
return PyLong_FromLongLong((long long)va_arg(*p_va, long long));
case 'K':
return PyLong_FromUnsignedLongLong((long long)va_arg(*p_va, unsigned long long));
case 'u':
{
PyObject *v;
Py_UNICODE *u = va_arg(*p_va, Py_UNICODE *);
Py_ssize_t n;
if (**p_format == '#') {
++*p_format;
if (flags & FLAG_SIZE_T)
n = va_arg(*p_va, Py_ssize_t);
else
n = va_arg(*p_va, int);
}
else
n = -1;
if (u == NULL) {
v = Py_None;
Py_INCREF(v);
}
else {
if (n < 0)
n = Py_UNICODE_strlen(u);
v = PyUnicode_FromUnicode(u, n);
}
return v;
}
case 'f':
case 'd':
return PyFloat_FromDouble(
(double)va_arg(*p_va, va_double));
case 'D':
return PyComplex_FromCComplex(
*((Py_complex *)va_arg(*p_va, Py_complex *)));
case 'c':
{
char p[1];
p[0] = (char)va_arg(*p_va, int);
return PyBytes_FromStringAndSize(p, 1);
}
case 'C':
{
int i = va_arg(*p_va, int);
return PyUnicode_FromOrdinal(i);
}
case 's':
case 'z':
case 'U': /* XXX deprecated alias */
{
PyObject *v;
const char *str = va_arg(*p_va, const char *);
Py_ssize_t n;
if (**p_format == '#') {
++*p_format;
if (flags & FLAG_SIZE_T)
n = va_arg(*p_va, Py_ssize_t);
else
n = va_arg(*p_va, int);
}
else
n = -1;
if (str == NULL) {
v = Py_None;
Py_INCREF(v);
}
else {
if (n < 0) {
size_t m = strlen(str);
if (m > PY_SSIZE_T_MAX) {
PyErr_SetString(PyExc_OverflowError,
"string too long for Python string");
return NULL;
}
n = (Py_ssize_t)m;
}
v = PyUnicode_FromStringAndSize(str, n);
}
return v;
}
case 'y':
{
PyObject *v;
const char *str = va_arg(*p_va, const char *);
Py_ssize_t n;
if (**p_format == '#') {
++*p_format;
if (flags & FLAG_SIZE_T)
n = va_arg(*p_va, Py_ssize_t);
else
n = va_arg(*p_va, int);
}
else
n = -1;
if (str == NULL) {
v = Py_None;
Py_INCREF(v);
}
else {
if (n < 0) {
size_t m = strlen(str);
if (m > PY_SSIZE_T_MAX) {
PyErr_SetString(PyExc_OverflowError,
"string too long for Python bytes");
return NULL;
}
n = (Py_ssize_t)m;
}
v = PyBytes_FromStringAndSize(str, n);
}
return v;
}
case 'N':
case 'S':
case 'O':
if (**p_format == '&') {
typedef PyObject *(*converter)(void *);
converter func = va_arg(*p_va, converter);
void *arg = va_arg(*p_va, void *);
++*p_format;
return (*func)(arg);
}
else {
PyObject *v;
v = va_arg(*p_va, PyObject *);
if (v != NULL) {
if (*(*p_format - 1) != 'N')
Py_INCREF(v);
}
else if (!PyErr_Occurred())
/* If a NULL was passed
* because a call that should
* have constructed a value
* failed, that's OK, and we
* pass the error on; but if
* no error occurred it's not
* clear that the caller knew
* what she was doing. */
PyErr_SetString(PyExc_SystemError,
"NULL object passed to Py_BuildValue");
return v;
}
case ':':
case ',':
case ' ':
case '\t':
break;
default:
PyErr_SetString(PyExc_SystemError,
"bad format char passed to Py_BuildValue");
return NULL;
}
}
}
PyObject *
Py_BuildValue(const char *format, ...)
{
va_list va;
PyObject* retval;
va_start(va, format);
retval = va_build_value(format, va, 0);
va_end(va);
return retval;
}
PyObject *
_Py_BuildValue_SizeT(const char *format, ...)
{
va_list va;
PyObject* retval;
va_start(va, format);
retval = va_build_value(format, va, FLAG_SIZE_T);
va_end(va);
return retval;
}
PyObject *
Py_VaBuildValue(const char *format, va_list va)
{
return va_build_value(format, va, 0);
}
PyObject *
_Py_VaBuildValue_SizeT(const char *format, va_list va)
{
return va_build_value(format, va, FLAG_SIZE_T);
}
static PyObject *
va_build_value(const char *format, va_list va, int flags)
{
const char *f = format;
int n = countformat(f, '\0');
va_list lva;
PyObject *retval;
if (n < 0)
return NULL;
if (n == 0) {
Py_INCREF(Py_None);
return Py_None;
}
va_copy(lva, va);
if (n == 1) {
retval = do_mkvalue(&f, &lva, flags);
} else {
retval = do_mktuple(&f, &lva, '\0', n, flags);
}
va_end(lva);
return retval;
}
PyObject *
PyEval_CallFunction(PyObject *obj, const char *format, ...)
{
va_list vargs;
PyObject *args;
PyObject *res;
va_start(vargs, format);
args = Py_VaBuildValue(format, vargs);
va_end(vargs);
if (args == NULL)
return NULL;
res = PyEval_CallObject(obj, args);
Py_DECREF(args);
return res;
}
PyObject *
PyEval_CallMethod(PyObject *obj, const char *methodname, const char *format, ...)
{
va_list vargs;
PyObject *meth;
PyObject *args;
PyObject *res;
meth = PyObject_GetAttrString(obj, methodname);
if (meth == NULL)
return NULL;
va_start(vargs, format);
args = Py_VaBuildValue(format, vargs);
va_end(vargs);
if (args == NULL) {
Py_DECREF(meth);
return NULL;
}
res = PyEval_CallObject(meth, args);
Py_DECREF(meth);
Py_DECREF(args);
return res;
}
int
PyModule_AddObject(PyObject *m, const char *name, PyObject *o)
{
PyObject *dict;
if (!PyModule_Check(m)) {
PyErr_SetString(PyExc_TypeError,
"PyModule_AddObject() needs module as first arg");
return -1;
}
if (!o) {
if (!PyErr_Occurred())
PyErr_SetString(PyExc_TypeError,
"PyModule_AddObject() needs non-NULL value");
return -1;
}
dict = PyModule_GetDict(m);
if (dict == NULL) {
/* Internal error -- modules must have a dict! */
PyErr_Format(PyExc_SystemError, "module '%s' has no __dict__",
PyModule_GetName(m));
return -1;
}
if (PyDict_SetItemString(dict, name, o))
return -1;
Py_DECREF(o);
return 0;
}
int
PyModule_AddIntConstant(PyObject *m, const char *name, long value)
{
PyObject *o = PyLong_FromLong(value);
if (!o)
return -1;
if (PyModule_AddObject(m, name, o) == 0)
return 0;
Py_DECREF(o);
return -1;
}
int
PyModule_AddStringConstant(PyObject *m, const char *name, const char *value)
{
PyObject *o = PyUnicode_FromString(value);
if (!o)
return -1;
if (PyModule_AddObject(m, name, o) == 0)
return 0;
Py_DECREF(o);
return -1;
}
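A short example of driving the format machinery above from an embedding program; the format string and values are arbitrary, chosen to exercise the '(', 'i', 's' and '{' cases of do_mkvalue()/do_mkdict(). It assumes linking against the Python library:

/* sketch: Py_BuildValue usage from an embedding program */
#include "Python.h"

int main(void)
{
    Py_Initialize();
    /* "(is{s:i})" -> a tuple of an int, a str, and a one-entry dict */
    PyObject *v = Py_BuildValue("(is{s:i})", 42, "spam", "count", 3);
    if (v != NULL) {
        PyObject_Print(v, stdout, 0);   /* prints (42, 'spam', {'count': 3}) */
        printf("\n");
        Py_DECREF(v);
    }
    return Py_FinalizeEx() < 0 ? 120 : 0;
}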

104
third_party/python/Python/mysnprintf.c vendored Normal file
View file

@@ -0,0 +1,104 @@
#include "Python.h"
/* snprintf() wrappers. If the platform has vsnprintf, we use it, else we
emulate it in a half-hearted way. Even if the platform has it, we wrap
it because platforms differ in what vsnprintf does in case the buffer
is too small: C99 behavior is to return the number of characters that
would have been written had the buffer not been too small, and to set
the last byte of the buffer to \0. At least MS _vsnprintf returns a
negative value instead, and fills the entire buffer with non-\0 data.
The wrappers ensure that str[size-1] is always \0 upon return.
PyOS_snprintf and PyOS_vsnprintf never write more than size bytes
(including the trailing '\0') into str.
If the platform doesn't have vsnprintf, and the buffer size needed to
avoid truncation exceeds size by more than 512, Python aborts with a
Py_FatalError.
Return value (rv):
When 0 <= rv < size, the output conversion was unexceptional, and
rv characters were written to str (excluding a trailing \0 byte at
str[rv]).
When rv >= size, output conversion was truncated, and a buffer of
size rv+1 would have been needed to avoid truncation. str[size-1]
is \0 in this case.
When rv < 0, "something bad happened". str[size-1] is \0 in this
case too, but the rest of str is unreliable. It could be that
an error in format codes was detected by libc, or on platforms
with a non-C99 vsnprintf simply that the buffer wasn't big enough
to avoid truncation, or on platforms without any vsnprintf that
PyMem_Malloc couldn't obtain space for a temp buffer.
CAUTION: Unlike C99, str != NULL and size > 0 are required.
*/
int
PyOS_snprintf(char *str, size_t size, const char *format, ...)
{
int rc;
va_list va;
va_start(va, format);
rc = PyOS_vsnprintf(str, size, format, va);
va_end(va);
return rc;
}
int
PyOS_vsnprintf(char *str, size_t size, const char *format, va_list va)
{
int len; /* # bytes written, excluding \0 */
#ifdef HAVE_SNPRINTF
#define _PyOS_vsnprintf_EXTRA_SPACE 1
#else
#define _PyOS_vsnprintf_EXTRA_SPACE 512
char *buffer;
#endif
assert(str != NULL);
assert(size > 0);
assert(format != NULL);
/* We take a size_t as input but return an int. Sanity check
* our input so that it won't cause an overflow in the
* vsnprintf return value or the buffer malloc size. */
if (size > INT_MAX - _PyOS_vsnprintf_EXTRA_SPACE) {
len = -666;
goto Done;
}
#ifdef HAVE_SNPRINTF
len = vsnprintf(str, size, format, va);
#else
/* Emulate it. */
buffer = PyMem_MALLOC(size + _PyOS_vsnprintf_EXTRA_SPACE);
if (buffer == NULL) {
len = -666;
goto Done;
}
len = vsprintf(buffer, format, va);
if (len < 0)
/* ignore the error */;
else if ((size_t)len >= size + _PyOS_vsnprintf_EXTRA_SPACE)
Py_FatalError("Buffer overflow in PyOS_snprintf/PyOS_vsnprintf");
else {
const size_t to_copy = (size_t)len < size ?
(size_t)len : size - 1;
assert(to_copy < size);
memcpy(str, buffer, to_copy);
str[to_copy] = '\0';
}
PyMem_FREE(buffer);
#endif
Done:
if (size > 0)
str[size-1] = '\0';
return len;
#undef _PyOS_vsnprintf_EXTRA_SPACE
}
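The return-value contract documented at the top of this file mirrors C99 snprintf(), so the caller-side handling can be illustrated with the libc function alone. A self-contained example distinguishing success, truncation and error:

/* sketch: checking the snprintf-style return value described above */
#include <stdio.h>

int main(void)
{
    char buf[8];
    int rv = snprintf(buf, sizeof(buf), "%s", "hello, world");
    if (rv < 0)
        printf("error\n");
    else if ((size_t)rv >= sizeof(buf))
        printf("truncated: needed %d bytes, got \"%s\"\n", rv + 1, buf);
    else
        printf("ok: \"%s\" (%d chars)\n", buf, rv);
    return 0;
}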

291
third_party/python/Python/mystrtoul.c vendored Normal file
View file

@@ -0,0 +1,291 @@
#include "Python.h"
#if defined(__sgi) && defined(WITH_THREAD) && !defined(_SGI_MP_SOURCE)
#define _SGI_MP_SOURCE
#endif
/* strtol and strtoul, renamed to avoid conflicts */
#include <ctype.h>
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
/* Static overflow check values for bases 2 through 36.
* smallmax[base] is the largest unsigned long i such that
* i * base doesn't overflow unsigned long.
*/
static const unsigned long smallmax[] = {
0, /* bases 0 and 1 are invalid */
0,
ULONG_MAX / 2,
ULONG_MAX / 3,
ULONG_MAX / 4,
ULONG_MAX / 5,
ULONG_MAX / 6,
ULONG_MAX / 7,
ULONG_MAX / 8,
ULONG_MAX / 9,
ULONG_MAX / 10,
ULONG_MAX / 11,
ULONG_MAX / 12,
ULONG_MAX / 13,
ULONG_MAX / 14,
ULONG_MAX / 15,
ULONG_MAX / 16,
ULONG_MAX / 17,
ULONG_MAX / 18,
ULONG_MAX / 19,
ULONG_MAX / 20,
ULONG_MAX / 21,
ULONG_MAX / 22,
ULONG_MAX / 23,
ULONG_MAX / 24,
ULONG_MAX / 25,
ULONG_MAX / 26,
ULONG_MAX / 27,
ULONG_MAX / 28,
ULONG_MAX / 29,
ULONG_MAX / 30,
ULONG_MAX / 31,
ULONG_MAX / 32,
ULONG_MAX / 33,
ULONG_MAX / 34,
ULONG_MAX / 35,
ULONG_MAX / 36,
};
/* maximum digits that can't ever overflow for bases 2 through 36,
* calculated by [int(math.floor(math.log(2**32, i))) for i in range(2, 37)].
* Note that this is pessimistic if sizeof(long) > 4.
*/
#if SIZEOF_LONG == 4
static const int digitlimit[] = {
0, 0, 32, 20, 16, 13, 12, 11, 10, 10, /* 0 - 9 */
9, 9, 8, 8, 8, 8, 8, 7, 7, 7, /* 10 - 19 */
7, 7, 7, 7, 6, 6, 6, 6, 6, 6, /* 20 - 29 */
6, 6, 6, 6, 6, 6, 6}; /* 30 - 36 */
#elif SIZEOF_LONG == 8
/* [int(math.floor(math.log(2**64, i))) for i in range(2, 37)] */
static const int digitlimit[] = {
0, 0, 64, 40, 32, 27, 24, 22, 21, 20, /* 0 - 9 */
19, 18, 17, 17, 16, 16, 16, 15, 15, 15, /* 10 - 19 */
14, 14, 14, 14, 13, 13, 13, 13, 13, 13, /* 20 - 29 */
13, 12, 12, 12, 12, 12, 12}; /* 30 - 36 */
#else
#error "Need table for SIZEOF_LONG"
#endif
/*
** strtoul
** This is a general purpose routine for converting
** an ascii string to an integer in an arbitrary base.
** Leading white space is ignored. If 'base' is zero
** it looks for a leading 0b, 0o or 0x to tell which
** base. If these are absent it defaults to 10.
** Base must be 0 or between 2 and 36 (inclusive).
** If 'ptr' is non-NULL it will contain a pointer to
** the end of the scan.
** Errors due to bad pointers will probably result in
** exceptions - we don't check for them.
*/
unsigned long
PyOS_strtoul(const char *str, char **ptr, int base)
{
unsigned long result = 0; /* return value of the function */
int c; /* current input character */
int ovlimit; /* required digits to overflow */
/* skip leading white space */
while (*str && Py_ISSPACE(Py_CHARMASK(*str)))
++str;
/* check for leading 0b, 0o or 0x for auto-base or base 16 */
switch (base) {
case 0: /* look for leading 0b, 0o or 0x */
if (*str == '0') {
++str;
if (*str == 'x' || *str == 'X') {
/* there must be at least one digit after 0x */
if (_PyLong_DigitValue[Py_CHARMASK(str[1])] >= 16) {
if (ptr)
*ptr = (char *)str;
return 0;
}
++str;
base = 16;
} else if (*str == 'o' || *str == 'O') {
/* there must be at least one digit after 0o */
if (_PyLong_DigitValue[Py_CHARMASK(str[1])] >= 8) {
if (ptr)
*ptr = (char *)str;
return 0;
}
++str;
base = 8;
} else if (*str == 'b' || *str == 'B') {
/* there must be at least one digit after 0b */
if (_PyLong_DigitValue[Py_CHARMASK(str[1])] >= 2) {
if (ptr)
*ptr = (char *)str;
return 0;
}
++str;
base = 2;
} else {
/* skip all zeroes... */
while (*str == '0')
++str;
while (Py_ISSPACE(Py_CHARMASK(*str)))
++str;
if (ptr)
*ptr = (char *)str;
return 0;
}
}
else
base = 10;
break;
/* even with explicit base, skip leading 0? prefix */
case 16:
if (*str == '0') {
++str;
if (*str == 'x' || *str == 'X') {
/* there must be at least one digit after 0x */
if (_PyLong_DigitValue[Py_CHARMASK(str[1])] >= 16) {
if (ptr)
*ptr = (char *)str;
return 0;
}
++str;
}
}
break;
case 8:
if (*str == '0') {
++str;
if (*str == 'o' || *str == 'O') {
/* there must be at least one digit after 0o */
if (_PyLong_DigitValue[Py_CHARMASK(str[1])] >= 8) {
if (ptr)
*ptr = (char *)str;
return 0;
}
++str;
}
}
break;
case 2:
if(*str == '0') {
++str;
if (*str == 'b' || *str == 'B') {
/* there must be at least one digit after 0b */
if (_PyLong_DigitValue[Py_CHARMASK(str[1])] >= 2) {
if (ptr)
*ptr = (char *)str;
return 0;
}
++str;
}
}
break;
}
/* catch silly bases */
if (base < 2 || base > 36) {
if (ptr)
*ptr = (char *)str;
return 0;
}
/* skip leading zeroes */
while (*str == '0')
++str;
/* base is guaranteed to be in [2, 36] at this point */
ovlimit = digitlimit[base];
/* do the conversion until non-digit character encountered */
while ((c = _PyLong_DigitValue[Py_CHARMASK(*str)]) < base) {
if (ovlimit > 0) /* no overflow check required */
result = result * base + c;
else { /* requires overflow check */
unsigned long temp_result;
if (ovlimit < 0) /* guaranteed overflow */
goto overflowed;
/* there could be an overflow */
/* check overflow just from shifting */
if (result > smallmax[base])
goto overflowed;
result *= base;
/* check overflow from the digit's value */
temp_result = result + c;
if (temp_result < result)
goto overflowed;
result = temp_result;
}
++str;
--ovlimit;
}
/* set pointer to point to the last character scanned */
if (ptr)
*ptr = (char *)str;
return result;
overflowed:
if (ptr) {
/* spool through remaining digit characters */
while (_PyLong_DigitValue[Py_CHARMASK(*str)] < base)
++str;
*ptr = (char *)str;
}
errno = ERANGE;
return (unsigned long)-1;
}
/* Checking for overflow in PyOS_strtol is a PITA; see comments
* about PY_ABS_LONG_MIN in longobject.c.
*/
#define PY_ABS_LONG_MIN (0-(unsigned long)LONG_MIN)
long
PyOS_strtol(const char *str, char **ptr, int base)
{
long result;
unsigned long uresult;
char sign;
while (*str && Py_ISSPACE(Py_CHARMASK(*str)))
str++;
sign = *str;
if (sign == '+' || sign == '-')
str++;
uresult = PyOS_strtoul(str, ptr, base);
if (uresult <= (unsigned long)LONG_MAX) {
result = (long)uresult;
if (sign == '-')
result = -result;
}
else if (sign == '-' && uresult == PY_ABS_LONG_MIN) {
result = LONG_MIN;
}
else {
errno = ERANGE;
result = LONG_MAX;
}
return result;
}
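A short usage sketch of the two converters above, assuming a program linked against this library; the prefix handling and ERANGE behaviour follow the comments in the code:

#include "Python.h"
#include <errno.h>
#include <stdio.h>

int main(void)
{
    char *end;

    /* base 0: a leading 0x, 0o or 0b selects the base */
    printf("%lu\n", PyOS_strtoul("0x1f", &end, 0));   /* 31 */
    printf("%lu\n", PyOS_strtoul("0o17", &end, 0));   /* 15 */
    printf("%lu\n", PyOS_strtoul("0b101", &end, 0));  /* 5 */

    /* the signed wrapper handles an optional leading sign */
    printf("%ld\n", PyOS_strtol("-42", &end, 10));    /* -42 */

    /* overflow sets errno to ERANGE and returns (unsigned long)-1 */
    errno = 0;
    if (PyOS_strtoul("99999999999999999999999", &end, 10) == (unsigned long)-1
        && errno == ERANGE)
        puts("overflow detected");
    return 0;
}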

258
third_party/python/Python/opcode_targets.h generated vendored Normal file
View file

@ -0,0 +1,258 @@
static void *opcode_targets[256] = {
&&_unknown_opcode,
&&TARGET_POP_TOP,
&&TARGET_ROT_TWO,
&&TARGET_ROT_THREE,
&&TARGET_DUP_TOP,
&&TARGET_DUP_TOP_TWO,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_NOP,
&&TARGET_UNARY_POSITIVE,
&&TARGET_UNARY_NEGATIVE,
&&TARGET_UNARY_NOT,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_UNARY_INVERT,
&&TARGET_BINARY_MATRIX_MULTIPLY,
&&TARGET_INPLACE_MATRIX_MULTIPLY,
&&_unknown_opcode,
&&TARGET_BINARY_POWER,
&&TARGET_BINARY_MULTIPLY,
&&_unknown_opcode,
&&TARGET_BINARY_MODULO,
&&TARGET_BINARY_ADD,
&&TARGET_BINARY_SUBTRACT,
&&TARGET_BINARY_SUBSCR,
&&TARGET_BINARY_FLOOR_DIVIDE,
&&TARGET_BINARY_TRUE_DIVIDE,
&&TARGET_INPLACE_FLOOR_DIVIDE,
&&TARGET_INPLACE_TRUE_DIVIDE,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_GET_AITER,
&&TARGET_GET_ANEXT,
&&TARGET_BEFORE_ASYNC_WITH,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_INPLACE_ADD,
&&TARGET_INPLACE_SUBTRACT,
&&TARGET_INPLACE_MULTIPLY,
&&_unknown_opcode,
&&TARGET_INPLACE_MODULO,
&&TARGET_STORE_SUBSCR,
&&TARGET_DELETE_SUBSCR,
&&TARGET_BINARY_LSHIFT,
&&TARGET_BINARY_RSHIFT,
&&TARGET_BINARY_AND,
&&TARGET_BINARY_XOR,
&&TARGET_BINARY_OR,
&&TARGET_INPLACE_POWER,
&&TARGET_GET_ITER,
&&TARGET_GET_YIELD_FROM_ITER,
&&TARGET_PRINT_EXPR,
&&TARGET_LOAD_BUILD_CLASS,
&&TARGET_YIELD_FROM,
&&TARGET_GET_AWAITABLE,
&&_unknown_opcode,
&&TARGET_INPLACE_LSHIFT,
&&TARGET_INPLACE_RSHIFT,
&&TARGET_INPLACE_AND,
&&TARGET_INPLACE_XOR,
&&TARGET_INPLACE_OR,
&&TARGET_BREAK_LOOP,
&&TARGET_WITH_CLEANUP_START,
&&TARGET_WITH_CLEANUP_FINISH,
&&TARGET_RETURN_VALUE,
&&TARGET_IMPORT_STAR,
&&TARGET_SETUP_ANNOTATIONS,
&&TARGET_YIELD_VALUE,
&&TARGET_POP_BLOCK,
&&TARGET_END_FINALLY,
&&TARGET_POP_EXCEPT,
&&TARGET_STORE_NAME,
&&TARGET_DELETE_NAME,
&&TARGET_UNPACK_SEQUENCE,
&&TARGET_FOR_ITER,
&&TARGET_UNPACK_EX,
&&TARGET_STORE_ATTR,
&&TARGET_DELETE_ATTR,
&&TARGET_STORE_GLOBAL,
&&TARGET_DELETE_GLOBAL,
&&_unknown_opcode,
&&TARGET_LOAD_CONST,
&&TARGET_LOAD_NAME,
&&TARGET_BUILD_TUPLE,
&&TARGET_BUILD_LIST,
&&TARGET_BUILD_SET,
&&TARGET_BUILD_MAP,
&&TARGET_LOAD_ATTR,
&&TARGET_COMPARE_OP,
&&TARGET_IMPORT_NAME,
&&TARGET_IMPORT_FROM,
&&TARGET_JUMP_FORWARD,
&&TARGET_JUMP_IF_FALSE_OR_POP,
&&TARGET_JUMP_IF_TRUE_OR_POP,
&&TARGET_JUMP_ABSOLUTE,
&&TARGET_POP_JUMP_IF_FALSE,
&&TARGET_POP_JUMP_IF_TRUE,
&&TARGET_LOAD_GLOBAL,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_CONTINUE_LOOP,
&&TARGET_SETUP_LOOP,
&&TARGET_SETUP_EXCEPT,
&&TARGET_SETUP_FINALLY,
&&_unknown_opcode,
&&TARGET_LOAD_FAST,
&&TARGET_STORE_FAST,
&&TARGET_DELETE_FAST,
&&TARGET_STORE_ANNOTATION,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_RAISE_VARARGS,
&&TARGET_CALL_FUNCTION,
&&TARGET_MAKE_FUNCTION,
&&TARGET_BUILD_SLICE,
&&_unknown_opcode,
&&TARGET_LOAD_CLOSURE,
&&TARGET_LOAD_DEREF,
&&TARGET_STORE_DEREF,
&&TARGET_DELETE_DEREF,
&&_unknown_opcode,
&&_unknown_opcode,
&&TARGET_CALL_FUNCTION_KW,
&&TARGET_CALL_FUNCTION_EX,
&&TARGET_SETUP_WITH,
&&TARGET_EXTENDED_ARG,
&&TARGET_LIST_APPEND,
&&TARGET_SET_ADD,
&&TARGET_MAP_ADD,
&&TARGET_LOAD_CLASSDEREF,
&&TARGET_BUILD_LIST_UNPACK,
&&TARGET_BUILD_MAP_UNPACK,
&&TARGET_BUILD_MAP_UNPACK_WITH_CALL,
&&TARGET_BUILD_TUPLE_UNPACK,
&&TARGET_BUILD_SET_UNPACK,
&&TARGET_SETUP_ASYNC_WITH,
&&TARGET_FORMAT_VALUE,
&&TARGET_BUILD_CONST_KEY_MAP,
&&TARGET_BUILD_STRING,
&&TARGET_BUILD_TUPLE_UNPACK_WITH_CALL,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode,
&&_unknown_opcode
};
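This table is consumed by the computed-goto dispatch in ceval.c: each &&TARGET_* entry is the address of a label inside the eval loop, so each handler can jump straight to the next opcode's handler instead of going back through a switch. A minimal self-contained sketch of the technique, using made-up opcodes rather than CPython's (it relies on the GCC/Clang labels-as-values extension):

/* toy token-threaded interpreter built on an opcode_targets-style table */
#include <stdio.h>

enum { OP_PUSH1, OP_ADD, OP_PRINT, OP_HALT };

static int run(const unsigned char *code)
{
    static void *targets[] = {
        &&TARGET_PUSH1, &&TARGET_ADD, &&TARGET_PRINT, &&TARGET_HALT
    };
    int stack[16];
    int sp = 0;
#define DISPATCH() goto *targets[*code++]
    DISPATCH();
TARGET_PUSH1: stack[sp++] = 1;                DISPATCH();
TARGET_ADD:   sp--; stack[sp-1] += stack[sp]; DISPATCH();
TARGET_PRINT: printf("%d\n", stack[sp-1]);    DISPATCH();
TARGET_HALT:  return 0;
#undef DISPATCH
}

int main(void)
{
    static const unsigned char prog[] = { OP_PUSH1, OP_PUSH1, OP_ADD, OP_PRINT, OP_HALT };
    return run(prog);   /* prints 2 */
}

Ending every handler in its own indirect jump gives the branch predictor a separate prediction site per opcode, which is the usual motivation for this layout.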

911
third_party/python/Python/peephole.c vendored Normal file
View file

@ -0,0 +1,911 @@
/* Peephole optimizations for bytecode compiler. */
#include "Python.h"
#include "Python-ast.h"
#include "node.h"
#include "ast.h"
#include "code.h"
#include "symtable.h"
#include "opcode.h"
#include "wordcode_helpers.h"
#define UNCONDITIONAL_JUMP(op) (op==JUMP_ABSOLUTE || op==JUMP_FORWARD)
#define CONDITIONAL_JUMP(op) (op==POP_JUMP_IF_FALSE || op==POP_JUMP_IF_TRUE \
|| op==JUMP_IF_FALSE_OR_POP || op==JUMP_IF_TRUE_OR_POP)
#define ABSOLUTE_JUMP(op) (op==JUMP_ABSOLUTE || op==CONTINUE_LOOP \
|| op==POP_JUMP_IF_FALSE || op==POP_JUMP_IF_TRUE \
|| op==JUMP_IF_FALSE_OR_POP || op==JUMP_IF_TRUE_OR_POP)
#define JUMPS_ON_TRUE(op) (op==POP_JUMP_IF_TRUE || op==JUMP_IF_TRUE_OR_POP)
#define GETJUMPTGT(arr, i) (get_arg(arr, i) / sizeof(_Py_CODEUNIT) + \
(ABSOLUTE_JUMP(_Py_OPCODE(arr[i])) ? 0 : i+1))
#define ISBASICBLOCK(blocks, start, end) \
(blocks[start]==blocks[end])
#define CONST_STACK_CREATE() { \
const_stack_size = 256; \
const_stack = PyMem_New(PyObject *, const_stack_size); \
if (!const_stack) { \
PyErr_NoMemory(); \
goto exitError; \
} \
}
#define CONST_STACK_DELETE() do { \
if (const_stack) \
PyMem_Free(const_stack); \
} while(0)
#define CONST_STACK_LEN() ((unsigned)(const_stack_top + 1))
#define CONST_STACK_PUSH_OP(i) do { \
PyObject *_x; \
assert(_Py_OPCODE(codestr[i]) == LOAD_CONST); \
assert(PyList_GET_SIZE(consts) > (Py_ssize_t)get_arg(codestr, i)); \
_x = PyList_GET_ITEM(consts, get_arg(codestr, i)); \
if (++const_stack_top >= const_stack_size) { \
const_stack_size *= 2; \
PyMem_Resize(const_stack, PyObject *, const_stack_size); \
if (!const_stack) { \
PyErr_NoMemory(); \
goto exitError; \
} \
} \
const_stack[const_stack_top] = _x; \
in_consts = 1; \
} while(0)
#define CONST_STACK_RESET() do { \
const_stack_top = -1; \
} while(0)
#define CONST_STACK_LASTN(i) \
&const_stack[CONST_STACK_LEN() - i]
#define CONST_STACK_POP(i) do { \
assert(CONST_STACK_LEN() >= i); \
const_stack_top -= i; \
} while(0)
/* Scans back N consecutive LOAD_CONST instructions, skipping NOPs,
returns index of the Nth last's LOAD_CONST's EXTENDED_ARG prefix.
Callers are responsible to check CONST_STACK_LEN beforehand.
*/
static Py_ssize_t
lastn_const_start(const _Py_CODEUNIT *codestr, Py_ssize_t i, Py_ssize_t n)
{
assert(n > 0);
for (;;) {
i--;
assert(i >= 0);
if (_Py_OPCODE(codestr[i]) == LOAD_CONST) {
if (!--n) {
while (i > 0 && _Py_OPCODE(codestr[i-1]) == EXTENDED_ARG) {
i--;
}
return i;
}
}
else {
assert(_Py_OPCODE(codestr[i]) == NOP ||
_Py_OPCODE(codestr[i]) == EXTENDED_ARG);
}
}
}
/* Scans through EXTENDED ARGs, seeking the index of the effective opcode */
static Py_ssize_t
find_op(const _Py_CODEUNIT *codestr, Py_ssize_t codelen, Py_ssize_t i)
{
while (i < codelen && _Py_OPCODE(codestr[i]) == EXTENDED_ARG) {
i++;
}
return i;
}
/* Given the index of the effective opcode,
scan back to construct the oparg with EXTENDED_ARG */
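/* Example: an oparg of 0x0102 is emitted as EXTENDED_ARG 0x01 followed
   by the opcode with arg byte 0x02; get_arg() on the opcode's index ORs
   the prefix back in as the high byte, giving 0x0102. Up to three
   EXTENDED_ARG prefixes extend the argument to a full 32 bits. */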
static unsigned int
get_arg(const _Py_CODEUNIT *codestr, Py_ssize_t i)
{
_Py_CODEUNIT word;
unsigned int oparg = _Py_OPARG(codestr[i]);
if (i >= 1 && _Py_OPCODE(word = codestr[i-1]) == EXTENDED_ARG) {
oparg |= _Py_OPARG(word) << 8;
if (i >= 2 && _Py_OPCODE(word = codestr[i-2]) == EXTENDED_ARG) {
oparg |= _Py_OPARG(word) << 16;
if (i >= 3 && _Py_OPCODE(word = codestr[i-3]) == EXTENDED_ARG) {
oparg |= _Py_OPARG(word) << 24;
}
}
}
return oparg;
}
/* Fill the region with NOPs. */
static void
fill_nops(_Py_CODEUNIT *codestr, Py_ssize_t start, Py_ssize_t end)
{
memset(codestr + start, NOP, (end - start) * sizeof(_Py_CODEUNIT));
}
/* Given the index of the effective opcode,
attempt to replace the argument, taking into account EXTENDED_ARG.
Returns -1 on failure, or the new op index on success */
static Py_ssize_t
set_arg(_Py_CODEUNIT *codestr, Py_ssize_t i, unsigned int oparg)
{
unsigned int curarg = get_arg(codestr, i);
int curilen, newilen;
if (curarg == oparg)
return i;
curilen = instrsize(curarg);
newilen = instrsize(oparg);
if (curilen < newilen) {
return -1;
}
write_op_arg(codestr + i + 1 - curilen, _Py_OPCODE(codestr[i]), oparg, newilen);
fill_nops(codestr, i + 1 - curilen + newilen, i + 1);
return i-curilen+newilen;
}
/* Attempt to write op/arg at end of specified region of memory.
Preceding memory in the region is overwritten with NOPs.
Returns -1 on failure, op index on success */
static Py_ssize_t
copy_op_arg(_Py_CODEUNIT *codestr, Py_ssize_t i, unsigned char op,
unsigned int oparg, Py_ssize_t maxi)
{
int ilen = instrsize(oparg);
if (i + ilen > maxi) {
return -1;
}
write_op_arg(codestr + maxi - ilen, op, oparg, ilen);
fill_nops(codestr, i, maxi - ilen);
return maxi - 1;
}
/* Check that a collection (including its subcollections) does not contain
too many items. This protects against creating a constant whose hash
would take too long to compute.
"limit" is the maximal number of items.
Returns a negative number if the total number of items exceeds the
limit. Otherwise returns the limit minus the total number of items.
*/
static Py_ssize_t
check_complexity(PyObject *obj, Py_ssize_t limit)
{
if (PyTuple_Check(obj)) {
Py_ssize_t i;
limit -= PyTuple_GET_SIZE(obj);
for (i = 0; limit >= 0 && i < PyTuple_GET_SIZE(obj); i++) {
limit = check_complexity(PyTuple_GET_ITEM(obj, i), limit);
}
return limit;
}
else if (PyFrozenSet_Check(obj)) {
Py_ssize_t i = 0;
PyObject *item;
Py_hash_t hash;
limit -= PySet_GET_SIZE(obj);
while (limit >= 0 && _PySet_NextEntry(obj, &i, &item, &hash)) {
limit = check_complexity(item, limit);
}
}
return limit;
}
/* Replace LOAD_CONST c1, LOAD_CONST c2 ... LOAD_CONST cn, BUILD_TUPLE n
with LOAD_CONST (c1, c2, ... cn).
The consts table must still be in list form so that the
new constant (c1, c2, ... cn) can be appended.
Called with codestr pointing to the first LOAD_CONST.
Bails out with no change if one or more of the LOAD_CONSTs is missing.
Also works for BUILD_LIST and BUILD_SET when followed by an "in" or "not in"
test; for BUILD_SET it assembles a frozenset rather than a tuple.
*/
static Py_ssize_t
fold_tuple_on_constants(_Py_CODEUNIT *codestr, Py_ssize_t c_start,
Py_ssize_t opcode_end, unsigned char opcode,
PyObject *consts, PyObject **objs, int n)
{
PyObject *newconst, *constant;
Py_ssize_t i, len_consts;
/* Pre-conditions */
assert(PyList_CheckExact(consts));
/* Buildup new tuple of constants */
newconst = PyTuple_New(n);
if (newconst == NULL) {
return -1;
}
for (i=0 ; i<n ; i++) {
constant = objs[i];
Py_INCREF(constant);
PyTuple_SET_ITEM(newconst, i, constant);
}
/* If it's a BUILD_SET, use the PyTuple we just built to create a
PyFrozenSet, and use that as the constant instead: */
if (opcode == BUILD_SET) {
Py_SETREF(newconst, PyFrozenSet_New(newconst));
if (newconst == NULL) {
return -1;
}
}
/* Append folded constant onto consts */
len_consts = PyList_GET_SIZE(consts);
if (PyList_Append(consts, newconst)) {
Py_DECREF(newconst);
return -1;
}
Py_DECREF(newconst);
return copy_op_arg(codestr, c_start, LOAD_CONST, len_consts, opcode_end);
}
#define MAX_INT_SIZE 128 /* bits */
#define MAX_COLLECTION_SIZE 20 /* items */
#define MAX_STR_SIZE 20 /* characters */
#define MAX_TOTAL_ITEMS 1024 /* including nested collections */
static PyObject *
safe_multiply(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) && Py_SIZE(v) && Py_SIZE(w)) {
size_t vbits = _PyLong_NumBits(v);
size_t wbits = _PyLong_NumBits(w);
if (vbits == (size_t)-1 || wbits == (size_t)-1) {
return NULL;
}
if (vbits + wbits > MAX_INT_SIZE) {
return NULL;
}
}
else if (PyLong_Check(v) && (PyTuple_Check(w) || PyFrozenSet_Check(w))) {
Py_ssize_t size = PyTuple_Check(w) ? PyTuple_GET_SIZE(w) :
PySet_GET_SIZE(w);
if (size) {
long n = PyLong_AsLong(v);
if (n < 0 || n > MAX_COLLECTION_SIZE / size) {
return NULL;
}
if (n && check_complexity(w, MAX_TOTAL_ITEMS / n) < 0) {
return NULL;
}
}
}
else if (PyLong_Check(v) && (PyUnicode_Check(w) || PyBytes_Check(w))) {
Py_ssize_t size = PyUnicode_Check(w) ? PyUnicode_GET_LENGTH(w) :
PyBytes_GET_SIZE(w);
if (size) {
long n = PyLong_AsLong(v);
if (n < 0 || n > MAX_STR_SIZE / size) {
return NULL;
}
}
}
else if (PyLong_Check(w) &&
(PyTuple_Check(v) || PyFrozenSet_Check(v) ||
PyUnicode_Check(v) || PyBytes_Check(v)))
{
return safe_multiply(w, v);
}
return PyNumber_Multiply(v, w);
}
static PyObject *
safe_power(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) && Py_SIZE(v) && Py_SIZE(w) > 0) {
size_t vbits = _PyLong_NumBits(v);
size_t wbits = PyLong_AsSize_t(w);
if (vbits == (size_t)-1 || wbits == (size_t)-1) {
return NULL;
}
if (vbits > MAX_INT_SIZE / wbits) {
return NULL;
}
}
return PyNumber_Power(v, w, Py_None);
}
static PyObject *
safe_lshift(PyObject *v, PyObject *w)
{
if (PyLong_Check(v) && PyLong_Check(w) && Py_SIZE(v) && Py_SIZE(w)) {
size_t vbits = _PyLong_NumBits(v);
size_t wbits = PyLong_AsSize_t(w);
if (vbits == (size_t)-1 || wbits == (size_t)-1) {
return NULL;
}
if (wbits > MAX_INT_SIZE || vbits > MAX_INT_SIZE - wbits) {
return NULL;
}
}
return PyNumber_Lshift(v, w);
}
static PyObject *
safe_mod(PyObject *v, PyObject *w)
{
if (PyUnicode_Check(v) || PyBytes_Check(v)) {
return NULL;
}
return PyNumber_Remainder(v, w);
}
/* Replace LOAD_CONST c1, LOAD_CONST c2, BINOP
with LOAD_CONST binop(c1,c2)
The consts table must still be in list form so that the
new constant can be appended.
Called with codestr pointing to the BINOP.
Abandons the transformation if the folding fails (i.e. 1+'a').
If the new constant is a sequence, only folds when the size
is below a threshold value. That keeps pyc files from
becoming large in the presence of code like: (None,)*1000.
*/
static Py_ssize_t
fold_binops_on_constants(_Py_CODEUNIT *codestr, Py_ssize_t c_start,
Py_ssize_t opcode_end, unsigned char opcode,
PyObject *consts, PyObject **objs)
{
PyObject *newconst, *v, *w;
Py_ssize_t len_consts;
/* Pre-conditions */
assert(PyList_CheckExact(consts));
len_consts = PyList_GET_SIZE(consts);
/* Create new constant */
v = objs[0];
w = objs[1];
switch (opcode) {
case BINARY_POWER:
newconst = safe_power(v, w);
break;
case BINARY_MULTIPLY:
newconst = safe_multiply(v, w);
break;
case BINARY_TRUE_DIVIDE:
newconst = PyNumber_TrueDivide(v, w);
break;
case BINARY_FLOOR_DIVIDE:
newconst = PyNumber_FloorDivide(v, w);
break;
case BINARY_MODULO:
newconst = safe_mod(v, w);
break;
case BINARY_ADD:
newconst = PyNumber_Add(v, w);
break;
case BINARY_SUBTRACT:
newconst = PyNumber_Subtract(v, w);
break;
case BINARY_SUBSCR:
newconst = PyObject_GetItem(v, w);
break;
case BINARY_LSHIFT:
newconst = safe_lshift(v, w);
break;
case BINARY_RSHIFT:
newconst = PyNumber_Rshift(v, w);
break;
case BINARY_AND:
newconst = PyNumber_And(v, w);
break;
case BINARY_XOR:
newconst = PyNumber_Xor(v, w);
break;
case BINARY_OR:
newconst = PyNumber_Or(v, w);
break;
default:
/* Called with an unknown opcode */
PyErr_Format(PyExc_SystemError,
"unexpected binary operation %d on a constant",
opcode);
return -1;
}
if (newconst == NULL) {
if(!PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) {
PyErr_Clear();
}
return -1;
}
/* Append folded constant into consts table */
if (PyList_Append(consts, newconst)) {
Py_DECREF(newconst);
return -1;
}
Py_DECREF(newconst);
return copy_op_arg(codestr, c_start, LOAD_CONST, len_consts, opcode_end);
}
static Py_ssize_t
fold_unaryops_on_constants(_Py_CODEUNIT *codestr, Py_ssize_t c_start,
Py_ssize_t opcode_end, unsigned char opcode,
PyObject *consts, PyObject *v)
{
PyObject *newconst;
Py_ssize_t len_consts;
/* Pre-conditions */
assert(PyList_CheckExact(consts));
len_consts = PyList_GET_SIZE(consts);
/* Create new constant */
switch (opcode) {
case UNARY_NEGATIVE:
newconst = PyNumber_Negative(v);
break;
case UNARY_INVERT:
newconst = PyNumber_Invert(v);
break;
case UNARY_POSITIVE:
newconst = PyNumber_Positive(v);
break;
default:
/* Called with an unknown opcode */
PyErr_Format(PyExc_SystemError,
"unexpected unary operation %d on a constant",
opcode);
return -1;
}
if (newconst == NULL) {
if(!PyErr_ExceptionMatches(PyExc_KeyboardInterrupt)) {
PyErr_Clear();
}
return -1;
}
/* Append folded constant into consts table */
if (PyList_Append(consts, newconst)) {
Py_DECREF(newconst);
PyErr_Clear();
return -1;
}
Py_DECREF(newconst);
return copy_op_arg(codestr, c_start, LOAD_CONST, len_consts, opcode_end);
}
static unsigned int *
markblocks(_Py_CODEUNIT *code, Py_ssize_t len)
{
unsigned int *blocks = PyMem_New(unsigned int, len);
int i, j, opcode, blockcnt = 0;
if (blocks == NULL) {
PyErr_NoMemory();
return NULL;
}
memset(blocks, 0, len*sizeof(int));
/* Mark labels in the first pass */
for (i = 0; i < len; i++) {
opcode = _Py_OPCODE(code[i]);
switch (opcode) {
case FOR_ITER:
case JUMP_FORWARD:
case JUMP_IF_FALSE_OR_POP:
case JUMP_IF_TRUE_OR_POP:
case POP_JUMP_IF_FALSE:
case POP_JUMP_IF_TRUE:
case JUMP_ABSOLUTE:
case CONTINUE_LOOP:
case SETUP_LOOP:
case SETUP_EXCEPT:
case SETUP_FINALLY:
case SETUP_WITH:
case SETUP_ASYNC_WITH:
j = GETJUMPTGT(code, i);
assert(j < len);
blocks[j] = 1;
break;
}
}
/* Build block numbers in the second pass */
for (i = 0; i < len; i++) {
blockcnt += blocks[i]; /* increment blockcnt over labels */
blocks[i] = blockcnt;
}
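/* Example: if index 5 is the only jump target, the two passes produce
   blocks = {0,0,0,0,0,1,1,...}; ISBASICBLOCK(blocks, start, end) is
   then true exactly when no jump target lies in (start, end]. */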
return blocks;
}
/* Perform basic peephole optimizations to components of a code object.
The consts object should still be in list form to allow new constants
to be appended.
To keep the optimizer simple, it bails when the lineno table has complex
encoding for gaps >= 255.
Optimizations are restricted to simple transformations occurring within a
single basic block. All transformations keep the code size the same or
smaller. For those that reduce size, the gaps are initially filled with
NOPs. Later those NOPs are removed and the jump addresses retargeted in
a single pass. */
PyObject *
PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
PyObject *lnotab_obj)
{
Py_ssize_t h, i, nexti, op_start, codelen, tgt;
unsigned int j, nops;
unsigned char opcode, nextop;
_Py_CODEUNIT *codestr = NULL;
unsigned char *lnotab;
unsigned int cum_orig_offset, last_offset;
Py_ssize_t tabsiz;
PyObject **const_stack = NULL;
Py_ssize_t const_stack_top = -1;
Py_ssize_t const_stack_size = 0;
int in_consts = 0; /* whether we are in a LOAD_CONST sequence */
unsigned int *blocks = NULL;
/* Bail out if an exception is set */
if (PyErr_Occurred())
goto exitError;
/* Bypass optimization when the lnotab table is too complex */
assert(PyBytes_Check(lnotab_obj));
lnotab = (unsigned char*)PyBytes_AS_STRING(lnotab_obj);
tabsiz = PyBytes_GET_SIZE(lnotab_obj);
assert(tabsiz == 0 || Py_REFCNT(lnotab_obj) == 1);
if (memchr(lnotab, 255, tabsiz) != NULL) {
/* the value 255 is used for multibyte bytecode instructions */
goto exitUnchanged;
}
/* Note: -128 and 127 special values for line number delta are ok,
the peephole optimizer doesn't modify line numbers. */
assert(PyBytes_Check(code));
codelen = PyBytes_GET_SIZE(code);
assert(codelen % sizeof(_Py_CODEUNIT) == 0);
/* Make a modifiable copy of the code string */
codestr = (_Py_CODEUNIT *)PyMem_Malloc(codelen);
if (codestr == NULL) {
PyErr_NoMemory();
goto exitError;
}
memcpy(codestr, PyBytes_AS_STRING(code), codelen);
codelen /= sizeof(_Py_CODEUNIT);
blocks = markblocks(codestr, codelen);
if (blocks == NULL)
goto exitError;
assert(PyList_Check(consts));
CONST_STACK_CREATE();
for (i=find_op(codestr, codelen, 0) ; i<codelen ; i=nexti) {
opcode = _Py_OPCODE(codestr[i]);
op_start = i;
while (op_start >= 1 && _Py_OPCODE(codestr[op_start-1]) == EXTENDED_ARG) {
op_start--;
}
nexti = i + 1;
while (nexti < codelen && _Py_OPCODE(codestr[nexti]) == EXTENDED_ARG)
nexti++;
nextop = nexti < codelen ? _Py_OPCODE(codestr[nexti]) : 0;
if (!in_consts) {
CONST_STACK_RESET();
}
in_consts = 0;
switch (opcode) {
/* Replace UNARY_NOT POP_JUMP_IF_FALSE
with POP_JUMP_IF_TRUE */
case UNARY_NOT:
if (nextop != POP_JUMP_IF_FALSE
|| !ISBASICBLOCK(blocks, op_start, i + 1))
break;
fill_nops(codestr, op_start, i + 1);
codestr[nexti] = PACKOPARG(POP_JUMP_IF_TRUE, _Py_OPARG(codestr[nexti]));
break;
/* not a is b --> a is not b
not a in b --> a not in b
not a is not b --> a is b
not a not in b --> a in b
*/
case COMPARE_OP:
j = get_arg(codestr, i);
if (j < 6 || j > 9 ||
nextop != UNARY_NOT ||
!ISBASICBLOCK(blocks, op_start, i + 1))
break;
codestr[i] = PACKOPARG(opcode, j^1);
fill_nops(codestr, i + 1, nexti + 1);
break;
/* Skip over LOAD_CONST trueconst
POP_JUMP_IF_FALSE xx. This improves
"while 1" performance. */
case LOAD_CONST:
CONST_STACK_PUSH_OP(i);
if (nextop != POP_JUMP_IF_FALSE ||
!ISBASICBLOCK(blocks, op_start, i + 1) ||
!PyObject_IsTrue(PyList_GET_ITEM(consts, get_arg(codestr, i))))
break;
fill_nops(codestr, op_start, nexti + 1);
CONST_STACK_POP(1);
break;
/* Try to fold tuples of constants (includes a case for lists
and sets which are only used for "in" and "not in" tests).
Skip over BUILD_SEQN 1 UNPACK_SEQN 1.
Replace BUILD_SEQN 2 UNPACK_SEQN 2 with ROT2.
Replace BUILD_SEQN 3 UNPACK_SEQN 3 with ROT3 ROT2. */
case BUILD_TUPLE:
case BUILD_LIST:
case BUILD_SET:
j = get_arg(codestr, i);
if (j > 0 && CONST_STACK_LEN() >= j) {
h = lastn_const_start(codestr, op_start, j);
if ((opcode == BUILD_TUPLE &&
ISBASICBLOCK(blocks, h, op_start)) ||
((opcode == BUILD_LIST || opcode == BUILD_SET) &&
((nextop==COMPARE_OP &&
(_Py_OPARG(codestr[nexti]) == PyCmp_IN ||
_Py_OPARG(codestr[nexti]) == PyCmp_NOT_IN)) ||
nextop == GET_ITER) && ISBASICBLOCK(blocks, h, i + 1))) {
h = fold_tuple_on_constants(codestr, h, i + 1, opcode,
consts, CONST_STACK_LASTN(j), j);
if (h >= 0) {
CONST_STACK_POP(j);
CONST_STACK_PUSH_OP(h);
}
break;
}
}
if (nextop != UNPACK_SEQUENCE ||
!ISBASICBLOCK(blocks, op_start, i + 1) ||
j != get_arg(codestr, nexti) ||
opcode == BUILD_SET)
break;
if (j < 2) {
fill_nops(codestr, op_start, nexti + 1);
} else if (j == 2) {
codestr[op_start] = PACKOPARG(ROT_TWO, 0);
fill_nops(codestr, op_start + 1, nexti + 1);
CONST_STACK_RESET();
} else if (j == 3) {
codestr[op_start] = PACKOPARG(ROT_THREE, 0);
codestr[op_start + 1] = PACKOPARG(ROT_TWO, 0);
fill_nops(codestr, op_start + 2, nexti + 1);
CONST_STACK_RESET();
}
break;
/* Fold binary ops on constants.
LOAD_CONST c1 LOAD_CONST c2 BINOP --> LOAD_CONST binop(c1,c2) */
case BINARY_POWER:
case BINARY_MULTIPLY:
case BINARY_TRUE_DIVIDE:
case BINARY_FLOOR_DIVIDE:
case BINARY_MODULO:
case BINARY_ADD:
case BINARY_SUBTRACT:
case BINARY_SUBSCR:
case BINARY_LSHIFT:
case BINARY_RSHIFT:
case BINARY_AND:
case BINARY_XOR:
case BINARY_OR:
if (CONST_STACK_LEN() < 2)
break;
h = lastn_const_start(codestr, op_start, 2);
if (ISBASICBLOCK(blocks, h, op_start)) {
h = fold_binops_on_constants(codestr, h, i + 1, opcode,
consts, CONST_STACK_LASTN(2));
if (h >= 0) {
CONST_STACK_POP(2);
CONST_STACK_PUSH_OP(h);
}
}
break;
/* Fold unary ops on constants.
LOAD_CONST c1 UNARY_OP --> LOAD_CONST unary_op(c) */
case UNARY_NEGATIVE:
case UNARY_INVERT:
case UNARY_POSITIVE:
if (CONST_STACK_LEN() < 1)
break;
h = lastn_const_start(codestr, op_start, 1);
if (ISBASICBLOCK(blocks, h, op_start)) {
h = fold_unaryops_on_constants(codestr, h, i + 1, opcode,
consts, *CONST_STACK_LASTN(1));
if (h >= 0) {
CONST_STACK_POP(1);
CONST_STACK_PUSH_OP(h);
}
}
break;
/* Simplify conditional jump to conditional jump where the
result of the first test implies the success of a similar
test or the failure of the opposite test.
Arises in code like:
"if a and b:"
"if a or b:"
"a and b or c"
"(a and b) and c"
x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_FALSE_OR_POP z
--> x:JUMP_IF_FALSE_OR_POP z
x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_TRUE_OR_POP z
--> x:POP_JUMP_IF_FALSE y+1
where y+1 is the instruction following the second test.
*/
case JUMP_IF_FALSE_OR_POP:
case JUMP_IF_TRUE_OR_POP:
h = get_arg(codestr, i) / sizeof(_Py_CODEUNIT);
tgt = find_op(codestr, codelen, h);
j = _Py_OPCODE(codestr[tgt]);
if (CONDITIONAL_JUMP(j)) {
/* NOTE: all possible jumps here are absolute. */
if (JUMPS_ON_TRUE(j) == JUMPS_ON_TRUE(opcode)) {
/* The second jump will be taken iff the first is.
The current opcode inherits its target's
stack effect */
h = set_arg(codestr, i, get_arg(codestr, tgt));
} else {
/* The second jump is not taken if the first is (so
jump past it), and all conditional jumps pop their
argument when they're not taken (so change the
first jump to pop its argument when it's taken). */
h = set_arg(codestr, i, (tgt + 1) * sizeof(_Py_CODEUNIT));
j = opcode == JUMP_IF_TRUE_OR_POP ?
POP_JUMP_IF_TRUE : POP_JUMP_IF_FALSE;
}
if (h >= 0) {
nexti = h;
codestr[nexti] = PACKOPARG(j, _Py_OPARG(codestr[nexti]));
break;
}
}
/* Intentional fallthrough */
/* Replace jumps to unconditional jumps */
case POP_JUMP_IF_FALSE:
case POP_JUMP_IF_TRUE:
case FOR_ITER:
case JUMP_FORWARD:
case JUMP_ABSOLUTE:
case CONTINUE_LOOP:
case SETUP_LOOP:
case SETUP_EXCEPT:
case SETUP_FINALLY:
case SETUP_WITH:
case SETUP_ASYNC_WITH:
h = GETJUMPTGT(codestr, i);
tgt = find_op(codestr, codelen, h);
/* Replace a JUMP_* to a RETURN with just a RETURN */
if (UNCONDITIONAL_JUMP(opcode) &&
_Py_OPCODE(codestr[tgt]) == RETURN_VALUE) {
codestr[op_start] = PACKOPARG(RETURN_VALUE, 0);
fill_nops(codestr, op_start + 1, i + 1);
} else if (UNCONDITIONAL_JUMP(_Py_OPCODE(codestr[tgt]))) {
j = GETJUMPTGT(codestr, tgt);
if (opcode == JUMP_FORWARD) { /* JMP_ABS can go backwards */
opcode = JUMP_ABSOLUTE;
} else if (!ABSOLUTE_JUMP(opcode)) {
if ((Py_ssize_t)j < i + 1) {
break; /* No backward relative jumps */
}
j -= i + 1; /* Calc relative jump addr */
}
j *= sizeof(_Py_CODEUNIT);
copy_op_arg(codestr, op_start, opcode, j, i + 1);
}
break;
/* Remove unreachable ops after RETURN */
case RETURN_VALUE:
h = i + 1;
while (h < codelen && ISBASICBLOCK(blocks, i, h)) {
h++;
}
if (h > i + 1) {
fill_nops(codestr, i + 1, h);
nexti = find_op(codestr, codelen, h);
}
break;
}
}
/* Fixup lnotab */
for (i = 0, nops = 0; i < codelen; i++) {
assert(i - nops <= INT_MAX);
/* original code offset => new code offset */
blocks[i] = i - nops;
if (_Py_OPCODE(codestr[i]) == NOP)
nops++;
}
cum_orig_offset = 0;
last_offset = 0;
for (i=0 ; i < tabsiz ; i+=2) {
unsigned int offset_delta, new_offset;
cum_orig_offset += lnotab[i];
assert(cum_orig_offset % sizeof(_Py_CODEUNIT) == 0);
new_offset = blocks[cum_orig_offset / sizeof(_Py_CODEUNIT)] *
sizeof(_Py_CODEUNIT);
offset_delta = new_offset - last_offset;
assert(offset_delta <= 255);
lnotab[i] = (unsigned char)offset_delta;
last_offset = new_offset;
}
/* Remove NOPs and fixup jump targets */
for (op_start = i = h = 0; i < codelen; i++, op_start = i) {
j = _Py_OPARG(codestr[i]);
while (_Py_OPCODE(codestr[i]) == EXTENDED_ARG) {
i++;
j = j<<8 | _Py_OPARG(codestr[i]);
}
opcode = _Py_OPCODE(codestr[i]);
switch (opcode) {
case NOP:continue;
case JUMP_ABSOLUTE:
case CONTINUE_LOOP:
case POP_JUMP_IF_FALSE:
case POP_JUMP_IF_TRUE:
case JUMP_IF_FALSE_OR_POP:
case JUMP_IF_TRUE_OR_POP:
j = blocks[j / sizeof(_Py_CODEUNIT)] * sizeof(_Py_CODEUNIT);
break;
case FOR_ITER:
case JUMP_FORWARD:
case SETUP_LOOP:
case SETUP_EXCEPT:
case SETUP_FINALLY:
case SETUP_WITH:
case SETUP_ASYNC_WITH:
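/* relative jumps: map the old absolute target (i + 1 + j in units)
   through blocks[] (old index -> new index) and re-express it
   relative to this instruction's new position, blocks[i]. */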
j = blocks[j / sizeof(_Py_CODEUNIT) + i + 1] - blocks[i] - 1;
j *= sizeof(_Py_CODEUNIT);
break;
}
nexti = i - op_start + 1;
if (instrsize(j) > nexti)
goto exitUnchanged;
/* If instrsize(j) < nexti, we'll emit EXTENDED_ARG 0 */
write_op_arg(codestr + h, opcode, j, nexti);
h += nexti;
}
assert(h + (Py_ssize_t)nops == codelen);
CONST_STACK_DELETE();
PyMem_Free(blocks);
code = PyBytes_FromStringAndSize((char *)codestr, h * sizeof(_Py_CODEUNIT));
PyMem_Free(codestr);
return code;
exitError:
code = NULL;
exitUnchanged:
Py_XINCREF(code);
CONST_STACK_DELETE();
PyMem_Free(blocks);
PyMem_Free(codestr);
return code;
}

210
third_party/python/Python/pyarena.c vendored Normal file
View file

@ -0,0 +1,210 @@
#include "Python.h"
/* A simple arena block structure.
Measurements with standard library modules suggest the average
allocation is about 20 bytes and that most compiles use a single
block.
TODO(jhylton): Think about a realloc API, maybe just for the last
allocation?
*/
#define DEFAULT_BLOCK_SIZE 8192
#define ALIGNMENT 8
typedef struct _block {
/* Total number of bytes owned by this block available to pass out.
* Read-only after initialization. The first such byte starts at
* ab_mem.
*/
size_t ab_size;
/* Total number of bytes already passed out. The next byte available
* to pass out starts at ab_mem + ab_offset.
*/
size_t ab_offset;
/* An arena maintains a singly-linked, NULL-terminated list of
* all blocks owned by the arena. These are linked via the
* ab_next member.
*/
struct _block *ab_next;
/* Pointer to the first allocatable byte owned by this block. Read-
* only after initialization.
*/
void *ab_mem;
} block;
/* The arena manages two kinds of memory, blocks of raw memory
and a list of PyObject* pointers. PyObjects are decrefed
when the arena is freed.
*/
struct _arena {
/* Pointer to the first block allocated for the arena, never NULL.
It is used only to find the first block when the arena is
being freed.
*/
block *a_head;
/* Pointer to the block currently used for allocation. Its
ab_next field should be NULL. If it is non-NULL after a
call to block_alloc(), it means a new block has been allocated
and a_cur should be reset to point to it.
*/
block *a_cur;
/* A Python list object containing references to all the PyObject
pointers associated with this arena. They will be DECREFed
when the arena is freed.
*/
PyObject *a_objects;
#if defined(Py_DEBUG)
/* Debug output */
size_t total_allocs;
size_t total_size;
size_t total_blocks;
size_t total_block_size;
size_t total_big_blocks;
#endif
};
static block *
block_new(size_t size)
{
/* Allocate header and block as one unit.
ab_mem points just past header. */
block *b = (block *)PyMem_Malloc(sizeof(block) + size);
if (!b)
return NULL;
b->ab_size = size;
b->ab_mem = (void *)(b + 1);
b->ab_next = NULL;
b->ab_offset = (char *)_Py_ALIGN_UP(b->ab_mem, ALIGNMENT) -
(char *)(b->ab_mem);
return b;
}
static void
block_free(block *b) {
while (b) {
block *next = b->ab_next;
PyMem_Free(b);
b = next;
}
}
static void *
block_alloc(block *b, size_t size)
{
void *p;
assert(b);
size = _Py_SIZE_ROUND_UP(size, ALIGNMENT);
if (b->ab_offset + size > b->ab_size) {
/* If we need to allocate more memory than will fit in
the default block, allocate a one-off block that is
exactly the right size. */
/* TODO(jhylton): Think about space waste at end of block */
block *newbl = block_new(
size < DEFAULT_BLOCK_SIZE ?
DEFAULT_BLOCK_SIZE : size);
if (!newbl)
return NULL;
assert(!b->ab_next);
b->ab_next = newbl;
b = newbl;
}
assert(b->ab_offset + size <= b->ab_size);
p = (void *)(((char *)b->ab_mem) + b->ab_offset);
b->ab_offset += size;
return p;
}
PyArena *
PyArena_New()
{
PyArena* arena = (PyArena *)PyMem_Malloc(sizeof(PyArena));
if (!arena)
return (PyArena*)PyErr_NoMemory();
arena->a_head = block_new(DEFAULT_BLOCK_SIZE);
arena->a_cur = arena->a_head;
if (!arena->a_head) {
PyMem_Free((void *)arena);
return (PyArena*)PyErr_NoMemory();
}
arena->a_objects = PyList_New(0);
if (!arena->a_objects) {
block_free(arena->a_head);
PyMem_Free((void *)arena);
return (PyArena*)PyErr_NoMemory();
}
#if defined(Py_DEBUG)
arena->total_allocs = 0;
arena->total_size = 0;
arena->total_blocks = 1;
arena->total_block_size = DEFAULT_BLOCK_SIZE;
arena->total_big_blocks = 0;
#endif
return arena;
}
void
PyArena_Free(PyArena *arena)
{
assert(arena);
#if defined(Py_DEBUG)
/*
fprintf(stderr,
"alloc=%d size=%d blocks=%d block_size=%d big=%d objects=%d\n",
arena->total_allocs, arena->total_size, arena->total_blocks,
arena->total_block_size, arena->total_big_blocks,
PyList_Size(arena->a_objects));
*/
#endif
block_free(arena->a_head);
/* This property normally holds, except when the code being compiled
is sys.getobjects(0), in which case there will be two references.
assert(arena->a_objects->ob_refcnt == 1);
*/
Py_DECREF(arena->a_objects);
PyMem_Free(arena);
}
void *
PyArena_Malloc(PyArena *arena, size_t size)
{
void *p = block_alloc(arena->a_cur, size);
if (!p)
return PyErr_NoMemory();
#if defined(Py_DEBUG)
arena->total_allocs++;
arena->total_size += size;
#endif
/* Reset cur if we allocated a new block. */
if (arena->a_cur->ab_next) {
arena->a_cur = arena->a_cur->ab_next;
#if defined(Py_DEBUG)
arena->total_blocks++;
arena->total_block_size += arena->a_cur->ab_size;
if (arena->a_cur->ab_size > DEFAULT_BLOCK_SIZE)
++arena->total_big_blocks;
#endif
}
return p;
}
int
PyArena_AddPyObject(PyArena *arena, PyObject *obj)
{
int r = PyList_Append(arena->a_objects, obj);
if (r >= 0) {
Py_DECREF(obj);
}
return r;
}
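A usage sketch of the arena API above, assuming an initialized interpreter with the GIL held (the demo() wrapper is illustrative); the compiler uses the same pattern, releasing everything allocated for one compilation with a single PyArena_Free():

#include "Python.h"

/* illustrative sketch: demo() stands in for any code holding the GIL */
static int demo(void)
{
    PyArena *arena = PyArena_New();
    if (arena == NULL)
        return -1;

    /* raw memory: released in bulk by PyArena_Free() */
    char *scratch = PyArena_Malloc(arena, 128);
    if (scratch == NULL) {
        PyArena_Free(arena);
        return -1;
    }
    scratch[0] = '\0';

    /* object memory: on success the arena owns the reference and
       DECREFs it via a_objects when the arena is freed */
    PyObject *name = PyUnicode_FromString("example");
    if (name == NULL || PyArena_AddPyObject(arena, name) < 0) {
        Py_XDECREF(name);
        PyArena_Free(arena);
        return -1;
    }

    /* ... build an AST or similar data structure into the arena ... */

    PyArena_Free(arena);   /* frees every block and owned object */
    return 0;
}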

214
third_party/python/Python/pyctype.c vendored Normal file
View file

@ -0,0 +1,214 @@
#include "Python.h"
/* Our own locale-independent ctype.h-like macros */
const unsigned int _Py_ctype_table[256] = {
0, /* 0x0 '\x00' */
0, /* 0x1 '\x01' */
0, /* 0x2 '\x02' */
0, /* 0x3 '\x03' */
0, /* 0x4 '\x04' */
0, /* 0x5 '\x05' */
0, /* 0x6 '\x06' */
0, /* 0x7 '\x07' */
0, /* 0x8 '\x08' */
PY_CTF_SPACE, /* 0x9 '\t' */
PY_CTF_SPACE, /* 0xa '\n' */
PY_CTF_SPACE, /* 0xb '\v' */
PY_CTF_SPACE, /* 0xc '\f' */
PY_CTF_SPACE, /* 0xd '\r' */
0, /* 0xe '\x0e' */
0, /* 0xf '\x0f' */
0, /* 0x10 '\x10' */
0, /* 0x11 '\x11' */
0, /* 0x12 '\x12' */
0, /* 0x13 '\x13' */
0, /* 0x14 '\x14' */
0, /* 0x15 '\x15' */
0, /* 0x16 '\x16' */
0, /* 0x17 '\x17' */
0, /* 0x18 '\x18' */
0, /* 0x19 '\x19' */
0, /* 0x1a '\x1a' */
0, /* 0x1b '\x1b' */
0, /* 0x1c '\x1c' */
0, /* 0x1d '\x1d' */
0, /* 0x1e '\x1e' */
0, /* 0x1f '\x1f' */
PY_CTF_SPACE, /* 0x20 ' ' */
0, /* 0x21 '!' */
0, /* 0x22 '"' */
0, /* 0x23 '#' */
0, /* 0x24 '$' */
0, /* 0x25 '%' */
0, /* 0x26 '&' */
0, /* 0x27 "'" */
0, /* 0x28 '(' */
0, /* 0x29 ')' */
0, /* 0x2a '*' */
0, /* 0x2b '+' */
0, /* 0x2c ',' */
0, /* 0x2d '-' */
0, /* 0x2e '.' */
0, /* 0x2f '/' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x30 '0' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x31 '1' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x32 '2' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x33 '3' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x34 '4' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x35 '5' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x36 '6' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x37 '7' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x38 '8' */
PY_CTF_DIGIT|PY_CTF_XDIGIT, /* 0x39 '9' */
0, /* 0x3a ':' */
0, /* 0x3b ';' */
0, /* 0x3c '<' */
0, /* 0x3d '=' */
0, /* 0x3e '>' */
0, /* 0x3f '?' */
0, /* 0x40 '@' */
PY_CTF_UPPER|PY_CTF_XDIGIT, /* 0x41 'A' */
PY_CTF_UPPER|PY_CTF_XDIGIT, /* 0x42 'B' */
PY_CTF_UPPER|PY_CTF_XDIGIT, /* 0x43 'C' */
PY_CTF_UPPER|PY_CTF_XDIGIT, /* 0x44 'D' */
PY_CTF_UPPER|PY_CTF_XDIGIT, /* 0x45 'E' */
PY_CTF_UPPER|PY_CTF_XDIGIT, /* 0x46 'F' */
PY_CTF_UPPER, /* 0x47 'G' */
PY_CTF_UPPER, /* 0x48 'H' */
PY_CTF_UPPER, /* 0x49 'I' */
PY_CTF_UPPER, /* 0x4a 'J' */
PY_CTF_UPPER, /* 0x4b 'K' */
PY_CTF_UPPER, /* 0x4c 'L' */
PY_CTF_UPPER, /* 0x4d 'M' */
PY_CTF_UPPER, /* 0x4e 'N' */
PY_CTF_UPPER, /* 0x4f 'O' */
PY_CTF_UPPER, /* 0x50 'P' */
PY_CTF_UPPER, /* 0x51 'Q' */
PY_CTF_UPPER, /* 0x52 'R' */
PY_CTF_UPPER, /* 0x53 'S' */
PY_CTF_UPPER, /* 0x54 'T' */
PY_CTF_UPPER, /* 0x55 'U' */
PY_CTF_UPPER, /* 0x56 'V' */
PY_CTF_UPPER, /* 0x57 'W' */
PY_CTF_UPPER, /* 0x58 'X' */
PY_CTF_UPPER, /* 0x59 'Y' */
PY_CTF_UPPER, /* 0x5a 'Z' */
0, /* 0x5b '[' */
0, /* 0x5c '\\' */
0, /* 0x5d ']' */
0, /* 0x5e '^' */
0, /* 0x5f '_' */
0, /* 0x60 '`' */
PY_CTF_LOWER|PY_CTF_XDIGIT, /* 0x61 'a' */
PY_CTF_LOWER|PY_CTF_XDIGIT, /* 0x62 'b' */
PY_CTF_LOWER|PY_CTF_XDIGIT, /* 0x63 'c' */
PY_CTF_LOWER|PY_CTF_XDIGIT, /* 0x64 'd' */
PY_CTF_LOWER|PY_CTF_XDIGIT, /* 0x65 'e' */
PY_CTF_LOWER|PY_CTF_XDIGIT, /* 0x66 'f' */
PY_CTF_LOWER, /* 0x67 'g' */
PY_CTF_LOWER, /* 0x68 'h' */
PY_CTF_LOWER, /* 0x69 'i' */
PY_CTF_LOWER, /* 0x6a 'j' */
PY_CTF_LOWER, /* 0x6b 'k' */
PY_CTF_LOWER, /* 0x6c 'l' */
PY_CTF_LOWER, /* 0x6d 'm' */
PY_CTF_LOWER, /* 0x6e 'n' */
PY_CTF_LOWER, /* 0x6f 'o' */
PY_CTF_LOWER, /* 0x70 'p' */
PY_CTF_LOWER, /* 0x71 'q' */
PY_CTF_LOWER, /* 0x72 'r' */
PY_CTF_LOWER, /* 0x73 's' */
PY_CTF_LOWER, /* 0x74 't' */
PY_CTF_LOWER, /* 0x75 'u' */
PY_CTF_LOWER, /* 0x76 'v' */
PY_CTF_LOWER, /* 0x77 'w' */
PY_CTF_LOWER, /* 0x78 'x' */
PY_CTF_LOWER, /* 0x79 'y' */
PY_CTF_LOWER, /* 0x7a 'z' */
0, /* 0x7b '{' */
0, /* 0x7c '|' */
0, /* 0x7d '}' */
0, /* 0x7e '~' */
0, /* 0x7f '\x7f' */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
const unsigned char _Py_ctype_tolower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
};
const unsigned char _Py_ctype_toupper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
};

23
third_party/python/Python/pyfpe.c vendored Normal file
View file

@ -0,0 +1,23 @@
#include "pyconfig.h"
#include "pyfpe.h"
/*
* The signal handler for SIGFPE is actually declared in an external
* module fpectl, or as preferred by the user. These variable
* definitions are required in order to compile Python without
* getting missing externals, but to actually handle SIGFPE requires
* defining a handler and enabling generation of SIGFPE.
*/
#ifdef WANT_SIGFPE_HANDLER
jmp_buf PyFPE_jbuf;
int PyFPE_counter = 0;
#endif
/* Have this outside the above #ifdef, since some picky ANSI compilers issue a
warning when compiling an empty file. */
double
PyFPE_dummy(void *dummy)
{
return 1.0;
}

424
third_party/python/Python/pyhash.c vendored Normal file
View file

@ -0,0 +1,424 @@
/* Set of hash utility functions to help maintain the invariant that
if a==b then hash(a)==hash(b)
All the utility functions (_Py_Hash*()) return "-1" to signify an error.
*/
#include "Python.h"
#ifdef __APPLE__
# include <libkern/OSByteOrder.h>
#elif defined(HAVE_LE64TOH) && defined(HAVE_ENDIAN_H)
# include <endian.h>
#elif defined(HAVE_LE64TOH) && defined(HAVE_SYS_ENDIAN_H)
# include <sys/endian.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
_Py_HashSecret_t _Py_HashSecret;
#if Py_HASH_ALGORITHM == Py_HASH_EXTERNAL
extern PyHash_FuncDef PyHash_Func;
#else
static PyHash_FuncDef PyHash_Func;
#endif
/* Count _Py_HashBytes() calls */
#ifdef Py_HASH_STATS
#define Py_HASH_STATS_MAX 32
static Py_ssize_t hashstats[Py_HASH_STATS_MAX + 1] = {0};
#endif
/* For numeric types, the hash of a number x is based on the reduction
of x modulo the prime P = 2**_PyHASH_BITS - 1. It's designed so that
hash(x) == hash(y) whenever x and y are numerically equal, even if
x and y have different types.
A quick summary of the hashing strategy:
(1) First define the 'reduction of x modulo P' for any rational
number x; this is a standard extension of the usual notion of
reduction modulo P for integers. If x == p/q (written in lowest
terms), the reduction is interpreted as the reduction of p times
the inverse of the reduction of q, all modulo P; if q is exactly
divisible by P then define the reduction to be infinity. So we've
got a well-defined map
reduce : { rational numbers } -> { 0, 1, 2, ..., P-1, infinity }.
(2) Now for a rational number x, define hash(x) by:
reduce(x) if x >= 0
-reduce(-x) if x < 0
If the result of the reduction is infinity (this is impossible for
integers, floats and Decimals) then use the predefined hash value
_PyHASH_INF for x >= 0, or -_PyHASH_INF for x < 0, instead.
_PyHASH_INF, -_PyHASH_INF and _PyHASH_NAN are also used for the
hashes of float and Decimal infinities and nans.
A selling point for the above strategy is that it makes it possible
to compute hashes of decimal and binary floating-point numbers
efficiently, even if the exponent of the binary or decimal number
is large. The key point is that
reduce(x * y) == reduce(x) * reduce(y) (modulo _PyHASH_MODULUS)
provided that {reduce(x), reduce(y)} != {0, infinity}. The reduction of a
binary or decimal float is never infinity, since the denominator is a power
of 2 (for binary) or a divisor of a power of 10 (for decimal). So we have,
for nonnegative x,
reduce(x * 2**e) == reduce(x) * reduce(2**e) % _PyHASH_MODULUS
reduce(x * 10**e) == reduce(x) * reduce(10**e) % _PyHASH_MODULUS
and reduce(10**e) can be computed efficiently by the usual modular
exponentiation algorithm. For reduce(2**e) it's even better: since
P is of the form 2**n-1, reduce(2**e) is 2**(e mod n), and multiplication
by 2**(e mod n) modulo 2**n-1 just amounts to a rotation of bits.
*/
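/* Worked example (64-bit build, where _PyHASH_BITS == 61 and
   P == 2**61 - 1): since 2**61 == 1 (mod P), reduce(2**100) ==
   2**(100 % 61) == 2**39. Hence hash(2**100) and hash(2.0**100)
   (and likewise the equal Decimal value) are all 2**39 ==
   549755813888, even though the types differ. */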
Py_hash_t
_Py_HashDouble(double v)
{
int e, sign;
double m;
Py_uhash_t x, y;
if (!Py_IS_FINITE(v)) {
if (Py_IS_INFINITY(v))
return v > 0 ? _PyHASH_INF : -_PyHASH_INF;
else
return _PyHASH_NAN;
}
m = frexp(v, &e);
sign = 1;
if (m < 0) {
sign = -1;
m = -m;
}
/* process 28 bits at a time; this should work well both for binary
and hexadecimal floating point. */
x = 0;
while (m) {
x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28);
m *= 268435456.0; /* 2**28 */
e -= 28;
y = (Py_uhash_t)m; /* pull out integer part */
m -= y;
x += y;
if (x >= _PyHASH_MODULUS)
x -= _PyHASH_MODULUS;
}
/* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
e = e >= 0 ? e % _PyHASH_BITS : _PyHASH_BITS-1-((-1-e) % _PyHASH_BITS);
x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e);
x = x * sign;
if (x == (Py_uhash_t)-1)
x = (Py_uhash_t)-2;
return (Py_hash_t)x;
}
Py_hash_t
_Py_HashPointer(void *p)
{
Py_hash_t x;
size_t y = (size_t)p;
/* bottom 3 or 4 bits are likely to be 0; rotate y by 4 to avoid
excessive hash collisions for dicts and sets */
y = (y >> 4) | (y << (8 * SIZEOF_VOID_P - 4));
x = (Py_hash_t)y;
if (x == -1)
x = -2;
return x;
}
Py_hash_t
_Py_HashBytes(const void *src, Py_ssize_t len)
{
Py_hash_t x;
/*
We make the hash of the empty string be 0, rather than using
(prefix ^ suffix), since this slightly obfuscates the hash secret
*/
if (len == 0) {
return 0;
}
#ifdef Py_HASH_STATS
hashstats[(len <= Py_HASH_STATS_MAX) ? len : 0]++;
#endif
#if Py_HASH_CUTOFF > 0
if (len < Py_HASH_CUTOFF) {
/* Optimize hashing of very small strings with inline DJBX33A. */
Py_uhash_t hash;
const unsigned char *p = src;
hash = 5381; /* DJBX33A starts with 5381 */
switch(len) {
/* ((hash << 5) + hash) + *p == hash * 33 + *p */
case 7: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
case 6: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
case 5: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
case 4: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
case 3: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
case 2: hash = ((hash << 5) + hash) + *p++; /* fallthrough */
case 1: hash = ((hash << 5) + hash) + *p++; break;
default:
assert(0);
}
hash ^= len;
hash ^= (Py_uhash_t) _Py_HashSecret.djbx33a.suffix;
x = (Py_hash_t)hash;
}
else
#endif /* Py_HASH_CUTOFF */
x = PyHash_Func.hash(src, len);
if (x == -1)
return -2;
return x;
}
void
_PyHash_Fini(void)
{
#ifdef Py_HASH_STATS
int i;
Py_ssize_t total = 0;
char *fmt = "%2i %8" PY_FORMAT_SIZE_T "d %8" PY_FORMAT_SIZE_T "d\n";
fprintf(stderr, "len calls total\n");
for (i = 1; i <= Py_HASH_STATS_MAX; i++) {
total += hashstats[i];
fprintf(stderr, fmt, i, hashstats[i], total);
}
total += hashstats[0];
fprintf(stderr, "> %8" PY_FORMAT_SIZE_T "d %8" PY_FORMAT_SIZE_T "d\n",
hashstats[0], total);
#endif
}
PyHash_FuncDef *
PyHash_GetFuncDef(void)
{
return &PyHash_Func;
}
/* Optimized memcpy() for Windows */
#ifdef _MSC_VER
# if SIZEOF_PY_UHASH_T == 4
# define PY_UHASH_CPY(dst, src) do { \
dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; \
} while(0)
# elif SIZEOF_PY_UHASH_T == 8
# define PY_UHASH_CPY(dst, src) do { \
dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; \
dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; \
} while(0)
# else
# error SIZEOF_PY_UHASH_T must be 4 or 8
# endif /* SIZEOF_PY_UHASH_T */
#else /* not Windows */
# define PY_UHASH_CPY(dst, src) memcpy(dst, src, SIZEOF_PY_UHASH_T)
#endif /* _MSC_VER */
#if Py_HASH_ALGORITHM == Py_HASH_FNV
/* **************************************************************************
* Modified Fowler-Noll-Vo (FNV) hash function
*/
static Py_hash_t
fnv(const void *src, Py_ssize_t len)
{
const unsigned char *p = src;
Py_uhash_t x;
Py_ssize_t remainder, blocks;
union {
Py_uhash_t value;
unsigned char bytes[SIZEOF_PY_UHASH_T];
} block;
#ifdef Py_DEBUG
assert(_Py_HashSecret_Initialized);
#endif
remainder = len % SIZEOF_PY_UHASH_T;
if (remainder == 0) {
/* Process at least one block byte by byte to reduce hash collisions
* for strings with common prefixes. */
remainder = SIZEOF_PY_UHASH_T;
}
blocks = (len - remainder) / SIZEOF_PY_UHASH_T;
x = (Py_uhash_t) _Py_HashSecret.fnv.prefix;
x ^= (Py_uhash_t) *p << 7;
while (blocks--) {
PY_UHASH_CPY(block.bytes, p);
x = (_PyHASH_MULTIPLIER * x) ^ block.value;
p += SIZEOF_PY_UHASH_T;
}
/* add remainder */
for (; remainder > 0; remainder--)
x = (_PyHASH_MULTIPLIER * x) ^ (Py_uhash_t) *p++;
x ^= (Py_uhash_t) len;
x ^= (Py_uhash_t) _Py_HashSecret.fnv.suffix;
if (x == (Py_uhash_t) -1) {
x = (Py_uhash_t) -2;
}
return x;
}
static PyHash_FuncDef PyHash_Func = {fnv, "fnv", 8 * SIZEOF_PY_HASH_T,
16 * SIZEOF_PY_HASH_T};
#endif /* Py_HASH_ALGORITHM == Py_HASH_FNV */
#if Py_HASH_ALGORITHM == Py_HASH_SIPHASH24
/* **************************************************************************
<MIT License>
Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
</MIT License>
Original location:
https://github.com/majek/csiphash/
Solution inspired by code from:
Samuel Neves (supercop/crypto_auth/siphash24/little)
djb (supercop/crypto_auth/siphash24/little2)
Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
Modified for Python by Christian Heimes:
- C89 / MSVC compatibility
- _rotl64() on Windows
- letoh64() fallback
*/
/* byte swap little endian to host endian
* Endian conversion not only ensures that the hash function returns the same
 * value on all platforms. It is also required for a good dispersion of
* the hash values' least significant bits.
*/
#if PY_LITTLE_ENDIAN
# define _le64toh(x) ((uint64_t)(x))
#elif defined(__APPLE__)
# define _le64toh(x) OSSwapLittleToHostInt64(x)
#elif defined(HAVE_LETOH64)
# define _le64toh(x) le64toh(x)
#else
# define _le64toh(x) (((uint64_t)(x) << 56) | \
(((uint64_t)(x) << 40) & 0xff000000000000ULL) | \
(((uint64_t)(x) << 24) & 0xff0000000000ULL) | \
(((uint64_t)(x) << 8) & 0xff00000000ULL) | \
(((uint64_t)(x) >> 8) & 0xff000000ULL) | \
(((uint64_t)(x) >> 24) & 0xff0000ULL) | \
(((uint64_t)(x) >> 40) & 0xff00ULL) | \
((uint64_t)(x) >> 56))
#endif
#ifdef _MSC_VER
# define ROTATE(x, b) _rotl64(x, b)
#else
# define ROTATE(x, b) (uint64_t)( ((x) << (b)) | ( (x) >> (64 - (b))) )
#endif
#define HALF_ROUND(a,b,c,d,s,t) \
a += b; c += d; \
b = ROTATE(b, s) ^ a; \
d = ROTATE(d, t) ^ c; \
a = ROTATE(a, 32);
#define DOUBLE_ROUND(v0,v1,v2,v3) \
HALF_ROUND(v0,v1,v2,v3,13,16); \
HALF_ROUND(v2,v1,v0,v3,17,21); \
HALF_ROUND(v0,v1,v2,v3,13,16); \
HALF_ROUND(v2,v1,v0,v3,17,21);
static Py_hash_t
siphash24(const void *src, Py_ssize_t src_sz) {
uint64_t k0 = _le64toh(_Py_HashSecret.siphash.k0);
uint64_t k1 = _le64toh(_Py_HashSecret.siphash.k1);
uint64_t b = (uint64_t)src_sz << 56;
const uint8_t *in = (uint8_t*)src;
uint64_t v0 = k0 ^ 0x736f6d6570736575ULL;
uint64_t v1 = k1 ^ 0x646f72616e646f6dULL;
uint64_t v2 = k0 ^ 0x6c7967656e657261ULL;
uint64_t v3 = k1 ^ 0x7465646279746573ULL;
uint64_t t;
uint8_t *pt;
while (src_sz >= 8) {
uint64_t mi;
memcpy(&mi, in, sizeof(mi));
mi = _le64toh(mi);
in += sizeof(mi);
src_sz -= sizeof(mi);
v3 ^= mi;
DOUBLE_ROUND(v0,v1,v2,v3);
v0 ^= mi;
}
t = 0;
pt = (uint8_t *)&t;
switch (src_sz) {
case 7: pt[6] = in[6]; /* fall through */
case 6: pt[5] = in[5]; /* fall through */
case 5: pt[4] = in[4]; /* fall through */
case 4: memcpy(pt, in, sizeof(uint32_t)); break;
case 3: pt[2] = in[2]; /* fall through */
case 2: pt[1] = in[1]; /* fall through */
case 1: pt[0] = in[0]; /* fall through */
}
b |= _le64toh(t);
v3 ^= b;
DOUBLE_ROUND(v0,v1,v2,v3);
v0 ^= b;
v2 ^= 0xff;
DOUBLE_ROUND(v0,v1,v2,v3);
DOUBLE_ROUND(v0,v1,v2,v3);
/* modified */
t = (v0 ^ v1) ^ (v2 ^ v3);
return (Py_hash_t)t;
}
static PyHash_FuncDef PyHash_Func = {siphash24, "siphash24", 64, 128};
#endif /* Py_HASH_ALGORITHM == Py_HASH_SIPHASH24 */
#ifdef __cplusplus
}
#endif

1695
third_party/python/Python/pylifecycle.c vendored Normal file

File diff suppressed because it is too large Load diff

81
third_party/python/Python/pymath.c vendored Normal file
View file

@ -0,0 +1,81 @@
#include "Python.h"
#ifdef X87_DOUBLE_ROUNDING
/* On x86 platforms using an x87 FPU, this function is called from the
Py_FORCE_DOUBLE macro (defined in pymath.h) to force a floating-point
number out of an 80-bit x87 FPU register and into a 64-bit memory location,
thus rounding from extended precision to double precision. */
double _Py_force_double(double x)
{
volatile double y;
y = x;
return y;
}
#endif
#ifdef HAVE_GCC_ASM_FOR_X87
/* inline assembly for getting and setting the 387 FPU control word on
gcc/x86 */
#ifdef _Py_MEMORY_SANITIZER
__attribute__((no_sanitize_memory))
#endif
unsigned short _Py_get_387controlword(void) {
unsigned short cw;
__asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
return cw;
}
void _Py_set_387controlword(unsigned short cw) {
__asm__ __volatile__ ("fldcw %0" : : "m" (cw));
}
#endif
#ifndef HAVE_HYPOT
double hypot(double x, double y)
{
double yx;
x = fabs(x);
y = fabs(y);
if (x < y) {
double temp = x;
x = y;
y = temp;
}
if (x == 0.)
return 0.;
else {
yx = y/x;
return x*sqrt(1.+yx*yx);
}
}
#endif /* HAVE_HYPOT */
#ifndef HAVE_COPYSIGN
double
copysign(double x, double y)
{
/* use atan2 to distinguish -0. from 0. */
if (y > 0. || (y == 0. && atan2(y, -1.) > 0.)) {
return fabs(x);
} else {
return -fabs(x);
}
}
#endif /* HAVE_COPYSIGN */
#ifndef HAVE_ROUND
double
round(double x)
{
double absx, y;
absx = fabs(x);
y = floor(absx);
if (absx - y >= 0.5)
y += 1.0;
return copysign(y, x);
}
#endif /* HAVE_ROUND */
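/* Illustrative sketch (standalone demo, hypothetical demo_hypot): the hypot()
   fallback above divides by the larger magnitude so that y/x <= 1, which
   keeps the intermediate sum from overflowing; this repeats the idea and
   exercises it on a benign and a near-overflow input. */
#include <math.h>
#include <stdio.h>

static double demo_hypot(double x, double y)
{
    x = fabs(x);
    y = fabs(y);
    if (x < y) {
        double t = x;
        x = y;
        y = t;
    }
    if (x == 0.)
        return 0.;
    double r = y / x;              /* in [0, 1] by construction */
    return x * sqrt(1. + r * r);   /* no intermediate overflow */
}

int main(void)
{
    printf("%g\n", demo_hypot(3.0, 4.0));      /* 5 */
    printf("%g\n", demo_hypot(1e200, 1e200));  /* finite, ~1.41421e+200 */
    return 0;
}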

959
third_party/python/Python/pystate.c vendored Normal file
View file

@ -0,0 +1,959 @@
/* Thread and interpreter state structures and their interfaces */
#include "Python.h"
#define GET_TSTATE() \
((PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current))
#define SET_TSTATE(value) \
_Py_atomic_store_relaxed(&_PyThreadState_Current, (uintptr_t)(value))
#define GET_INTERP_STATE() \
(GET_TSTATE()->interp)
/* --------------------------------------------------------------------------
CAUTION
Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A
number of these functions are advertised as safe to call when the GIL isn't
held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL
to avoid the expense of doing their own locking).
-------------------------------------------------------------------------- */
#ifdef HAVE_DLOPEN
#ifdef HAVE_DLFCN_H
#include <dlfcn.h>
#endif
#if !HAVE_DECL_RTLD_LAZY
#define RTLD_LAZY 1
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
int _PyGILState_check_enabled = 1;
#ifdef WITH_THREAD
#include "pythread.h"
static PyThread_type_lock head_mutex = NULL; /* Protects interp->tstate_head */
#define HEAD_INIT() (void)(head_mutex || (head_mutex = PyThread_allocate_lock()))
#define HEAD_LOCK() PyThread_acquire_lock(head_mutex, WAIT_LOCK)
#define HEAD_UNLOCK() PyThread_release_lock(head_mutex)
/* The single PyInterpreterState used by this process'
GILState implementation
*/
static PyInterpreterState *autoInterpreterState = NULL;
static int autoTLSkey = -1;
#else
#define HEAD_INIT() /* Nothing */
#define HEAD_LOCK() /* Nothing */
#define HEAD_UNLOCK() /* Nothing */
#endif
static PyInterpreterState *interp_head = NULL;
static __PyCodeExtraState *coextra_head = NULL;
/* Assuming the current thread holds the GIL, this is the
PyThreadState for the current thread. */
_Py_atomic_address _PyThreadState_Current = {0};
PyThreadFrameGetter _PyThreadState_GetFrame = NULL;
#ifdef WITH_THREAD
static void _PyGILState_NoteThreadState(PyThreadState* tstate);
#endif
PyInterpreterState *
PyInterpreterState_New(void)
{
PyInterpreterState *interp = (PyInterpreterState *)
PyMem_RawMalloc(sizeof(PyInterpreterState));
if (interp != NULL) {
__PyCodeExtraState* coextra = PyMem_RawMalloc(sizeof(__PyCodeExtraState));
if (coextra == NULL) {
PyMem_RawFree(interp);
return NULL;
}
HEAD_INIT();
#ifdef WITH_THREAD
if (head_mutex == NULL)
Py_FatalError("Can't initialize threads for interpreter");
#endif
interp->modules = NULL;
interp->modules_by_index = NULL;
interp->sysdict = NULL;
interp->builtins = NULL;
interp->builtins_copy = NULL;
interp->tstate_head = NULL;
interp->codec_search_path = NULL;
interp->codec_search_cache = NULL;
interp->codec_error_registry = NULL;
interp->codecs_initialized = 0;
interp->fscodec_initialized = 0;
interp->importlib = NULL;
interp->import_func = NULL;
interp->eval_frame = _PyEval_EvalFrameDefault;
coextra->co_extra_user_count = 0;
coextra->interp = interp;
#ifdef HAVE_DLOPEN
#if HAVE_DECL_RTLD_NOW
interp->dlopenflags = RTLD_NOW;
#else
interp->dlopenflags = RTLD_LAZY;
#endif
#endif
HEAD_LOCK();
interp->next = interp_head;
interp_head = interp;
coextra->next = coextra_head;
coextra_head = coextra;
HEAD_UNLOCK();
}
return interp;
}
void
PyInterpreterState_Clear(PyInterpreterState *interp)
{
PyThreadState *p;
HEAD_LOCK();
for (p = interp->tstate_head; p != NULL; p = p->next)
PyThreadState_Clear(p);
HEAD_UNLOCK();
Py_CLEAR(interp->codec_search_path);
Py_CLEAR(interp->codec_search_cache);
Py_CLEAR(interp->codec_error_registry);
Py_CLEAR(interp->modules);
Py_CLEAR(interp->modules_by_index);
Py_CLEAR(interp->sysdict);
Py_CLEAR(interp->builtins);
Py_CLEAR(interp->builtins_copy);
Py_CLEAR(interp->importlib);
Py_CLEAR(interp->import_func);
}
static void
zapthreads(PyInterpreterState *interp)
{
PyThreadState *p;
/* No need to lock the mutex here because this should only happen
when the threads are all really dead (XXX famous last words). */
while ((p = interp->tstate_head) != NULL) {
PyThreadState_Delete(p);
}
}
void
PyInterpreterState_Delete(PyInterpreterState *interp)
{
PyInterpreterState **p;
__PyCodeExtraState **pextra;
zapthreads(interp);
HEAD_LOCK();
for (p = &interp_head; /* N/A */; p = &(*p)->next) {
if (*p == NULL)
Py_FatalError(
"PyInterpreterState_Delete: invalid interp");
if (*p == interp)
break;
}
if (interp->tstate_head != NULL)
Py_FatalError("PyInterpreterState_Delete: remaining threads");
*p = interp->next;
for (pextra = &coextra_head; ; pextra = &(*pextra)->next) {
if (*pextra == NULL)
Py_FatalError(
"PyInterpreterState_Delete: invalid extra");
__PyCodeExtraState* extra = *pextra;
if (extra->interp == interp) {
*pextra = extra->next;
PyMem_RawFree(extra);
break;
}
}
HEAD_UNLOCK();
PyMem_RawFree(interp);
#ifdef WITH_THREAD
if (interp_head == NULL && head_mutex != NULL) {
PyThread_free_lock(head_mutex);
head_mutex = NULL;
}
#endif
}
/* Default implementation for _PyThreadState_GetFrame */
static struct _frame *
threadstate_getframe(PyThreadState *self)
{
return self->frame;
}
static PyThreadState *
new_threadstate(PyInterpreterState *interp, int init)
{
PyThreadState *tstate = (PyThreadState *)PyMem_RawMalloc(sizeof(PyThreadState));
if (_PyThreadState_GetFrame == NULL)
_PyThreadState_GetFrame = threadstate_getframe;
if (tstate != NULL) {
tstate->interp = interp;
tstate->frame = NULL;
tstate->recursion_depth = 0;
tstate->overflowed = 0;
tstate->recursion_critical = 0;
tstate->tracing = 0;
tstate->use_tracing = 0;
tstate->gilstate_counter = 0;
tstate->async_exc = NULL;
#ifdef WITH_THREAD
tstate->thread_id = PyThread_get_thread_ident();
#else
tstate->thread_id = 0;
#endif
tstate->dict = NULL;
tstate->curexc_type = NULL;
tstate->curexc_value = NULL;
tstate->curexc_traceback = NULL;
tstate->exc_type = NULL;
tstate->exc_value = NULL;
tstate->exc_traceback = NULL;
tstate->c_profilefunc = NULL;
tstate->c_tracefunc = NULL;
tstate->c_profileobj = NULL;
tstate->c_traceobj = NULL;
tstate->trash_delete_nesting = 0;
tstate->trash_delete_later = NULL;
tstate->on_delete = NULL;
tstate->on_delete_data = NULL;
tstate->coroutine_wrapper = NULL;
tstate->in_coroutine_wrapper = 0;
tstate->async_gen_firstiter = NULL;
tstate->async_gen_finalizer = NULL;
if (init)
_PyThreadState_Init(tstate);
HEAD_LOCK();
tstate->prev = NULL;
tstate->next = interp->tstate_head;
if (tstate->next)
tstate->next->prev = tstate;
interp->tstate_head = tstate;
HEAD_UNLOCK();
}
return tstate;
}
PyThreadState *
PyThreadState_New(PyInterpreterState *interp)
{
return new_threadstate(interp, 1);
}
PyThreadState *
_PyThreadState_Prealloc(PyInterpreterState *interp)
{
return new_threadstate(interp, 0);
}
void
_PyThreadState_Init(PyThreadState *tstate)
{
#ifdef WITH_THREAD
_PyGILState_NoteThreadState(tstate);
#endif
}
PyObject*
PyState_FindModule(struct PyModuleDef* module)
{
Py_ssize_t index = module->m_base.m_index;
PyInterpreterState *state = GET_INTERP_STATE();
PyObject *res;
if (module->m_slots) {
return NULL;
}
if (index == 0)
return NULL;
if (state->modules_by_index == NULL)
return NULL;
if (index >= PyList_GET_SIZE(state->modules_by_index))
return NULL;
res = PyList_GET_ITEM(state->modules_by_index, index);
return res==Py_None ? NULL : res;
}
int
_PyState_AddModule(PyObject* module, struct PyModuleDef* def)
{
PyInterpreterState *state;
if (!def) {
assert(PyErr_Occurred());
return -1;
}
if (def->m_slots) {
PyErr_SetString(PyExc_SystemError,
"PyState_AddModule called on module with slots");
return -1;
}
state = GET_INTERP_STATE();
if (!state->modules_by_index) {
state->modules_by_index = PyList_New(0);
if (!state->modules_by_index)
return -1;
}
while(PyList_GET_SIZE(state->modules_by_index) <= def->m_base.m_index)
if (PyList_Append(state->modules_by_index, Py_None) < 0)
return -1;
Py_INCREF(module);
return PyList_SetItem(state->modules_by_index,
def->m_base.m_index, module);
}
int
PyState_AddModule(PyObject* module, struct PyModuleDef* def)
{
Py_ssize_t index;
PyInterpreterState *state = GET_INTERP_STATE();
if (!def) {
Py_FatalError("PyState_AddModule: Module Definition is NULL");
return -1;
}
index = def->m_base.m_index;
if (state->modules_by_index) {
if(PyList_GET_SIZE(state->modules_by_index) >= index) {
if(module == PyList_GET_ITEM(state->modules_by_index, index)) {
Py_FatalError("PyState_AddModule: Module already added!");
return -1;
}
}
}
return _PyState_AddModule(module, def);
}
int
PyState_RemoveModule(struct PyModuleDef* def)
{
PyInterpreterState *state;
Py_ssize_t index = def->m_base.m_index;
if (def->m_slots) {
PyErr_SetString(PyExc_SystemError,
"PyState_RemoveModule called on module with slots");
return -1;
}
state = GET_INTERP_STATE();
if (index == 0) {
Py_FatalError("PyState_RemoveModule: Module index invalid.");
return -1;
}
if (state->modules_by_index == NULL) {
Py_FatalError("PyState_RemoveModule: Interpreters module-list not acessible.");
return -1;
}
if (index > PyList_GET_SIZE(state->modules_by_index)) {
Py_FatalError("PyState_RemoveModule: Module index out of bounds.");
return -1;
}
Py_INCREF(Py_None);
return PyList_SetItem(state->modules_by_index, index, Py_None);
}
/* used by import.c:PyImport_Cleanup */
void
_PyState_ClearModules(void)
{
PyInterpreterState *state = GET_INTERP_STATE();
if (state->modules_by_index) {
Py_ssize_t i;
for (i = 0; i < PyList_GET_SIZE(state->modules_by_index); i++) {
PyObject *m = PyList_GET_ITEM(state->modules_by_index, i);
if (PyModule_Check(m)) {
/* cleanup the saved copy of module dicts */
PyModuleDef *md = PyModule_GetDef(m);
if (md)
Py_CLEAR(md->m_base.m_copy);
}
}
/* Setting modules_by_index to NULL could be dangerous, so we
clear the list instead. */
if (PyList_SetSlice(state->modules_by_index,
0, PyList_GET_SIZE(state->modules_by_index),
NULL))
PyErr_WriteUnraisable(state->modules_by_index);
}
}
void
PyThreadState_Clear(PyThreadState *tstate)
{
if (Py_VerboseFlag && tstate->frame != NULL)
fprintf(stderr,
"PyThreadState_Clear: warning: thread still has a frame\n");
Py_CLEAR(tstate->frame);
Py_CLEAR(tstate->dict);
Py_CLEAR(tstate->async_exc);
Py_CLEAR(tstate->curexc_type);
Py_CLEAR(tstate->curexc_value);
Py_CLEAR(tstate->curexc_traceback);
Py_CLEAR(tstate->exc_type);
Py_CLEAR(tstate->exc_value);
Py_CLEAR(tstate->exc_traceback);
tstate->c_profilefunc = NULL;
tstate->c_tracefunc = NULL;
Py_CLEAR(tstate->c_profileobj);
Py_CLEAR(tstate->c_traceobj);
Py_CLEAR(tstate->coroutine_wrapper);
Py_CLEAR(tstate->async_gen_firstiter);
Py_CLEAR(tstate->async_gen_finalizer);
}
/* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
static void
tstate_delete_common(PyThreadState *tstate)
{
PyInterpreterState *interp;
if (tstate == NULL)
Py_FatalError("PyThreadState_Delete: NULL tstate");
interp = tstate->interp;
if (interp == NULL)
Py_FatalError("PyThreadState_Delete: NULL interp");
HEAD_LOCK();
if (tstate->prev)
tstate->prev->next = tstate->next;
else
interp->tstate_head = tstate->next;
if (tstate->next)
tstate->next->prev = tstate->prev;
HEAD_UNLOCK();
if (tstate->on_delete != NULL) {
tstate->on_delete(tstate->on_delete_data);
}
PyMem_RawFree(tstate);
}
void
PyThreadState_Delete(PyThreadState *tstate)
{
if (tstate == GET_TSTATE())
Py_FatalError("PyThreadState_Delete: tstate is still current");
#ifdef WITH_THREAD
if (autoInterpreterState && PyThread_get_key_value(autoTLSkey) == tstate)
PyThread_delete_key_value(autoTLSkey);
#endif /* WITH_THREAD */
tstate_delete_common(tstate);
}
#ifdef WITH_THREAD
void
PyThreadState_DeleteCurrent()
{
PyThreadState *tstate = GET_TSTATE();
if (tstate == NULL)
Py_FatalError(
"PyThreadState_DeleteCurrent: no current tstate");
tstate_delete_common(tstate);
if (autoInterpreterState && PyThread_get_key_value(autoTLSkey) == tstate)
PyThread_delete_key_value(autoTLSkey);
SET_TSTATE(NULL);
PyEval_ReleaseLock();
}
#endif /* WITH_THREAD */
/*
* Delete all thread states except the one passed as argument.
* Note that, if there is a current thread state, it *must* be the one
* passed as argument. Also, this won't touch any other interpreters
* than the current one, since we don't know which thread state should
be kept in those other interpreters.
*/
void
_PyThreadState_DeleteExcept(PyThreadState *tstate)
{
PyInterpreterState *interp = tstate->interp;
PyThreadState *p, *next, *garbage;
HEAD_LOCK();
/* Remove all thread states, except tstate, from the linked list of
thread states. This will allow calling PyThreadState_Clear()
without holding the lock. */
garbage = interp->tstate_head;
if (garbage == tstate)
garbage = tstate->next;
if (tstate->prev)
tstate->prev->next = tstate->next;
if (tstate->next)
tstate->next->prev = tstate->prev;
tstate->prev = tstate->next = NULL;
interp->tstate_head = tstate;
HEAD_UNLOCK();
/* Clear and deallocate all stale thread states. Even if this
executes Python code, we should be safe since it executes
in the current thread, not one of the stale threads. */
for (p = garbage; p; p = next) {
next = p->next;
PyThreadState_Clear(p);
PyMem_RawFree(p);
}
}
PyThreadState *
_PyThreadState_UncheckedGet(void)
{
return GET_TSTATE();
}
PyThreadState *
PyThreadState_Get(void)
{
PyThreadState *tstate = GET_TSTATE();
if (tstate == NULL)
Py_FatalError("PyThreadState_Get: no current thread");
return tstate;
}
PyThreadState *
PyThreadState_Swap(PyThreadState *newts)
{
PyThreadState *oldts = GET_TSTATE();
SET_TSTATE(newts);
/* It should not be possible for more than one thread state
to be used for a thread. Check this the best we can in debug
builds.
*/
#if defined(Py_DEBUG) && defined(WITH_THREAD)
if (newts) {
/* This can be called from PyEval_RestoreThread(). Similar
to it, we need to ensure errno doesn't change.
*/
int err = errno;
PyThreadState *check = PyGILState_GetThisThreadState();
if (check && check->interp == newts->interp && check != newts)
Py_FatalError("Invalid thread state for this thread");
errno = err;
}
#endif
return oldts;
}
__PyCodeExtraState*
__PyCodeExtraState_Get(void) {
PyInterpreterState* interp = PyThreadState_Get()->interp;
HEAD_LOCK();
for (__PyCodeExtraState* cur = coextra_head; cur != NULL; cur = cur->next) {
if (cur->interp == interp) {
HEAD_UNLOCK();
return cur;
}
}
HEAD_UNLOCK();
Py_FatalError("__PyCodeExtraState_Get: no code state for interpreter");
return NULL;
}
/* An extension mechanism to store arbitrary additional per-thread state.
PyThreadState_GetDict() returns a dictionary that can be used to hold such
state; the caller should pick a unique key and store its state there. If
PyThreadState_GetDict() returns NULL, an exception has *not* been raised
and the caller should assume no per-thread state is available. */
PyObject *
PyThreadState_GetDict(void)
{
PyThreadState *tstate = GET_TSTATE();
if (tstate == NULL)
return NULL;
if (tstate->dict == NULL) {
PyObject *d;
tstate->dict = d = PyDict_New();
if (d == NULL)
PyErr_Clear();
}
return tstate->dict;
}
/* Asynchronously raise an exception in a thread.
Requested by Just van Rossum and Alex Martelli.
To prevent naive misuse, you must write your own extension
to call this, or use ctypes. Must be called with the GIL held.
Returns the number of tstates modified (normally 1, but 0 if `id` didn't
match any known thread id). Can be called with exc=NULL to clear an
existing async exception. This raises no exceptions. */
int
PyThreadState_SetAsyncExc(long id, PyObject *exc) {
PyInterpreterState *interp = GET_INTERP_STATE();
PyThreadState *p;
/* Although the GIL is held, a few C API functions can be called
* without the GIL held, and in particular some that create and
* destroy thread and interpreter states. Those can mutate the
* list of thread states we're traversing, so to prevent that we lock
* head_mutex for the duration.
*/
HEAD_LOCK();
for (p = interp->tstate_head; p != NULL; p = p->next) {
if (p->thread_id == id) {
/* Tricky: we need to decref the current value
* (if any) in p->async_exc, but that can in turn
* allow arbitrary Python code to run, including
* perhaps calls to this function. To prevent
* deadlock, we need to release head_mutex before
* the decref.
*/
PyObject *old_exc = p->async_exc;
Py_XINCREF(exc);
p->async_exc = exc;
HEAD_UNLOCK();
Py_XDECREF(old_exc);
_PyEval_SignalAsyncExc();
return 1;
}
}
HEAD_UNLOCK();
return 0;
}
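/* Illustrative sketch (hypothetical demo helper; the GIL must be held):
   scheduling KeyboardInterrupt in the thread whose id is given, using
   PyThreadState_SetAsyncExc() above. */
static int demo_interrupt_thread(long thread_id)
{
    /* Return value is the number of thread states modified:
       1 on success, 0 if no thread matched the id. */
    return PyThreadState_SetAsyncExc(thread_id, PyExc_KeyboardInterrupt);
}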
/* Routines for advanced debuggers, requested by David Beazley.
Don't use unless you know what you are doing! */
PyInterpreterState *
PyInterpreterState_Head(void)
{
return interp_head;
}
PyInterpreterState *
PyInterpreterState_Next(PyInterpreterState *interp) {
return interp->next;
}
PyThreadState *
PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
return interp->tstate_head;
}
PyThreadState *
PyThreadState_Next(PyThreadState *tstate) {
return tstate->next;
}
/* The implementation of sys._current_frames(). This is intended to be
called with the GIL held, as it will be when called via
sys._current_frames(). It's possible it would work fine even without
the GIL held, but haven't thought enough about that.
*/
PyObject *
_PyThread_CurrentFrames(void)
{
PyObject *result;
PyInterpreterState *i;
result = PyDict_New();
if (result == NULL)
return NULL;
/* for i in all interpreters:
* for t in all of i's thread states:
* if t's frame isn't NULL, map t's id to its frame
* Because these lists can mutate even when the GIL is held, we
* need to grab head_mutex for the duration.
*/
HEAD_LOCK();
for (i = interp_head; i != NULL; i = i->next) {
PyThreadState *t;
for (t = i->tstate_head; t != NULL; t = t->next) {
PyObject *id;
int stat;
struct _frame *frame = t->frame;
if (frame == NULL)
continue;
id = PyLong_FromLong(t->thread_id);
if (id == NULL)
goto Fail;
stat = PyDict_SetItem(result, id, (PyObject *)frame);
Py_DECREF(id);
if (stat < 0)
goto Fail;
}
}
HEAD_UNLOCK();
return result;
Fail:
HEAD_UNLOCK();
Py_DECREF(result);
return NULL;
}
/* Python "auto thread state" API. */
#ifdef WITH_THREAD
/* Keep this as a static, as it is not reliable! It can only
ever be compared to the state for the *current* thread.
* If not equal, then it doesn't matter that the actual
value may change immediately after comparison, as it can't
possibly change to the current thread's state.
* If equal, then the current thread holds the lock, so the value can't
change until we yield the lock.
*/
static int
PyThreadState_IsCurrent(PyThreadState *tstate)
{
/* Must be the tstate for this thread */
assert(PyGILState_GetThisThreadState()==tstate);
return tstate == GET_TSTATE();
}
/* Internal initialization/finalization functions called by
Py_Initialize/Py_FinalizeEx
*/
void
_PyGILState_Init(PyInterpreterState *i, PyThreadState *t)
{
assert(i && t); /* must init with valid states */
autoTLSkey = PyThread_create_key();
if (autoTLSkey == -1)
Py_FatalError("Could not allocate TLS entry");
autoInterpreterState = i;
assert(PyThread_get_key_value(autoTLSkey) == NULL);
assert(t->gilstate_counter == 0);
_PyGILState_NoteThreadState(t);
}
PyInterpreterState *
_PyGILState_GetInterpreterStateUnsafe(void)
{
return autoInterpreterState;
}
void
_PyGILState_Fini(void)
{
PyThread_delete_key(autoTLSkey);
autoTLSkey = -1;
autoInterpreterState = NULL;
}
/* Reset the TLS key - called by PyOS_AfterFork().
* This should not be necessary, but some - buggy - pthread implementations
* don't reset TLS upon fork(), see issue #10517.
*/
void
_PyGILState_Reinit(void)
{
#ifdef WITH_THREAD
head_mutex = NULL;
HEAD_INIT();
#endif
PyThreadState *tstate = PyGILState_GetThisThreadState();
PyThread_delete_key(autoTLSkey);
if ((autoTLSkey = PyThread_create_key()) == -1)
Py_FatalError("Could not allocate TLS entry");
/* If the thread had an associated auto thread state, reassociate it with
* the new key. */
if (tstate && PyThread_set_key_value(autoTLSkey, (void *)tstate) < 0)
Py_FatalError("Couldn't create autoTLSkey mapping");
}
/* When a thread state is created for a thread by some mechanism other than
PyGILState_Ensure, it's important that the GILState machinery knows about
it so it doesn't try to create another thread state for the thread (this is
a better fix for SF bug #1010677 than the first one attempted).
*/
static void
_PyGILState_NoteThreadState(PyThreadState* tstate)
{
/* If autoTLSkey isn't initialized, this must be the very first
threadstate created in Py_Initialize(). Don't do anything for now
(we'll be back here when _PyGILState_Init is called). */
if (!autoInterpreterState)
return;
/* Stick the thread state for this thread in thread local storage.
The only situation where you can legitimately have more than one
thread state for an OS level thread is when there are multiple
interpreters.
You shouldn't really be using the PyGILState_ APIs anyway (see issues
#10915 and #15751).
The first thread state created for that given OS level thread will
"win", which seems reasonable behaviour.
*/
if (PyThread_get_key_value(autoTLSkey) == NULL) {
if (PyThread_set_key_value(autoTLSkey, (void *)tstate) < 0)
Py_FatalError("Couldn't create autoTLSkey mapping");
}
/* PyGILState_Release must not try to delete this thread state. */
tstate->gilstate_counter = 1;
}
/* The public functions */
PyThreadState *
PyGILState_GetThisThreadState(void)
{
if (autoInterpreterState == NULL)
return NULL;
return (PyThreadState *)PyThread_get_key_value(autoTLSkey);
}
int
PyGILState_Check(void)
{
PyThreadState *tstate;
if (!_PyGILState_check_enabled)
return 1;
if (autoTLSkey == -1)
return 1;
tstate = GET_TSTATE();
if (tstate == NULL)
return 0;
return (tstate == PyGILState_GetThisThreadState());
}
PyGILState_STATE
PyGILState_Ensure(void)
{
int current;
PyThreadState *tcur;
int need_init_threads = 0;
/* Note that we do not auto-init Python here - apart from
potential races with 2 threads auto-initializing, pep-311
spells out other issues. Embedders are expected to have
called Py_Initialize() and usually PyEval_InitThreads().
*/
assert(autoInterpreterState); /* Py_Initialize() hasn't been called! */
tcur = (PyThreadState *)PyThread_get_key_value(autoTLSkey);
if (tcur == NULL) {
need_init_threads = 1;
/* Create a new thread state for this thread */
tcur = PyThreadState_New(autoInterpreterState);
if (tcur == NULL)
Py_FatalError("Couldn't create thread-state for new thread");
/* This is our thread state! We'll need to delete it in the
matching call to PyGILState_Release(). */
tcur->gilstate_counter = 0;
current = 0; /* new thread state is never current */
}
else {
current = PyThreadState_IsCurrent(tcur);
}
if (current == 0) {
PyEval_RestoreThread(tcur);
}
/* Update our counter in the thread-state - no need for locks:
- tcur will remain valid as we hold the GIL.
- the counter is safe as we are the only thread "allowed"
to modify this value
*/
++tcur->gilstate_counter;
if (need_init_threads) {
/* At startup, Python has no concrete GIL. If PyGILState_Ensure() is
called from a new thread for the first time, we need to create the
GIL. */
PyEval_InitThreads();
}
return current ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}
void
PyGILState_Release(PyGILState_STATE oldstate)
{
PyThreadState *tcur = (PyThreadState *)PyThread_get_key_value(
autoTLSkey);
if (tcur == NULL)
Py_FatalError("auto-releasing thread-state, "
"but no thread-state for this thread");
/* We must hold the GIL and have our thread state current */
/* XXX - remove the check - the assert should be fine,
but while this is very new (April 2003), the extra check
by release-only users can't hurt.
*/
if (! PyThreadState_IsCurrent(tcur))
Py_FatalError("This thread state must be current when releasing");
assert(PyThreadState_IsCurrent(tcur));
--tcur->gilstate_counter;
assert(tcur->gilstate_counter >= 0); /* illegal counter value */
/* If we're going to destroy this thread-state, we must
* clear it while the GIL is held, as destructors may run.
*/
if (tcur->gilstate_counter == 0) {
/* can't have been locked when we created it */
assert(oldstate == PyGILState_UNLOCKED);
PyThreadState_Clear(tcur);
/* Delete the thread-state. Note this releases the GIL too!
* It's vital that the GIL be held here, to avoid shutdown
* races; see bugs 225673 and 1061968 (that nasty bug has a
* habit of coming back).
*/
PyThreadState_DeleteCurrent();
}
/* Release the lock if necessary */
else if (oldstate == PyGILState_UNLOCKED)
PyEval_SaveThread();
}
#endif /* WITH_THREAD */
#ifdef __cplusplus
}
#endif
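/* Illustrative usage sketch (hypothetical embedding code, assumes
   Py_Initialize() has already run): the canonical pattern for calling into
   the interpreter from a foreign thread via the GILState API above. */
#include "Python.h"

static void demo_call_from_foreign_thread(void)
{
    PyGILState_STATE st = PyGILState_Ensure();  /* acquire GIL, bind a tstate */
    PyRun_SimpleString("print('hello from a non-Python thread')");
    PyGILState_Release(st);                     /* restore the previous state */
}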

26
third_party/python/Python/pystrcmp.c vendored Normal file
View file

@ -0,0 +1,26 @@
/* Cross platform case insensitive string compare functions
*/
#include "Python.h"
int
PyOS_mystrnicmp(const char *s1, const char *s2, Py_ssize_t size)
{
if (size == 0)
return 0;
while ((--size > 0) &&
(tolower((unsigned)*s1) == tolower((unsigned)*s2))) {
if (!*s1++ || !*s2++)
break;
}
return tolower((unsigned)*s1) - tolower((unsigned)*s2);
}
int
PyOS_mystricmp(const char *s1, const char *s2)
{
while (*s1 && (tolower((unsigned)*s1++) == tolower((unsigned)*s2++))) {
;
}
return (tolower((unsigned)*s1) - tolower((unsigned)*s2));
}
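/* Illustrative sketch (hypothetical demo function): the helpers above compare
   ASCII strings case-insensitively, with an optional length bound. */
#include <assert.h>
#include "Python.h"

static void demo_case_insensitive_compare(void)
{
    assert(PyOS_mystricmp("UTF-8", "utf-8") == 0);
    assert(PyOS_mystrnicmp("Latin-1-extra", "LATIN-1", 7) == 0);
    assert(PyOS_mystricmp("abc", "abd") < 0);
}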

61
third_party/python/Python/pystrhex.c vendored Normal file
View file

@ -0,0 +1,61 @@
/* bytes to hex implementation */
#include "Python.h"
static PyObject *_Py_strhex_impl(const char* argbuf, const Py_ssize_t arglen,
int return_bytes)
{
PyObject *retval;
Py_UCS1* retbuf;
Py_ssize_t i, j;
assert(arglen >= 0);
if (arglen > PY_SSIZE_T_MAX / 2)
return PyErr_NoMemory();
if (return_bytes) {
/* If _PyBytes_FromSize() were public we could avoid malloc+copy. */
retbuf = (Py_UCS1*) PyMem_Malloc(arglen*2);
if (!retbuf)
return PyErr_NoMemory();
retval = NULL; /* silence a compiler warning, assigned later. */
} else {
retval = PyUnicode_New(arglen*2, 127);
if (!retval)
return NULL;
retbuf = PyUnicode_1BYTE_DATA(retval);
}
/* make hex version of string, taken from shamodule.c */
for (i=j=0; i < arglen; i++) {
unsigned char c;
c = (argbuf[i] >> 4) & 0xf;
retbuf[j++] = Py_hexdigits[c];
c = argbuf[i] & 0xf;
retbuf[j++] = Py_hexdigits[c];
}
if (return_bytes) {
retval = PyBytes_FromStringAndSize((const char *)retbuf, arglen*2);
PyMem_Free(retbuf);
}
#ifdef Py_DEBUG
else {
assert(_PyUnicode_CheckConsistency(retval, 1));
}
#endif
return retval;
}
PyAPI_FUNC(PyObject *) _Py_strhex(const char* argbuf, const Py_ssize_t arglen)
{
return _Py_strhex_impl(argbuf, arglen, 0);
}
/* Same as above but returns a bytes() instead of str() to avoid the
* need to decode the str() when bytes are needed. */
PyAPI_FUNC(PyObject *) _Py_strhex_bytes(const char* argbuf, const Py_ssize_t arglen)
{
return _Py_strhex_impl(argbuf, arglen, 1);
}
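/* Illustrative usage sketch (assumes an initialized interpreter; hypothetical
   demo function): hex-encoding a small buffer with the private helper above. */
#include <stdio.h>
#include "Python.h"

static void demo_strhex(void)
{
    const char buf[4] = {(char)0xde, (char)0xad, (char)0xbe, (char)0xef};
    PyObject *s = _Py_strhex(buf, sizeof(buf));  /* str object: 'deadbeef' */
    if (s != NULL) {
        PyObject_Print(s, stdout, Py_PRINT_RAW);
        Py_DECREF(s);
    }
}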

1307
third_party/python/Python/pystrtod.c vendored Normal file

File diff suppressed because it is too large Load diff

1604
third_party/python/Python/pythonrun.c vendored Normal file

File diff suppressed because it is too large Load diff

854
third_party/python/Python/pytime.c vendored Normal file
View file

@ -0,0 +1,854 @@
#include "Python.h"
#ifdef MS_WINDOWS
#include <windows.h>
#endif
#if defined(__APPLE__)
#include <mach/mach_time.h> /* mach_absolute_time(), mach_timebase_info() */
#endif
#define _PyTime_check_mul_overflow(a, b) \
(assert(b > 0), \
(_PyTime_t)(a) < _PyTime_MIN / (_PyTime_t)(b) \
|| _PyTime_MAX / (_PyTime_t)(b) < (_PyTime_t)(a))
/* To millisecond (10^-3) */
#define SEC_TO_MS 1000
/* To microseconds (10^-6) */
#define MS_TO_US 1000
#define SEC_TO_US (SEC_TO_MS * MS_TO_US)
/* To nanoseconds (10^-9) */
#define US_TO_NS 1000
#define MS_TO_NS (MS_TO_US * US_TO_NS)
#define SEC_TO_NS (SEC_TO_MS * MS_TO_NS)
/* Conversion from nanoseconds */
#define NS_TO_MS (1000 * 1000)
#define NS_TO_US (1000)
static void
error_time_t_overflow(void)
{
PyErr_SetString(PyExc_OverflowError,
"timestamp out of range for platform time_t");
}
time_t
_PyLong_AsTime_t(PyObject *obj)
{
#if SIZEOF_TIME_T == SIZEOF_LONG_LONG
long long val;
val = PyLong_AsLongLong(obj);
#else
long val;
Py_BUILD_ASSERT(sizeof(time_t) <= sizeof(long));
val = PyLong_AsLong(obj);
#endif
if (val == -1 && PyErr_Occurred()) {
if (PyErr_ExceptionMatches(PyExc_OverflowError))
error_time_t_overflow();
return -1;
}
return (time_t)val;
}
PyObject *
_PyLong_FromTime_t(time_t t)
{
#if SIZEOF_TIME_T == SIZEOF_LONG_LONG
return PyLong_FromLongLong((long long)t);
#else
Py_BUILD_ASSERT(sizeof(time_t) <= sizeof(long));
return PyLong_FromLong((long)t);
#endif
}
/* Round to nearest with ties going to nearest even integer
(_PyTime_ROUND_HALF_EVEN) */
static double
_PyTime_RoundHalfEven(double x)
{
double rounded = round(x);
if (fabs(x-rounded) == 0.5)
/* halfway case: round to even */
rounded = 2.0*round(x/2.0);
return rounded;
}
static double
_PyTime_Round(double x, _PyTime_round_t round)
{
/* volatile avoids optimization changing how numbers are rounded */
volatile double d;
d = x;
if (round == _PyTime_ROUND_HALF_EVEN){
d = _PyTime_RoundHalfEven(d);
}
else if (round == _PyTime_ROUND_CEILING){
d = ceil(d);
}
else if (round == _PyTime_ROUND_FLOOR) {
d = floor(d);
}
else {
assert(round == _PyTime_ROUND_UP);
d = (d >= 0.0) ? ceil(d) : floor(d);
}
return d;
}
static int
_PyTime_DoubleToDenominator(double d, time_t *sec, long *numerator,
double denominator, _PyTime_round_t round)
{
double intpart;
/* volatile avoids optimization changing how numbers are rounded */
volatile double floatpart;
floatpart = modf(d, &intpart);
floatpart *= denominator;
floatpart = _PyTime_Round(floatpart, round);
if (floatpart >= denominator) {
floatpart -= denominator;
intpart += 1.0;
}
else if (floatpart < 0) {
floatpart += denominator;
intpart -= 1.0;
}
assert(0.0 <= floatpart && floatpart < denominator);
if (!_Py_InIntegralTypeRange(time_t, intpart)) {
error_time_t_overflow();
return -1;
}
*sec = (time_t)intpart;
*numerator = (long)floatpart;
return 0;
}
static int
_PyTime_ObjectToDenominator(PyObject *obj, time_t *sec, long *numerator,
double denominator, _PyTime_round_t round)
{
assert(denominator <= (double)LONG_MAX);
if (PyFloat_Check(obj)) {
double d = PyFloat_AsDouble(obj);
if (Py_IS_NAN(d)) {
*numerator = 0;
PyErr_SetString(PyExc_ValueError, "Invalid value NaN (not a number)");
return -1;
}
return _PyTime_DoubleToDenominator(d, sec, numerator,
denominator, round);
}
else {
*sec = _PyLong_AsTime_t(obj);
*numerator = 0;
if (*sec == (time_t)-1 && PyErr_Occurred())
return -1;
return 0;
}
}
int
_PyTime_ObjectToTime_t(PyObject *obj, time_t *sec, _PyTime_round_t round)
{
if (PyFloat_Check(obj)) {
double intpart;
/* volatile avoids optimization changing how numbers are rounded */
volatile double d;
d = PyFloat_AsDouble(obj);
if (Py_IS_NAN(d)) {
PyErr_SetString(PyExc_ValueError, "Invalid value NaN (not a number)");
return -1;
}
d = _PyTime_Round(d, round);
(void)modf(d, &intpart);
if (!_Py_InIntegralTypeRange(time_t, intpart)) {
error_time_t_overflow();
return -1;
}
*sec = (time_t)intpart;
return 0;
}
else {
*sec = _PyLong_AsTime_t(obj);
if (*sec == (time_t)-1 && PyErr_Occurred())
return -1;
return 0;
}
}
int
_PyTime_ObjectToTimespec(PyObject *obj, time_t *sec, long *nsec,
_PyTime_round_t round)
{
int res;
res = _PyTime_ObjectToDenominator(obj, sec, nsec, 1e9, round);
if (res == 0) {
assert(0 <= *nsec && *nsec < SEC_TO_NS);
}
return res;
}
int
_PyTime_ObjectToTimeval(PyObject *obj, time_t *sec, long *usec,
_PyTime_round_t round)
{
int res;
res = _PyTime_ObjectToDenominator(obj, sec, usec, 1e6, round);
if (res == 0) {
assert(0 <= *usec && *usec < SEC_TO_US);
}
return res;
}
static void
_PyTime_overflow(void)
{
PyErr_SetString(PyExc_OverflowError,
"timestamp too large to convert to C _PyTime_t");
}
_PyTime_t
_PyTime_FromSeconds(int seconds)
{
_PyTime_t t;
t = (_PyTime_t)seconds;
/* ensure that integer overflow cannot happen, int type should have 32
bits, whereas _PyTime_t type has at least 64 bits (SEC_TO_MS takes 30
bits). */
Py_BUILD_ASSERT(INT_MAX <= _PyTime_MAX / SEC_TO_NS);
Py_BUILD_ASSERT(INT_MIN >= _PyTime_MIN / SEC_TO_NS);
assert((t >= 0 && t <= _PyTime_MAX / SEC_TO_NS)
|| (t < 0 && t >= _PyTime_MIN / SEC_TO_NS));
t *= SEC_TO_NS;
return t;
}
_PyTime_t
_PyTime_FromNanoseconds(long long ns)
{
_PyTime_t t;
Py_BUILD_ASSERT(sizeof(long long) <= sizeof(_PyTime_t));
t = Py_SAFE_DOWNCAST(ns, long long, _PyTime_t);
return t;
}
#ifdef HAVE_CLOCK_GETTIME
static int
_PyTime_FromTimespec(_PyTime_t *tp, struct timespec *ts, int raise)
{
_PyTime_t t;
int res = 0;
Py_BUILD_ASSERT(sizeof(ts->tv_sec) <= sizeof(_PyTime_t));
t = (_PyTime_t)ts->tv_sec;
if (_PyTime_check_mul_overflow(t, SEC_TO_NS)) {
if (raise)
_PyTime_overflow();
res = -1;
}
t = t * SEC_TO_NS;
t += ts->tv_nsec;
*tp = t;
return res;
}
#elif !defined(MS_WINDOWS)
static int
_PyTime_FromTimeval(_PyTime_t *tp, struct timeval *tv, int raise)
{
_PyTime_t t;
int res = 0;
Py_BUILD_ASSERT(sizeof(tv->tv_sec) <= sizeof(_PyTime_t));
t = (_PyTime_t)tv->tv_sec;
if (_PyTime_check_mul_overflow(t, SEC_TO_NS)) {
if (raise)
_PyTime_overflow();
res = -1;
}
t = t * SEC_TO_NS;
t += (_PyTime_t)tv->tv_usec * US_TO_NS;
*tp = t;
return res;
}
#endif
static int
_PyTime_FromFloatObject(_PyTime_t *t, double value, _PyTime_round_t round,
long unit_to_ns)
{
/* volatile avoids optimization changing how numbers are rounded */
volatile double d;
/* convert to a number of nanoseconds */
d = value;
d *= (double)unit_to_ns;
d = _PyTime_Round(d, round);
if (!_Py_InIntegralTypeRange(_PyTime_t, d)) {
_PyTime_overflow();
return -1;
}
*t = (_PyTime_t)d;
return 0;
}
static int
_PyTime_FromObject(_PyTime_t *t, PyObject *obj, _PyTime_round_t round,
long unit_to_ns)
{
if (PyFloat_Check(obj)) {
double d;
d = PyFloat_AsDouble(obj);
if (Py_IS_NAN(d)) {
PyErr_SetString(PyExc_ValueError, "Invalid value NaN (not a number)");
return -1;
}
return _PyTime_FromFloatObject(t, d, round, unit_to_ns);
}
else {
long long sec;
Py_BUILD_ASSERT(sizeof(long long) <= sizeof(_PyTime_t));
sec = PyLong_AsLongLong(obj);
if (sec == -1 && PyErr_Occurred()) {
if (PyErr_ExceptionMatches(PyExc_OverflowError))
_PyTime_overflow();
return -1;
}
if (_PyTime_check_mul_overflow(sec, unit_to_ns)) {
_PyTime_overflow();
return -1;
}
*t = sec * unit_to_ns;
return 0;
}
}
int
_PyTime_FromSecondsObject(_PyTime_t *t, PyObject *obj, _PyTime_round_t round)
{
return _PyTime_FromObject(t, obj, round, SEC_TO_NS);
}
int
_PyTime_FromMillisecondsObject(_PyTime_t *t, PyObject *obj, _PyTime_round_t round)
{
return _PyTime_FromObject(t, obj, round, MS_TO_NS);
}
double
_PyTime_AsSecondsDouble(_PyTime_t t)
{
/* volatile avoids optimization changing how numbers are rounded */
volatile double d;
if (t % SEC_TO_NS == 0) {
_PyTime_t secs;
/* Divide using integers to avoid rounding issues on the integer part.
1e-9 cannot be stored exactly in IEEE 64-bit. */
secs = t / SEC_TO_NS;
d = (double)secs;
}
else {
d = (double)t;
d /= 1e9;
}
return d;
}
PyObject *
_PyTime_AsNanosecondsObject(_PyTime_t t)
{
Py_BUILD_ASSERT(sizeof(long long) >= sizeof(_PyTime_t));
return PyLong_FromLongLong((long long)t);
}
static _PyTime_t
_PyTime_Divide(const _PyTime_t t, const _PyTime_t k,
const _PyTime_round_t round)
{
assert(k > 1);
if (round == _PyTime_ROUND_HALF_EVEN) {
_PyTime_t x, r, abs_r;
x = t / k;
r = t % k;
abs_r = Py_ABS(r);
if (abs_r > k / 2 || (abs_r == k / 2 && (Py_ABS(x) & 1))) {
if (t >= 0)
x++;
else
x--;
}
return x;
}
else if (round == _PyTime_ROUND_CEILING) {
if (t >= 0){
return (t + k - 1) / k;
}
else{
return t / k;
}
}
else if (round == _PyTime_ROUND_FLOOR){
if (t >= 0) {
return t / k;
}
else{
return (t - (k - 1)) / k;
}
}
else {
assert(round == _PyTime_ROUND_UP);
if (t >= 0) {
return (t + k - 1) / k;
}
else {
return (t - (k - 1)) / k;
}
}
}
_PyTime_t
_PyTime_AsMilliseconds(_PyTime_t t, _PyTime_round_t round)
{
return _PyTime_Divide(t, NS_TO_MS, round);
}
_PyTime_t
_PyTime_AsMicroseconds(_PyTime_t t, _PyTime_round_t round)
{
return _PyTime_Divide(t, NS_TO_US, round);
}
static int
_PyTime_AsTimeval_impl(_PyTime_t t, _PyTime_t *p_secs, int *p_us,
_PyTime_round_t round)
{
_PyTime_t secs, ns;
int usec;
int res = 0;
secs = t / SEC_TO_NS;
ns = t % SEC_TO_NS;
usec = (int)_PyTime_Divide(ns, US_TO_NS, round);
if (usec < 0) {
usec += SEC_TO_US;
if (secs != _PyTime_MIN)
secs -= 1;
else
res = -1;
}
else if (usec >= SEC_TO_US) {
usec -= SEC_TO_US;
if (secs != _PyTime_MAX)
secs += 1;
else
res = -1;
}
assert(0 <= usec && usec < SEC_TO_US);
*p_secs = secs;
*p_us = usec;
return res;
}
static int
_PyTime_AsTimevalStruct_impl(_PyTime_t t, struct timeval *tv,
_PyTime_round_t round, int raise)
{
_PyTime_t secs, secs2;
int us;
int res;
res = _PyTime_AsTimeval_impl(t, &secs, &us, round);
#ifdef MS_WINDOWS
tv->tv_sec = (long)secs;
#else
tv->tv_sec = secs;
#endif
tv->tv_usec = us;
secs2 = (_PyTime_t)tv->tv_sec;
if (res < 0 || secs2 != secs) {
if (raise)
error_time_t_overflow();
return -1;
}
return 0;
}
int
_PyTime_AsTimeval(_PyTime_t t, struct timeval *tv, _PyTime_round_t round)
{
return _PyTime_AsTimevalStruct_impl(t, tv, round, 1);
}
int
_PyTime_AsTimeval_noraise(_PyTime_t t, struct timeval *tv, _PyTime_round_t round)
{
return _PyTime_AsTimevalStruct_impl(t, tv, round, 0);
}
int
_PyTime_AsTimevalTime_t(_PyTime_t t, time_t *p_secs, int *us,
_PyTime_round_t round)
{
_PyTime_t secs;
int res;
res = _PyTime_AsTimeval_impl(t, &secs, us, round);
*p_secs = secs;
if (res < 0 || (_PyTime_t)*p_secs != secs) {
error_time_t_overflow();
return -1;
}
return 0;
}
#if defined(HAVE_CLOCK_GETTIME) || defined(HAVE_KQUEUE)
int
_PyTime_AsTimespec(_PyTime_t t, struct timespec *ts)
{
_PyTime_t secs, nsec;
secs = t / SEC_TO_NS;
nsec = t % SEC_TO_NS;
if (nsec < 0) {
nsec += SEC_TO_NS;
secs -= 1;
}
ts->tv_sec = (time_t)secs;
assert(0 <= nsec && nsec < SEC_TO_NS);
ts->tv_nsec = nsec;
if ((_PyTime_t)ts->tv_sec != secs) {
error_time_t_overflow();
return -1;
}
return 0;
}
#endif
static int
pygettimeofday(_PyTime_t *tp, _Py_clock_info_t *info, int raise)
{
#ifdef MS_WINDOWS
FILETIME system_time;
ULARGE_INTEGER large;
assert(info == NULL || raise);
GetSystemTimeAsFileTime(&system_time);
large.u.LowPart = system_time.dwLowDateTime;
large.u.HighPart = system_time.dwHighDateTime;
/* 11,644,473,600,000,000,000: number of nanoseconds between
the 1st January 1601 and the 1st January 1970 (369 years + 89 leap
days). */
*tp = large.QuadPart * 100 - 11644473600000000000;
if (info) {
DWORD timeAdjustment, timeIncrement;
BOOL isTimeAdjustmentDisabled, ok;
info->implementation = "GetSystemTimeAsFileTime()";
info->monotonic = 0;
ok = GetSystemTimeAdjustment(&timeAdjustment, &timeIncrement,
&isTimeAdjustmentDisabled);
if (!ok) {
PyErr_SetFromWindowsErr(0);
return -1;
}
info->resolution = timeIncrement * 1e-7;
info->adjustable = 1;
}
#else /* MS_WINDOWS */
int err;
#ifdef HAVE_CLOCK_GETTIME
struct timespec ts;
#else
struct timeval tv;
#endif
assert(info == NULL || raise);
#ifdef HAVE_CLOCK_GETTIME
err = clock_gettime(CLOCK_REALTIME, &ts);
if (err) {
if (raise)
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
if (_PyTime_FromTimespec(tp, &ts, raise) < 0)
return -1;
if (info) {
struct timespec res;
info->implementation = "clock_gettime(CLOCK_REALTIME)";
info->monotonic = 0;
info->adjustable = 1;
if (clock_getres(CLOCK_REALTIME, &res) == 0)
info->resolution = res.tv_sec + res.tv_nsec * 1e-9;
else
info->resolution = 1e-9;
}
#else /* HAVE_CLOCK_GETTIME */
/* test gettimeofday() */
#ifdef GETTIMEOFDAY_NO_TZ
err = gettimeofday(&tv);
#else
err = gettimeofday(&tv, (struct timezone *)NULL);
#endif
if (err) {
if (raise)
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
if (_PyTime_FromTimeval(tp, &tv, raise) < 0)
return -1;
if (info) {
info->implementation = "gettimeofday()";
info->resolution = 1e-6;
info->monotonic = 0;
info->adjustable = 1;
}
#endif /* !HAVE_CLOCK_GETTIME */
#endif /* !MS_WINDOWS */
return 0;
}
_PyTime_t
_PyTime_GetSystemClock(void)
{
_PyTime_t t;
if (pygettimeofday(&t, NULL, 0) < 0) {
/* should not happen, _PyTime_Init() checked the clock at startup */
assert(0);
/* use a fixed value instead of a random value from the stack */
t = 0;
}
return t;
}
int
_PyTime_GetSystemClockWithInfo(_PyTime_t *t, _Py_clock_info_t *info)
{
return pygettimeofday(t, info, 1);
}
static int
pymonotonic(_PyTime_t *tp, _Py_clock_info_t *info, int raise)
{
#if defined(MS_WINDOWS)
ULONGLONG ticks;
_PyTime_t t;
assert(info == NULL || raise);
ticks = GetTickCount64();
Py_BUILD_ASSERT(sizeof(ticks) <= sizeof(_PyTime_t));
t = (_PyTime_t)ticks;
if (_PyTime_check_mul_overflow(t, MS_TO_NS)) {
if (raise) {
_PyTime_overflow();
return -1;
}
/* Hello, time traveler! */
assert(0);
}
*tp = t * MS_TO_NS;
if (info) {
DWORD timeAdjustment, timeIncrement;
BOOL isTimeAdjustmentDisabled, ok;
info->implementation = "GetTickCount64()";
info->monotonic = 1;
ok = GetSystemTimeAdjustment(&timeAdjustment, &timeIncrement,
&isTimeAdjustmentDisabled);
if (!ok) {
PyErr_SetFromWindowsErr(0);
return -1;
}
info->resolution = timeIncrement * 1e-7;
info->adjustable = 0;
}
#elif defined(__APPLE__)
static mach_timebase_info_data_t timebase;
uint64_t time;
if (timebase.denom == 0) {
/* According to the Technical Q&A QA1398, mach_timebase_info() cannot
fail: https://developer.apple.com/library/mac/#qa/qa1398/ */
(void)mach_timebase_info(&timebase);
}
time = mach_absolute_time();
/* apply timebase factor */
time *= timebase.numer;
time /= timebase.denom;
*tp = time;
if (info) {
info->implementation = "mach_absolute_time()";
info->resolution = (double)timebase.numer / timebase.denom * 1e-9;
info->monotonic = 1;
info->adjustable = 0;
}
#else
struct timespec ts;
#ifdef CLOCK_HIGHRES
const clockid_t clk_id = CLOCK_HIGHRES;
const char *implementation = "clock_gettime(CLOCK_HIGHRES)";
#else
const clockid_t clk_id = CLOCK_MONOTONIC;
const char *implementation = "clock_gettime(CLOCK_MONOTONIC)";
#endif
assert(info == NULL || raise);
if (clock_gettime(clk_id, &ts) != 0) {
if (raise) {
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
return -1;
}
if (info) {
struct timespec res;
info->monotonic = 1;
info->implementation = implementation;
info->adjustable = 0;
if (clock_getres(clk_id, &res) != 0) {
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
info->resolution = res.tv_sec + res.tv_nsec * 1e-9;
}
if (_PyTime_FromTimespec(tp, &ts, raise) < 0)
return -1;
#endif
return 0;
}
_PyTime_t
_PyTime_GetMonotonicClock(void)
{
_PyTime_t t;
if (pymonotonic(&t, NULL, 0) < 0) {
/* should not happen, _PyTime_Init() checked the monotonic clock at
startup */
assert(0);
/* use a fixed value instead of a random value from the stack */
t = 0;
}
return t;
}
int
_PyTime_GetMonotonicClockWithInfo(_PyTime_t *tp, _Py_clock_info_t *info)
{
return pymonotonic(tp, info, 1);
}
int
_PyTime_Init(void)
{
_PyTime_t t;
/* ensure that the system clock works */
if (_PyTime_GetSystemClockWithInfo(&t, NULL) < 0)
return -1;
/* ensure that the operating system provides a monotonic clock */
if (_PyTime_GetMonotonicClockWithInfo(&t, NULL) < 0)
return -1;
return 0;
}
int
_PyTime_localtime(time_t t, struct tm *tm)
{
#ifdef MS_WINDOWS
int error;
error = localtime_s(tm, &t);
if (error != 0) {
errno = error;
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
return 0;
#else /* !MS_WINDOWS */
if (localtime_r(&t, tm) == NULL) {
#ifdef EINVAL
if (errno == 0)
errno = EINVAL;
#endif
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
return 0;
#endif /* MS_WINDOWS */
}
int
_PyTime_gmtime(time_t t, struct tm *tm)
{
#ifdef MS_WINDOWS
int error;
error = gmtime_s(tm, &t);
if (error != 0) {
errno = error;
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
return 0;
#else /* !MS_WINDOWS */
if (gmtime_r(&t, tm) == NULL) {
#ifdef EINVAL
if (errno == 0)
errno = EINVAL;
#endif
PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
return 0;
#endif /* MS_WINDOWS */
}
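/* Illustrative sketch (hypothetical demo function, private _PyTime API as
   declared above): timing a callback with the monotonic clock and converting
   the nanosecond difference to seconds. */
#include "Python.h"

static double demo_elapsed_seconds(void (*work)(void))
{
    _PyTime_t t0 = _PyTime_GetMonotonicClock();
    work();
    _PyTime_t t1 = _PyTime_GetMonotonicClock();
    return _PyTime_AsSecondsDouble(t1 - t0);  /* _PyTime_t counts nanoseconds */
}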

616
third_party/python/Python/random.c vendored Normal file
View file

@ -0,0 +1,616 @@
#include "Python.h"
#ifdef MS_WINDOWS
# include <windows.h>
/* All sample MSDN wincrypt programs include the header below. It is at least
* required with MinGW. */
# include <wincrypt.h>
#else
# include <fcntl.h>
# ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
# endif
# ifdef HAVE_LINUX_RANDOM_H
# include <linux/random.h>
# endif
# if defined(HAVE_SYS_RANDOM_H) && (defined(HAVE_GETRANDOM) || defined(HAVE_GETENTROPY))
# include <sys/random.h>
# endif
# if !defined(HAVE_GETRANDOM) && defined(HAVE_GETRANDOM_SYSCALL)
# include <sys/syscall.h>
# endif
#endif
#ifdef _Py_MEMORY_SANITIZER
# include <sanitizer/msan_interface.h>
#endif
#ifdef Py_DEBUG
int _Py_HashSecret_Initialized = 0;
#else
static int _Py_HashSecret_Initialized = 0;
#endif
#ifdef MS_WINDOWS
static HCRYPTPROV hCryptProv = 0;
static int
win32_urandom_init(int raise)
{
/* Acquire context */
if (!CryptAcquireContext(&hCryptProv, NULL, NULL,
PROV_RSA_FULL, CRYPT_VERIFYCONTEXT))
goto error;
return 0;
error:
if (raise) {
PyErr_SetFromWindowsErr(0);
}
return -1;
}
/* Fill buffer with size pseudo-random bytes generated by the Windows CryptoGen
API. Return 0 on success, or raise an exception and return -1 on error. */
static int
win32_urandom(unsigned char *buffer, Py_ssize_t size, int raise)
{
Py_ssize_t chunk;
if (hCryptProv == 0)
{
if (win32_urandom_init(raise) == -1) {
return -1;
}
}
while (size > 0)
{
chunk = size > INT_MAX ? INT_MAX : size;
if (!CryptGenRandom(hCryptProv, (DWORD)chunk, buffer))
{
/* CryptGenRandom() failed */
if (raise) {
PyErr_SetFromWindowsErr(0);
}
return -1;
}
buffer += chunk;
size -= chunk;
}
return 0;
}
#else /* !MS_WINDOWS */
#if defined(HAVE_GETRANDOM) || defined(HAVE_GETRANDOM_SYSCALL)
#define PY_GETRANDOM 1
/* Call getrandom() to get random bytes:
- Return 1 on success
- Return 0 if getrandom() is not available (failed with ENOSYS or EPERM),
or if getrandom(GRND_NONBLOCK) failed with EAGAIN (system urandom not
initialized yet) and raise=0.
- Raise an exception (if raise is non-zero) and return -1 on error:
if getrandom() failed with EINTR, raise is non-zero and the Python signal
handler raised an exception, or if getrandom() failed with a different
error.
getrandom() is retried if it failed with EINTR: interrupted by a signal. */
static int
py_getrandom(void *buffer, Py_ssize_t size, int blocking, int raise)
{
/* Is getrandom() supported by the running kernel? Set to 0 if getrandom()
failed with ENOSYS or EPERM. Need Linux kernel 3.17 or newer, or Solaris
11.3 or newer */
static int getrandom_works = 1;
int flags;
char *dest;
long n;
if (!getrandom_works) {
return 0;
}
flags = blocking ? 0 : GRND_NONBLOCK;
dest = buffer;
while (0 < size) {
#ifdef sun
/* Issue #26735: On Solaris, getrandom() is limited to returning up
to 1024 bytes. Call it multiple times if more bytes are
requested. */
n = Py_MIN(size, 1024);
#else
n = Py_MIN(size, LONG_MAX);
#endif
errno = 0;
#ifdef HAVE_GETRANDOM
if (raise) {
Py_BEGIN_ALLOW_THREADS
n = getrandom(dest, n, flags);
Py_END_ALLOW_THREADS
}
else {
n = getrandom(dest, n, flags);
}
#else
/* On Linux, use the syscall() function because the GNU libc doesn't
expose the Linux getrandom() syscall yet. See:
https://sourceware.org/bugzilla/show_bug.cgi?id=17252 */
if (raise) {
Py_BEGIN_ALLOW_THREADS
n = syscall(SYS_getrandom, dest, n, flags);
Py_END_ALLOW_THREADS
}
else {
n = syscall(SYS_getrandom, dest, n, flags);
}
# ifdef _Py_MEMORY_SANITIZER
if (n > 0) {
__msan_unpoison(dest, n);
}
# endif
#endif
if (n < 0) {
/* ENOSYS: the syscall is not supported by the kernel.
EPERM: the syscall is blocked by a security policy (ex: SECCOMP)
or something else. */
if (errno == ENOSYS || errno == EPERM) {
getrandom_works = 0;
return 0;
}
/* getrandom(GRND_NONBLOCK) fails with EAGAIN if the system urandom
is not initialized yet. For _PyRandom_Init(), we ignore the
error and fall back on reading /dev/urandom which never blocks,
even if the system urandom is not initialized yet:
see the PEP 524. */
if (errno == EAGAIN && !raise && !blocking) {
return 0;
}
if (errno == EINTR) {
if (raise) {
if (PyErr_CheckSignals()) {
return -1;
}
}
/* retry getrandom() if it was interrupted by a signal */
continue;
}
if (raise) {
PyErr_SetFromErrno(PyExc_OSError);
}
return -1;
}
dest += n;
size -= n;
}
return 1;
}
#elif defined(HAVE_GETENTROPY)
#define PY_GETENTROPY 1
/* Fill buffer with size pseudo-random bytes generated by getentropy():
- Return 1 on success
- Return 0 if getentropy() syscall is not available (failed with ENOSYS or
EPERM).
- Raise an exception (if raise is non-zero) and return -1 on error:
if getentropy() failed with EINTR, raise is non-zero and the Python signal
handler raised an exception, or if getentropy() failed with a different
error.
getentropy() is retried if it failed with EINTR: interrupted by a signal. */
static int
py_getentropy(char *buffer, Py_ssize_t size, int raise)
{
/* Is getentropy() supported by the running kernel? Set to 0 if
getentropy() failed with ENOSYS or EPERM. */
static int getentropy_works = 1;
if (!getentropy_works) {
return 0;
}
while (size > 0) {
/* getentropy() is limited to returning up to 256 bytes. Call it
multiple times if more bytes are requested. */
Py_ssize_t len = Py_MIN(size, 256);
int res;
if (raise) {
Py_BEGIN_ALLOW_THREADS
res = getentropy(buffer, len);
Py_END_ALLOW_THREADS
}
else {
res = getentropy(buffer, len);
}
if (res < 0) {
/* ENOSYS: the syscall is not supported by the running kernel.
EPERM: the syscall is blocked by a security policy (ex: SECCOMP)
or something else. */
if (errno == ENOSYS || errno == EPERM) {
getentropy_works = 0;
return 0;
}
if (errno == EINTR) {
if (raise) {
if (PyErr_CheckSignals()) {
return -1;
}
}
/* retry getentropy() if it was interrupted by a signal */
continue;
}
if (raise) {
PyErr_SetFromErrno(PyExc_OSError);
}
return -1;
}
buffer += len;
size -= len;
}
return 1;
}
#endif /* defined(HAVE_GETENTROPY) && !defined(sun) */
static struct {
int fd;
dev_t st_dev;
ino_t st_ino;
} urandom_cache = { -1 };
/* Read random bytes from the /dev/urandom device:
- Return 0 on success
- Raise an exception (if raise is non-zero) and return -1 on error
Possible causes of errors:
- open() failed with ENOENT, ENXIO, ENODEV, EACCES: the /dev/urandom device
was not found. For example, it was removed manually or not exposed in a
chroot or container.
- open() failed with a different error
- fstat() failed
- read() failed or returned 0
read() is retried if it failed with EINTR: interrupted by a signal.
The file descriptor of the device is kept open between calls to avoid using
many file descriptors when run in parallel from multiple threads:
see the issue #18756.
st_dev and st_ino fields of the file descriptor (from fstat()) are cached to
check if the file descriptor was replaced by a different file (which is
likely a bug in the application): see the issue #21207.
If the file descriptor was closed or replaced, open a new file descriptor
but don't close the old file descriptor: it probably points to something
important for some third-party code. */
static int
dev_urandom(char *buffer, Py_ssize_t size, int raise)
{
int fd;
Py_ssize_t n;
if (raise) {
struct _Py_stat_struct st;
int fstat_result;
if (urandom_cache.fd >= 0) {
Py_BEGIN_ALLOW_THREADS
fstat_result = _Py_fstat_noraise(urandom_cache.fd, &st);
Py_END_ALLOW_THREADS
/* Does the fd point to the same thing as before? (issue #21207) */
if (fstat_result
|| st.st_dev != urandom_cache.st_dev
|| st.st_ino != urandom_cache.st_ino) {
/* Something changed: forget the cached fd (but don't close it,
since it probably points to something important for some
third-party code). */
urandom_cache.fd = -1;
}
}
if (urandom_cache.fd >= 0)
fd = urandom_cache.fd;
else {
fd = _Py_open("/dev/urandom", O_RDONLY);
if (fd < 0) {
if (errno == ENOENT || errno == ENXIO ||
errno == ENODEV || errno == EACCES) {
PyErr_SetString(PyExc_NotImplementedError,
"/dev/urandom (or equivalent) not found");
}
/* otherwise, keep the OSError exception raised by _Py_open() */
return -1;
}
if (urandom_cache.fd >= 0) {
/* urandom_cache.fd was initialized by another thread while we were
not holding the GIL, keep it. */
close(fd);
fd = urandom_cache.fd;
}
else {
if (_Py_fstat(fd, &st)) {
close(fd);
return -1;
}
else {
urandom_cache.fd = fd;
urandom_cache.st_dev = st.st_dev;
urandom_cache.st_ino = st.st_ino;
}
}
}
do {
n = _Py_read(fd, buffer, (size_t)size);
if (n == -1)
return -1;
if (n == 0) {
PyErr_Format(PyExc_RuntimeError,
"Failed to read %zi bytes from /dev/urandom",
size);
return -1;
}
buffer += n;
size -= n;
} while (0 < size);
}
else {
fd = _Py_open_noraise("/dev/urandom", O_RDONLY);
if (fd < 0) {
return -1;
}
while (0 < size)
{
do {
n = read(fd, buffer, (size_t)size);
} while (n < 0 && errno == EINTR);
if (n <= 0) {
/* stop on error or if read(size) returned 0 */
close(fd);
return -1;
}
buffer += n;
size -= n;
}
close(fd);
}
return 0;
}
static void
dev_urandom_close(void)
{
if (urandom_cache.fd >= 0) {
close(urandom_cache.fd);
urandom_cache.fd = -1;
}
}
#endif /* !MS_WINDOWS */
/* Fill buffer with pseudo-random bytes generated by a linear congruent
generator (LCG):
x(n+1) = (x(n) * 214013 + 2531011) % 2^32
Use bits 23..16 of x(n) to generate a byte. */
static void
lcg_urandom(unsigned int x0, unsigned char *buffer, size_t size)
{
size_t index;
unsigned int x;
x = x0;
for (index=0; index < size; index++) {
x *= 214013;
x += 2531011;
/* modulo 2 ^ (8 * sizeof(int)) */
buffer[index] = (x >> 16) & 0xff;
}
}
/* Read random bytes:
- Return 0 on success
- Raise an exception (if raise is non-zero) and return -1 on error
Used sources of entropy ordered by preference, preferred source first:
- CryptGenRandom() on Windows
- getrandom() function (ex: Linux and Solaris): call py_getrandom()
- getentropy() function (ex: OpenBSD): call py_getentropy()
- /dev/urandom device
Read from the /dev/urandom device if getrandom() or getentropy() function
is not available or does not work.
Prefer getrandom() over getentropy() because getrandom() supports blocking
and non-blocking mode: see the PEP 524. Python requires non-blocking RNG at
startup to initialize its hash secret, but os.urandom() must block until the
system urandom is initialized (at least on Linux 3.17 and newer).
Prefer getrandom() and getentropy() over reading directly /dev/urandom
because these functions don't need file descriptors and so avoid ENFILE or
EMFILE errors (too many open files): see the issue #18756.
Only the getrandom() function supports non-blocking mode.
Only use RNG running in the kernel. They are more secure because it is
harder to get the internal state of a RNG running in the kernel land than a
RNG running in the user land. The kernel has a direct access to the hardware
and has access to hardware RNG, they are used as entropy sources.
Note: the OpenSSL RAND_pseudo_bytes() function does not automatically reseed
its RNG on fork(), two child processes (with the same pid) generate the same
random numbers: see issue #18747. Kernel RNGs don't have this issue,
they have access to good quality entropy sources.
If raise is zero:
- Don't raise an exception on error
- Don't call the Python signal handler (don't call PyErr_CheckSignals()) if
a function fails with EINTR: retry directly the interrupted function
- Don't release the GIL to call functions.
*/
static int
pyurandom(void *buffer, Py_ssize_t size, int blocking, int raise)
{
#if defined(PY_GETRANDOM) || defined(PY_GETENTROPY)
int res;
#endif
if (size < 0) {
if (raise) {
PyErr_Format(PyExc_ValueError,
"negative argument not allowed");
}
return -1;
}
if (size == 0) {
return 0;
}
#ifdef MS_WINDOWS
return win32_urandom((unsigned char *)buffer, size, raise);
#else
#if defined(PY_GETRANDOM) || defined(PY_GETENTROPY)
#ifdef PY_GETRANDOM
res = py_getrandom(buffer, size, blocking, raise);
#else
res = py_getentropy(buffer, size, raise);
#endif
if (res < 0) {
return -1;
}
if (res == 1) {
return 0;
}
/* getrandom() or getentropy() function is not available: failed with
ENOSYS or EPERM. Fall back on reading from /dev/urandom. */
#endif
return dev_urandom(buffer, size, raise);
#endif
}
/* Fill buffer with size pseudo-random bytes from the operating system random
number generator (RNG). It is suitable for most cryptographic purposes
except long living private keys for asymmetric encryption.
On Linux 3.17 and newer, the getrandom() syscall is used in blocking mode:
block until the system urandom entropy pool is initialized (128 bits are
collected by the kernel).
Return 0 on success. Raise an exception and return -1 on error. */
int
_PyOS_URandom(void *buffer, Py_ssize_t size)
{
return pyurandom(buffer, size, 1, 1);
}
/* Fill buffer with size pseudo-random bytes from the operating system random
number generator (RNG). It is not suitable for cryptographic purpose.
On Linux 3.17 and newer (when getrandom() syscall is used), if the system
urandom is not initialized yet, the function returns "weak" entropy read
from /dev/urandom.
Return 0 on success. Raise an exception and return -1 on error. */
int
_PyOS_URandomNonblock(void *buffer, Py_ssize_t size)
{
return pyurandom(buffer, size, 0, 1);
}
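/* Editor's sketch (not part of the vendored file): how a caller fills a buffer
   with OS entropy through the wrappers above.  The blocking variant suits
   cryptographic material (it is what os.urandom() uses); the non-blocking one
   suits early startup.  The name demo_fill_token is hypothetical. */
static int
demo_fill_token(unsigned char *buf, Py_ssize_t len)
{
    if (_PyOS_URandom(buf, len) < 0) {
        /* exception already set; may have blocked until the pool was ready */
        return -1;
    }
    return 0;
}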
void
_PyRandom_Init(void)
{
char *env;
unsigned char *secret = (unsigned char *)&_Py_HashSecret.uc;
Py_ssize_t secret_size = sizeof(_Py_HashSecret_t);
Py_BUILD_ASSERT(sizeof(_Py_HashSecret_t) == sizeof(_Py_HashSecret.uc));
if (_Py_HashSecret_Initialized)
return;
_Py_HashSecret_Initialized = 1;
/*
Hash randomization is enabled. Generate a per-process secret,
using PYTHONHASHSEED if provided.
*/
env = Py_GETENV("PYTHONHASHSEED");
if (env && *env != '\0' && strcmp(env, "random") != 0) {
char *endptr = env;
unsigned long seed;
seed = strtoul(env, &endptr, 10);
if (*endptr != '\0'
|| seed > 4294967295UL
|| (errno == ERANGE && seed == ULONG_MAX))
{
Py_FatalError("PYTHONHASHSEED must be \"random\" or an integer "
"in range [0; 4294967295]");
}
if (seed == 0) {
/* disable the randomized hash */
memset(secret, 0, secret_size);
Py_HashRandomizationFlag = 0;
}
else {
lcg_urandom(seed, secret, secret_size);
Py_HashRandomizationFlag = 1;
}
}
else {
int res;
/* _PyRandom_Init() is called very early in the Python initialization
and so exceptions cannot be used (use raise=0).
_PyRandom_Init() must not block Python initialization: call
pyurandom() in non-blocking mode (blocking=0): see PEP 524. */
res = pyurandom(secret, secret_size, 0, 0);
if (res < 0) {
Py_FatalError("failed to get random numbers to initialize Python");
}
Py_HashRandomizationFlag = 1;
}
}
void
_PyRandom_Fini(void)
{
#ifdef MS_WINDOWS
if (hCryptProv) {
CryptReleaseContext(hCryptProv, 0);
hCryptProv = 0;
}
#else
dev_urandom_close();
#endif
}

19
third_party/python/Python/sigcheck.c vendored Normal file
View file

@ -0,0 +1,19 @@
/* Sigcheck is similar to intrcheck() but sets an exception when an
interrupt occurs. It can't be in the intrcheck.c file since that
file (and the whole directory it is in) doesn't know about objects
or exceptions. It can't be in errors.c because it can be
overridden (at link time) by a more powerful version implemented in
signalmodule.c. */
#include "Python.h"
/* ARGSUSED */
int
PyErr_CheckSignals(void)
{
if (!PyOS_InterruptOccurred())
return 0;
PyErr_SetNone(PyExc_KeyboardInterrupt);
return -1;
}
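/* Editor's sketch (not part of the vendored file): the typical call pattern.
   A long-running C loop polls PyErr_CheckSignals() so that Ctrl-C turns into
   KeyboardInterrupt promptly.  The name demo_busy_loop is hypothetical. */
static PyObject *
demo_busy_loop(Py_ssize_t n)
{
    Py_ssize_t i;
    for (i = 0; i < n; i++) {
        if (PyErr_CheckSignals() < 0)
            return NULL;   /* KeyboardInterrupt (or another exception) is set */
        /* ... one unit of work ... */
    }
    Py_RETURN_NONE;
}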

14
third_party/python/Python/strdup.c vendored Normal file
View file

@ -0,0 +1,14 @@
/* strdup() replacement (from stdwin, if you must know) */
#include "pgenheaders.h"
char *
strdup(const char *str)
{
if (str != NULL) {
char *copy = malloc(strlen(str) + 1);
if (copy != NULL)
return strcpy(copy, str);
}
return NULL;
}
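/* Editor's sketch (not part of the vendored file): the replacement behaves
   like the libc strdup(); the caller owns the copy and must free() it. */
static void
demo_strdup(void)
{
    char *copy = strdup("hello");
    if (copy != NULL) {
        /* ... use copy ... */
        free(copy);
    }
}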

292
third_party/python/Python/structmember.c vendored Normal file
View file

@ -0,0 +1,292 @@
/* Map C struct members to Python object attributes */
#include "Python.h"
#include "structmember.h"
PyObject *
PyMember_GetOne(const char *addr, PyMemberDef *l)
{
PyObject *v;
addr += l->offset;
switch (l->type) {
case T_BOOL:
v = PyBool_FromLong(*(char*)addr);
break;
case T_BYTE:
v = PyLong_FromLong(*(char*)addr);
break;
case T_UBYTE:
v = PyLong_FromUnsignedLong(*(unsigned char*)addr);
break;
case T_SHORT:
v = PyLong_FromLong(*(short*)addr);
break;
case T_USHORT:
v = PyLong_FromUnsignedLong(*(unsigned short*)addr);
break;
case T_INT:
v = PyLong_FromLong(*(int*)addr);
break;
case T_UINT:
v = PyLong_FromUnsignedLong(*(unsigned int*)addr);
break;
case T_LONG:
v = PyLong_FromLong(*(long*)addr);
break;
case T_ULONG:
v = PyLong_FromUnsignedLong(*(unsigned long*)addr);
break;
case T_PYSSIZET:
v = PyLong_FromSsize_t(*(Py_ssize_t*)addr);
break;
case T_FLOAT:
v = PyFloat_FromDouble((double)*(float*)addr);
break;
case T_DOUBLE:
v = PyFloat_FromDouble(*(double*)addr);
break;
case T_STRING:
if (*(char**)addr == NULL) {
Py_INCREF(Py_None);
v = Py_None;
}
else
v = PyUnicode_FromString(*(char**)addr);
break;
case T_STRING_INPLACE:
v = PyUnicode_FromString((char*)addr);
break;
case T_CHAR:
v = PyUnicode_FromStringAndSize((char*)addr, 1);
break;
case T_OBJECT:
v = *(PyObject **)addr;
if (v == NULL)
v = Py_None;
Py_INCREF(v);
break;
case T_OBJECT_EX:
v = *(PyObject **)addr;
if (v == NULL)
PyErr_SetString(PyExc_AttributeError, l->name);
Py_XINCREF(v);
break;
case T_LONGLONG:
v = PyLong_FromLongLong(*(long long *)addr);
break;
case T_ULONGLONG:
v = PyLong_FromUnsignedLongLong(*(unsigned long long *)addr);
break;
case T_NONE:
v = Py_None;
Py_INCREF(v);
break;
default:
PyErr_SetString(PyExc_SystemError, "bad memberdescr type");
v = NULL;
}
return v;
}
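/* Editor's sketch (not part of the vendored file): a PyMemberDef table such as
   the one below, hung off a type's tp_members slot, is what routes attribute
   access through PyMember_GetOne() above and PyMember_SetOne() below.
   DemoObject and its fields are hypothetical. */
typedef struct {
    PyObject_HEAD
    int count;          /* exposed read/write as .count */
    PyObject *label;    /* exposed read-only as .label */
} DemoObject;

static PyMemberDef demo_members[] = {
    {"count", T_INT,       offsetof(DemoObject, count), 0,        "a counter"},
    {"label", T_OBJECT_EX, offsetof(DemoObject, label), READONLY, "a label"},
    {NULL}  /* sentinel */
};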
#define WARN(msg) \
do { \
if (PyErr_WarnEx(PyExc_RuntimeWarning, msg, 1) < 0) \
return -1; \
} while (0)
int
PyMember_SetOne(char *addr, PyMemberDef *l, PyObject *v)
{
PyObject *oldv;
addr += l->offset;
if ((l->flags & READONLY))
{
PyErr_SetString(PyExc_AttributeError, "readonly attribute");
return -1;
}
if (v == NULL) {
if (l->type == T_OBJECT_EX) {
/* Check if the attribute is set. */
if (*(PyObject **)addr == NULL) {
PyErr_SetString(PyExc_AttributeError, l->name);
return -1;
}
}
else if (l->type != T_OBJECT) {
PyErr_SetString(PyExc_TypeError,
"can't delete numeric/char attribute");
return -1;
}
}
switch (l->type) {
case T_BOOL:{
if (!PyBool_Check(v)) {
PyErr_SetString(PyExc_TypeError,
"attribute value type must be bool");
return -1;
}
if (v == Py_True)
*(char*)addr = (char) 1;
else
*(char*)addr = (char) 0;
break;
}
case T_BYTE:{
long long_val = PyLong_AsLong(v);
if ((long_val == -1) && PyErr_Occurred())
return -1;
*(char*)addr = (char)long_val;
/* XXX: For compatibility, only warn about truncations
for now. */
if ((long_val > CHAR_MAX) || (long_val < CHAR_MIN))
WARN("Truncation of value to char");
break;
}
case T_UBYTE:{
long long_val = PyLong_AsLong(v);
if ((long_val == -1) && PyErr_Occurred())
return -1;
*(unsigned char*)addr = (unsigned char)long_val;
if ((long_val > UCHAR_MAX) || (long_val < 0))
WARN("Truncation of value to unsigned char");
break;
}
case T_SHORT:{
long long_val = PyLong_AsLong(v);
if ((long_val == -1) && PyErr_Occurred())
return -1;
*(short*)addr = (short)long_val;
if ((long_val > SHRT_MAX) || (long_val < SHRT_MIN))
WARN("Truncation of value to short");
break;
}
case T_USHORT:{
long long_val = PyLong_AsLong(v);
if ((long_val == -1) && PyErr_Occurred())
return -1;
*(unsigned short*)addr = (unsigned short)long_val;
if ((long_val > USHRT_MAX) || (long_val < 0))
WARN("Truncation of value to unsigned short");
break;
}
case T_INT:{
long long_val = PyLong_AsLong(v);
if ((long_val == -1) && PyErr_Occurred())
return -1;
*(int *)addr = (int)long_val;
if ((long_val > INT_MAX) || (long_val < INT_MIN))
WARN("Truncation of value to int");
break;
}
case T_UINT:{
unsigned long ulong_val = PyLong_AsUnsignedLong(v);
if ((ulong_val == (unsigned long)-1) && PyErr_Occurred()) {
/* XXX: For compatibility, accept negative int values
as well. */
PyErr_Clear();
ulong_val = PyLong_AsLong(v);
if ((ulong_val == (unsigned long)-1) &&
PyErr_Occurred())
return -1;
*(unsigned int *)addr = (unsigned int)ulong_val;
WARN("Writing negative value into unsigned field");
} else
*(unsigned int *)addr = (unsigned int)ulong_val;
if (ulong_val > UINT_MAX)
WARN("Truncation of value to unsigned int");
break;
}
case T_LONG:{
*(long*)addr = PyLong_AsLong(v);
if ((*(long*)addr == -1) && PyErr_Occurred())
return -1;
break;
}
case T_ULONG:{
*(unsigned long*)addr = PyLong_AsUnsignedLong(v);
if ((*(unsigned long*)addr == (unsigned long)-1)
&& PyErr_Occurred()) {
/* XXX: For compatibility, accept negative int values
as well. */
PyErr_Clear();
*(unsigned long*)addr = PyLong_AsLong(v);
if ((*(unsigned long*)addr == (unsigned long)-1)
&& PyErr_Occurred())
return -1;
WARN("Writing negative value into unsigned field");
}
break;
}
case T_PYSSIZET:{
*(Py_ssize_t*)addr = PyLong_AsSsize_t(v);
if ((*(Py_ssize_t*)addr == (Py_ssize_t)-1)
&& PyErr_Occurred())
return -1;
break;
}
case T_FLOAT:{
double double_val = PyFloat_AsDouble(v);
if ((double_val == -1) && PyErr_Occurred())
return -1;
*(float*)addr = (float)double_val;
break;
}
case T_DOUBLE:
*(double*)addr = PyFloat_AsDouble(v);
if ((*(double*)addr == -1) && PyErr_Occurred())
return -1;
break;
case T_OBJECT:
case T_OBJECT_EX:
Py_XINCREF(v);
oldv = *(PyObject **)addr;
*(PyObject **)addr = v;
Py_XDECREF(oldv);
break;
case T_CHAR: {
char *string;
Py_ssize_t len;
string = PyUnicode_AsUTF8AndSize(v, &len);
if (string == NULL || len != 1) {
PyErr_BadArgument();
return -1;
}
*(char*)addr = string[0];
break;
}
case T_STRING:
case T_STRING_INPLACE:
PyErr_SetString(PyExc_TypeError, "readonly attribute");
return -1;
case T_LONGLONG:{
long long value;
*(long long*)addr = value = PyLong_AsLongLong(v);
if ((value == -1) && PyErr_Occurred())
return -1;
break;
}
case T_ULONGLONG:{
unsigned long long value;
/* ??? PyLong_AsLongLong accepts an int, but PyLong_AsUnsignedLongLong
doesn't ??? */
if (PyLong_Check(v))
*(unsigned long long*)addr = value = PyLong_AsUnsignedLongLong(v);
else
*(unsigned long long*)addr = value = PyLong_AsLong(v);
if ((value == (unsigned long long)-1) && PyErr_Occurred())
return -1;
break;
}
default:
PyErr_Format(PyExc_SystemError,
"bad memberdescr type for %s", l->name);
return -1;
}
return 0;
}

1793
third_party/python/Python/symtable.c vendored Normal file

File diff suppressed because it is too large Load diff

2441
third_party/python/Python/sysmodule.c vendored Normal file

File diff suppressed because it is too large Load diff

447
third_party/python/Python/thread.c vendored Normal file
View file

@ -0,0 +1,447 @@
/* Thread package.
This is intended to be usable independently from Python.
The implementation for system foobar is in a file thread_foobar.h
which is included by this file depending on config settings.
Stuff shared by all thread_*.h files is collected here. */
#include "Python.h"
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
not present in unistd.h. But they still can be implemented as an external
library (e.g. gnu pth in pthread emulation) */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif
#ifndef DONT_HAVE_STDIO_H
#include <stdio.h>
#endif
#include <stdlib.h>
#include "pythread.h"
#ifndef _POSIX_THREADS
/* Check if we're running on HP-UX and _SC_THREADS is defined. If so, then
enough of the Posix threads package is implemented to support python
threads.
This is valid for HP-UX 11.23 running on an ia64 system. If needed, add
a check of __ia64 to verify that we're running on an ia64 system instead
of a pa-risc system.
*/
#ifdef __hpux
#ifdef _SC_THREADS
#define _POSIX_THREADS
#endif
#endif
#endif /* _POSIX_THREADS */
#ifdef Py_DEBUG
static int thread_debug = 0;
#define dprintf(args) (void)((thread_debug & 1) && printf args)
#define d2printf(args) ((thread_debug & 8) && printf args)
#else
#define dprintf(args)
#define d2printf(args)
#endif
static int initialized;
static void PyThread__init_thread(void); /* Forward */
void
PyThread_init_thread(void)
{
#ifdef Py_DEBUG
char *p = Py_GETENV("PYTHONTHREADDEBUG");
if (p) {
if (*p)
thread_debug = atoi(p);
else
thread_debug = 1;
}
#endif /* Py_DEBUG */
if (initialized)
return;
initialized = 1;
dprintf(("PyThread_init_thread called\n"));
PyThread__init_thread();
}
/* Support for runtime thread stack size tuning.
A value of 0 means using the platform's default stack size
or the size specified by the THREAD_STACK_SIZE macro. */
static size_t _pythread_stacksize = 0;
#ifdef _POSIX_THREADS
#define PYTHREAD_NAME "pthread"
#include "thread_pthread.h"
#endif
#ifdef NT_THREADS
#define PYTHREAD_NAME "nt"
#include "thread_nt.h"
#endif
/*
#ifdef FOOBAR_THREADS
#include "thread_foobar.h"
#endif
*/
/* return the current thread stack size */
size_t
PyThread_get_stacksize(void)
{
return _pythread_stacksize;
}
/* Only platforms defining a THREAD_SET_STACKSIZE() macro
in thread_<platform>.h support changing the stack size.
Return 0 if stack size is valid,
-1 if stack size value is invalid,
-2 if setting stack size is not supported. */
int
PyThread_set_stacksize(size_t size)
{
#if defined(THREAD_SET_STACKSIZE)
return THREAD_SET_STACKSIZE(size);
#else
return -2;
#endif
}
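/* Editor's sketch (not part of the vendored file): requesting a 1 MiB stack
   for threads created afterwards.  demo_tune_stack is a hypothetical name. */
static void
demo_tune_stack(void)
{
    int rc = PyThread_set_stacksize((size_t)1024 * 1024);
    if (rc != 0) {
        /* -1: size rejected, -2: tuning unsupported; keep the default */
    }
    (void)rc;
}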
#ifndef Py_HAVE_NATIVE_TLS
/* If the platform has not supplied a platform specific
TLS implementation, provide our own.
This code was stolen from "thread_sgi.h", where it was the only
implementation of an existing Python TLS API.
*/
/* ------------------------------------------------------------------------
Per-thread data ("key") support.
Use PyThread_create_key() to create a new key. This is typically shared
across threads.
Use PyThread_set_key_value(thekey, value) to associate void* value with
thekey in the current thread. Each thread has a distinct mapping of thekey
to a void* value. Caution: if the current thread already has a mapping
for thekey, value is ignored.
Use PyThread_get_key_value(thekey) to retrieve the void* value associated
with thekey in the current thread. This returns NULL if no value is
associated with thekey in the current thread.
Use PyThread_delete_key_value(thekey) to forget the current thread's associated
value for thekey. PyThread_delete_key(thekey) forgets the values associated
with thekey across *all* threads.
While some of these functions have error-return values, none set any
Python exception.
None of the functions does memory management on behalf of the void* values.
You need to allocate and deallocate them yourself. If the void* values
happen to be PyObject*, these functions don't do refcount operations on
them either.
The GIL does not need to be held when calling these functions; they supply
their own locking. This isn't true of PyThread_create_key(), though (see
next paragraph).
There's a hidden assumption that PyThread_create_key() will be called before
any of the other functions are called. There's also a hidden assumption
that calls to PyThread_create_key() are serialized externally.
------------------------------------------------------------------------ */
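/* Editor's sketch (not part of the vendored file): typical use of the key API
   documented above.  Each thread lazily gets its own buffer; the key itself is
   created once (callers must serialize that, as noted above).  demo_key and
   demo_get_thread_buffer are hypothetical names. */
static int demo_key = -1;

static void *
demo_get_thread_buffer(void)
{
    void *buf;
    if (demo_key == -1)      /* assumes one-time, externally serialized init */
        demo_key = PyThread_create_key();
    buf = PyThread_get_key_value(demo_key);  /* NULL: no value in this thread */
    if (buf == NULL) {
        buf = malloc(256);
        if (buf != NULL && PyThread_set_key_value(demo_key, buf) != 0) {
            free(buf);
            buf = NULL;
        }
    }
    return buf;
}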
/* A singly-linked list of struct key objects remembers all the key->value
* associations. File static keyhead heads the list. keymutex is used
* to enforce exclusion internally.
*/
struct key {
/* Next record in the list, or NULL if this is the last record. */
struct key *next;
/* The thread id, according to PyThread_get_thread_ident(). */
long id;
/* The key and its associated value. */
int key;
void *value;
};
static struct key *keyhead = NULL;
static PyThread_type_lock keymutex = NULL;
static int nkeys = 0; /* PyThread_create_key() hands out nkeys+1 next */
/* Internal helper.
* If the current thread has a mapping for key, the appropriate struct key*
* is returned. NB: value is ignored in this case!
* If there is no mapping for key in the current thread, then:
* If value is NULL, NULL is returned.
* Else a mapping of key to value is created for the current thread,
* and a pointer to a new struct key* is returned; except that if
* malloc() can't find room for a new struct key*, NULL is returned.
* So when value==NULL, this acts like a pure lookup routine, and when
* value!=NULL, this acts like dict.setdefault(), returning an existing
* mapping if one exists, else creating a new mapping.
*
* Caution: this used to be too clever, trying to hold keymutex only
* around the "p->next = keyhead; keyhead = p" pair. That allowed
* another thread to mutate the list, via key deletion, concurrent with
* find_key() crawling over the list. Hilarity ensued. For example, when
* the for-loop here does "p = p->next", p could end up pointing at a
* record that PyThread_delete_key_value() was concurrently free()'ing.
* That could lead to anything, from failing to find a key that exists, to
* segfaults. Now we lock the whole routine.
*/
static struct key *
find_key(int set_value, int key, void *value)
{
struct key *p, *prev_p;
long id = PyThread_get_thread_ident();
if (!keymutex)
return NULL;
PyThread_acquire_lock(keymutex, 1);
prev_p = NULL;
for (p = keyhead; p != NULL; p = p->next) {
if (p->id == id && p->key == key) {
if (set_value)
p->value = value;
goto Done;
}
/* Sanity check. These states should never happen but if
* they do we must abort. Otherwise we'll end up spinning
* in a tight loop with the lock held. A similar check is done
* in pystate.c tstate_delete_common(). */
if (p == prev_p)
Py_FatalError("tls find_key: small circular list(!)");
prev_p = p;
if (p->next == keyhead)
Py_FatalError("tls find_key: circular list(!)");
}
if (!set_value && value == NULL) {
assert(p == NULL);
goto Done;
}
p = (struct key *)PyMem_RawMalloc(sizeof(struct key));
if (p != NULL) {
p->id = id;
p->key = key;
p->value = value;
p->next = keyhead;
keyhead = p;
}
Done:
PyThread_release_lock(keymutex);
return p;
}
/* Return a new key. This must be called before any other functions in
* this family, and callers must arrange to serialize calls to this
* function. No violations are detected.
*/
int
PyThread_create_key(void)
{
/* All parts of this function are wrong if it's called by multiple
* threads simultaneously.
*/
if (keymutex == NULL)
keymutex = PyThread_allocate_lock();
return ++nkeys;
}
/* Forget the associations for key across *all* threads. */
void
PyThread_delete_key(int key)
{
struct key *p, **q;
PyThread_acquire_lock(keymutex, 1);
q = &keyhead;
while ((p = *q) != NULL) {
if (p->key == key) {
*q = p->next;
PyMem_RawFree((void *)p);
/* NB This does *not* free p->value! */
}
else
q = &p->next;
}
PyThread_release_lock(keymutex);
}
int
PyThread_set_key_value(int key, void *value)
{
struct key *p;
p = find_key(1, key, value);
if (p == NULL)
return -1;
else
return 0;
}
/* Retrieve the value associated with key in the current thread, or NULL
* if the current thread doesn't have an association for key.
*/
void *
PyThread_get_key_value(int key)
{
struct key *p = find_key(0, key, NULL);
if (p == NULL)
return NULL;
else
return p->value;
}
/* Forget the current thread's association for key, if any. */
void
PyThread_delete_key_value(int key)
{
long id = PyThread_get_thread_ident();
struct key *p, **q;
PyThread_acquire_lock(keymutex, 1);
q = &keyhead;
while ((p = *q) != NULL) {
if (p->key == key && p->id == id) {
*q = p->next;
PyMem_RawFree((void *)p);
/* NB This does *not* free p->value! */
break;
}
else
q = &p->next;
}
PyThread_release_lock(keymutex);
}
/* Forget everything not associated with the current thread id.
* This function is called from PyOS_AfterFork(). It is necessary
* because other thread ids which were in use at the time of the fork
* may be reused for new threads created in the forked process.
*/
void
PyThread_ReInitTLS(void)
{
long id = PyThread_get_thread_ident();
struct key *p, **q;
if (!keymutex)
return;
/* As with interpreter_lock in PyEval_ReInitThreads()
we just create a new lock without freeing the old one */
keymutex = PyThread_allocate_lock();
/* Delete all keys which do not match the current thread id */
q = &keyhead;
while ((p = *q) != NULL) {
if (p->id != id) {
*q = p->next;
PyMem_RawFree((void *)p);
/* NB This does *not* free p->value! */
}
else
q = &p->next;
}
}
#endif /* Py_HAVE_NATIVE_TLS */
PyDoc_STRVAR(threadinfo__doc__,
"sys.thread_info\n\
\n\
A struct sequence holding information about the thread implementation.");
static PyStructSequence_Field threadinfo_fields[] = {
{"name", "name of the thread implementation"},
{"lock", "name of the lock implementation"},
{"version", "name and version of the thread library"},
{0}
};
static PyStructSequence_Desc threadinfo_desc = {
"sys.thread_info", /* name */
threadinfo__doc__, /* doc */
threadinfo_fields, /* fields */
3
};
static PyTypeObject ThreadInfoType;
PyObject*
PyThread_GetInfo(void)
{
PyObject *threadinfo, *value;
int pos = 0;
#if (defined(_POSIX_THREADS) && defined(HAVE_CONFSTR) \
&& defined(_CS_GNU_LIBPTHREAD_VERSION))
char buffer[255];
int len;
#endif
if (ThreadInfoType.tp_name == 0) {
if (PyStructSequence_InitType2(&ThreadInfoType, &threadinfo_desc) < 0)
return NULL;
}
threadinfo = PyStructSequence_New(&ThreadInfoType);
if (threadinfo == NULL)
return NULL;
value = PyUnicode_FromString(PYTHREAD_NAME);
if (value == NULL) {
Py_DECREF(threadinfo);
return NULL;
}
PyStructSequence_SET_ITEM(threadinfo, pos++, value);
#ifdef _POSIX_THREADS
#ifdef USE_SEMAPHORES
value = PyUnicode_FromString("semaphore");
#else
value = PyUnicode_FromString("mutex+cond");
#endif
if (value == NULL) {
Py_DECREF(threadinfo);
return NULL;
}
#else
Py_INCREF(Py_None);
value = Py_None;
#endif
PyStructSequence_SET_ITEM(threadinfo, pos++, value);
#if (defined(_POSIX_THREADS) && defined(HAVE_CONFSTR) \
&& defined(_CS_GNU_LIBPTHREAD_VERSION))
value = NULL;
len = confstr(_CS_GNU_LIBPTHREAD_VERSION, buffer, sizeof(buffer));
if (1 < len && (size_t)len < sizeof(buffer)) {
value = PyUnicode_DecodeFSDefaultAndSize(buffer, len-1);
if (value == NULL)
PyErr_Clear();
}
if (value == NULL)
#endif
{
Py_INCREF(Py_None);
value = Py_None;
}
PyStructSequence_SET_ITEM(threadinfo, pos++, value);
return threadinfo;
}

132
third_party/python/Python/thread_foobar.h vendored Normal file
View file

@ -0,0 +1,132 @@
/*
* Initialization.
*/
static void
PyThread__init_thread(void)
{
}
/*
* Thread support.
*/
long
PyThread_start_new_thread(void (*func)(void *), void *arg)
{
int success = 0; /* init not needed when SOLARIS_THREADS and */
/* C_THREADS implemented properly */
dprintf(("PyThread_start_new_thread called\n"));
if (!initialized)
PyThread_init_thread();
return success < 0 ? -1 : 0;
}
long
PyThread_get_thread_ident(void)
{
if (!initialized)
PyThread_init_thread();
}
void
PyThread_exit_thread(void)
{
dprintf(("PyThread_exit_thread called\n"));
if (!initialized)
exit(0);
}
/*
* Lock support.
*/
PyThread_type_lock
PyThread_allocate_lock(void)
{
dprintf(("PyThread_allocate_lock called\n"));
if (!initialized)
PyThread_init_thread();
dprintf(("PyThread_allocate_lock() -> %p\n", lock));
return (PyThread_type_lock) lock;
}
void
PyThread_free_lock(PyThread_type_lock lock)
{
dprintf(("PyThread_free_lock(%p) called\n", lock));
}
int
PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
{
return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, 0);
}
PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
int intr_flag)
{
int success;
dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n", lock, microseconds, intr_flag));
dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
lock, microseconds, intr_flag, success));
return success;
}
void
PyThread_release_lock(PyThread_type_lock lock)
{
dprintf(("PyThread_release_lock(%p) called\n", lock));
}
/* The following are only needed if native TLS support exists */
#define Py_HAVE_NATIVE_TLS
#ifdef Py_HAVE_NATIVE_TLS
int
PyThread_create_key(void)
{
int result;
return result;
}
void
PyThread_delete_key(int key)
{
}
int
PyThread_set_key_value(int key, void *value)
{
int ok;
/* A failure in this case returns -1 */
if (!ok)
return -1;
return 0;
}
void *
PyThread_get_key_value(int key)
{
void *result;
return result;
}
void
PyThread_delete_key_value(int key)
{
}
void
PyThread_ReInitTLS(void)
{
}
#endif

412
third_party/python/Python/thread_nt.h vendored Normal file
View file

@ -0,0 +1,412 @@
/* This code implemented by Dag.Gruneau@elsa.preseco.comm.se */
/* Fast NonRecursiveMutex support by Yakov Markovitch, markovitch@iso.ru */
/* Eliminated some memory leaks, gsw@agere.com */
#include <windows.h>
#include <limits.h>
#ifdef HAVE_PROCESS_H
#include <process.h>
#endif
/* options */
#ifndef _PY_USE_CV_LOCKS
#define _PY_USE_CV_LOCKS 1 /* use locks based on cond vars */
#endif
/* Now, define a non-recursive mutex using either condition variables
* and critical sections (fast) or using operating system mutexes
* (slow)
*/
#if _PY_USE_CV_LOCKS
#include "condvar.h"
typedef struct _NRMUTEX
{
PyMUTEX_T cs;
PyCOND_T cv;
int locked;
} NRMUTEX;
typedef NRMUTEX *PNRMUTEX;
PNRMUTEX
AllocNonRecursiveMutex()
{
PNRMUTEX m = (PNRMUTEX)PyMem_RawMalloc(sizeof(NRMUTEX));
if (!m)
return NULL;
if (PyCOND_INIT(&m->cv))
goto fail;
if (PyMUTEX_INIT(&m->cs)) {
PyCOND_FINI(&m->cv);
goto fail;
}
m->locked = 0;
return m;
fail:
PyMem_RawFree(m);
return NULL;
}
VOID
FreeNonRecursiveMutex(PNRMUTEX mutex)
{
if (mutex) {
PyCOND_FINI(&mutex->cv);
PyMUTEX_FINI(&mutex->cs);
PyMem_RawFree(mutex);
}
}
DWORD
EnterNonRecursiveMutex(PNRMUTEX mutex, DWORD milliseconds)
{
DWORD result = WAIT_OBJECT_0;
if (PyMUTEX_LOCK(&mutex->cs))
return WAIT_FAILED;
if (milliseconds == INFINITE) {
while (mutex->locked) {
if (PyCOND_WAIT(&mutex->cv, &mutex->cs)) {
result = WAIT_FAILED;
break;
}
}
} else if (milliseconds != 0) {
/* wait at least until the target */
DWORD now, target = GetTickCount() + milliseconds;
while (mutex->locked) {
if (PyCOND_TIMEDWAIT(&mutex->cv, &mutex->cs, (long long)milliseconds*1000) < 0) {
result = WAIT_FAILED;
break;
}
now = GetTickCount();
if (target <= now)
break;
milliseconds = target-now;
}
}
if (!mutex->locked) {
mutex->locked = 1;
result = WAIT_OBJECT_0;
} else if (result == WAIT_OBJECT_0)
result = WAIT_TIMEOUT;
/* else, it is WAIT_FAILED */
PyMUTEX_UNLOCK(&mutex->cs); /* must ignore result here */
return result;
}
BOOL
LeaveNonRecursiveMutex(PNRMUTEX mutex)
{
BOOL result;
if (PyMUTEX_LOCK(&mutex->cs))
return FALSE;
mutex->locked = 0;
result = PyCOND_SIGNAL(&mutex->cv);
result &= PyMUTEX_UNLOCK(&mutex->cs);
return result;
}
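/* Editor's sketch (not part of the vendored file): how the NRMUTEX primitives
   above pair up.  A zero timeout polls, INFINITE blocks until the holder calls
   LeaveNonRecursiveMutex().  demo_nrmutex is a hypothetical name. */
static void
demo_nrmutex(void)
{
    PNRMUTEX m = AllocNonRecursiveMutex();
    if (m == NULL)
        return;
    if (EnterNonRecursiveMutex(m, INFINITE) == WAIT_OBJECT_0) {
        /* ... critical section ... */
        LeaveNonRecursiveMutex(m);
    }
    FreeNonRecursiveMutex(m);
}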
#else /* if ! _PY_USE_CV_LOCKS */
/* NR-locks based on a kernel mutex */
#define PNRMUTEX HANDLE
PNRMUTEX
AllocNonRecursiveMutex()
{
return CreateSemaphore(NULL, 1, 1, NULL);
}
VOID
FreeNonRecursiveMutex(PNRMUTEX mutex)
{
/* No in-use check */
CloseHandle(mutex);
}
DWORD
EnterNonRecursiveMutex(PNRMUTEX mutex, DWORD milliseconds)
{
return WaitForSingleObjectEx(mutex, milliseconds, FALSE);
}
BOOL
LeaveNonRecursiveMutex(PNRMUTEX mutex)
{
return ReleaseSemaphore(mutex, 1, NULL);
}
#endif /* _PY_USE_CV_LOCKS */
long PyThread_get_thread_ident(void);
/*
* Initialization of the C package, should not be needed.
*/
static void
PyThread__init_thread(void)
{
}
/*
* Thread support.
*/
typedef struct {
void (*func)(void*);
void *arg;
} callobj;
/* thunk to adapt between the function type used by the system's
thread start function and the internally used one. */
static unsigned __stdcall
bootstrap(void *call)
{
callobj *obj = (callobj*)call;
void (*func)(void*) = obj->func;
void *arg = obj->arg;
HeapFree(GetProcessHeap(), 0, obj);
func(arg);
return 0;
}
long
PyThread_start_new_thread(void (*func)(void *), void *arg)
{
HANDLE hThread;
unsigned threadID;
callobj *obj;
dprintf(("%ld: PyThread_start_new_thread called\n",
PyThread_get_thread_ident()));
if (!initialized)
PyThread_init_thread();
obj = (callobj*)HeapAlloc(GetProcessHeap(), 0, sizeof(*obj));
if (!obj)
return -1;
obj->func = func;
obj->arg = arg;
hThread = (HANDLE)_beginthreadex(0,
Py_SAFE_DOWNCAST(_pythread_stacksize,
Py_ssize_t, unsigned int),
bootstrap, obj,
0, &threadID);
if (hThread == 0) {
/* I've seen errno == EAGAIN here, which means "there are
* too many threads".
*/
int e = errno;
dprintf(("%ld: PyThread_start_new_thread failed, errno %d\n",
PyThread_get_thread_ident(), e));
threadID = (unsigned)-1;
HeapFree(GetProcessHeap(), 0, obj);
}
else {
dprintf(("%ld: PyThread_start_new_thread succeeded: %p\n",
PyThread_get_thread_ident(), (void*)hThread));
CloseHandle(hThread);
}
return (long) threadID;
}
/*
* Return the thread Id instead of a handle. The Id is said to uniquely identify the
* thread in the system.
*/
long
PyThread_get_thread_ident(void)
{
if (!initialized)
PyThread_init_thread();
return GetCurrentThreadId();
}
void
PyThread_exit_thread(void)
{
dprintf(("%ld: PyThread_exit_thread called\n", PyThread_get_thread_ident()));
if (!initialized)
exit(0);
_endthreadex(0);
}
/*
* Lock support. It has to be implemented as semaphores.
* I [Dag] tried to implement it with a mutex but I couldn't find a way to
* tell whether a thread already owns the lock or not.
*/
PyThread_type_lock
PyThread_allocate_lock(void)
{
PNRMUTEX aLock;
dprintf(("PyThread_allocate_lock called\n"));
if (!initialized)
PyThread_init_thread();
aLock = AllocNonRecursiveMutex() ;
dprintf(("%ld: PyThread_allocate_lock() -> %p\n", PyThread_get_thread_ident(), aLock));
return (PyThread_type_lock) aLock;
}
void
PyThread_free_lock(PyThread_type_lock aLock)
{
dprintf(("%ld: PyThread_free_lock(%p) called\n", PyThread_get_thread_ident(),aLock));
FreeNonRecursiveMutex(aLock) ;
}
/*
* Return 1 on success if the lock was acquired
*
* and 0 if the lock was not acquired. This means a 0 is returned
* if the lock has already been acquired by this thread!
*/
PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock aLock,
PY_TIMEOUT_T microseconds, int intr_flag)
{
/* For now, intr_flag does nothing on Windows, and lock acquires are
* uninterruptible. */
PyLockStatus success;
PY_TIMEOUT_T milliseconds;
if (microseconds >= 0) {
milliseconds = microseconds / 1000;
if (microseconds % 1000 > 0)
++milliseconds;
if ((DWORD) milliseconds != milliseconds)
Py_FatalError("Timeout too large for a DWORD, "
"please check PY_TIMEOUT_MAX");
}
else
milliseconds = INFINITE;
dprintf(("%ld: PyThread_acquire_lock_timed(%p, %lld) called\n",
PyThread_get_thread_ident(), aLock, microseconds));
if (aLock && EnterNonRecursiveMutex((PNRMUTEX)aLock,
(DWORD)milliseconds) == WAIT_OBJECT_0) {
success = PY_LOCK_ACQUIRED;
}
else {
success = PY_LOCK_FAILURE;
}
dprintf(("%ld: PyThread_acquire_lock(%p, %lld) -> %d\n",
PyThread_get_thread_ident(), aLock, microseconds, success));
return success;
}
int
PyThread_acquire_lock(PyThread_type_lock aLock, int waitflag)
{
return PyThread_acquire_lock_timed(aLock, waitflag ? -1 : 0, 0);
}
void
PyThread_release_lock(PyThread_type_lock aLock)
{
dprintf(("%ld: PyThread_release_lock(%p) called\n", PyThread_get_thread_ident(),aLock));
if (!(aLock && LeaveNonRecursiveMutex((PNRMUTEX) aLock)))
dprintf(("%ld: Could not PyThread_release_lock(%p) error: %ld\n", PyThread_get_thread_ident(), aLock, GetLastError()));
}
/* minimum/maximum thread stack sizes supported */
#define THREAD_MIN_STACKSIZE 0x8000 /* 32kB */
#define THREAD_MAX_STACKSIZE 0x10000000 /* 256MB */
/* set the thread stack size.
* Return 0 if size is valid, -1 otherwise.
*/
static int
_pythread_nt_set_stacksize(size_t size)
{
/* set to default */
if (size == 0) {
_pythread_stacksize = 0;
return 0;
}
/* valid range? */
if (size >= THREAD_MIN_STACKSIZE && size < THREAD_MAX_STACKSIZE) {
_pythread_stacksize = size;
return 0;
}
return -1;
}
#define THREAD_SET_STACKSIZE(x) _pythread_nt_set_stacksize(x)
/* use native Windows TLS functions */
#define Py_HAVE_NATIVE_TLS
#ifdef Py_HAVE_NATIVE_TLS
int
PyThread_create_key(void)
{
DWORD result= TlsAlloc();
if (result == TLS_OUT_OF_INDEXES)
return -1;
return (int)result;
}
void
PyThread_delete_key(int key)
{
TlsFree(key);
}
int
PyThread_set_key_value(int key, void *value)
{
BOOL ok;
ok = TlsSetValue(key, value);
if (!ok)
return -1;
return 0;
}
void *
PyThread_get_key_value(int key)
{
/* because TLS is used in the Py_END_ALLOW_THREADS macro,
* it is necessary to preserve the windows error state, because
* it is assumed to be preserved across the call to the macro.
* Ideally, the macro should be fixed, but it is simpler to
* do it here.
*/
DWORD error = GetLastError();
void *result = TlsGetValue(key);
SetLastError(error);
return result;
}
void
PyThread_delete_key_value(int key)
{
/* NULL is used as "key missing", and it is also the default
* given by TlsGetValue() if nothing has been set yet.
*/
TlsSetValue(key, NULL);
}
/* reinitialization of TLS is not necessary after fork when using
* the native TLS functions. And forking isn't supported on Windows either.
*/
void
PyThread_ReInitTLS(void)
{}
#endif

689
third_party/python/Python/thread_pthread.h vendored Normal file
View file

@ -0,0 +1,689 @@
/* Posix threads interface */
#include <stdlib.h>
#include <string.h>
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
#define destructor xxdestructor
#endif
#include <pthread.h>
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
#undef destructor
#endif
#include <signal.h>
/* The POSIX spec requires that use of pthread_attr_setstacksize
be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
#ifndef THREAD_STACK_SIZE
#define THREAD_STACK_SIZE 0 /* use default stack size */
#endif
/* The default stack size for new threads on OSX and BSD is small enough that
* we'll get hard crashes instead of 'maximum recursion depth exceeded'
* exceptions.
*
* The default stack sizes below are the empirically determined minimal stack
* sizes where a simple recursive function doesn't cause a hard crash.
*/
#if defined(__APPLE__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef THREAD_STACK_SIZE
#define THREAD_STACK_SIZE 0x500000
#endif
#if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef THREAD_STACK_SIZE
#define THREAD_STACK_SIZE 0x400000
#endif
/* for safety, ensure a viable minimum stacksize */
#define THREAD_STACK_MIN 0x8000 /* 32kB */
#else /* !_POSIX_THREAD_ATTR_STACKSIZE */
#ifdef THREAD_STACK_SIZE
#error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
#endif
#endif
/* The POSIX spec says that implementations supporting the sem_*
family of functions must indicate this by defining
_POSIX_SEMAPHORES. */
#ifdef _POSIX_SEMAPHORES
/* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so
we need to add 0 to make it work there as well. */
#if (_POSIX_SEMAPHORES+0) == -1
#define HAVE_BROKEN_POSIX_SEMAPHORES
#else
#include <semaphore.h>
#include <errno.h>
#endif
#endif
/* Before FreeBSD 5.4, system-scope threads were a very limited resource
in the default setting, so process scope is preferred in order to get
enough threads to work. */
#ifdef __FreeBSD__
#include <osreldate.h>
#if __FreeBSD_version >= 500000 && __FreeBSD_version < 504101
#undef PTHREAD_SYSTEM_SCHED_SUPPORTED
#endif
#endif
#if !defined(pthread_attr_default)
# define pthread_attr_default ((pthread_attr_t *)NULL)
#endif
#if !defined(pthread_mutexattr_default)
# define pthread_mutexattr_default ((pthread_mutexattr_t *)NULL)
#endif
#if !defined(pthread_condattr_default)
# define pthread_condattr_default ((pthread_condattr_t *)NULL)
#endif
/* Whether or not to use semaphores directly rather than emulating them with
* mutexes and condition variables:
*/
#if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
defined(HAVE_SEM_TIMEDWAIT))
# define USE_SEMAPHORES
#else
# undef USE_SEMAPHORES
#endif
/* On platforms that don't use standard POSIX threads, pthread_sigmask()
* isn't present. DEC threads uses sigprocmask() instead, as do most
* other UNIX International compliant systems that don't have the full
* pthread implementation.
*/
#if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK)
# define SET_THREAD_SIGMASK pthread_sigmask
#else
# define SET_THREAD_SIGMASK sigprocmask
#endif
/* We assume all modern POSIX systems have gettimeofday() */
#ifdef GETTIMEOFDAY_NO_TZ
#define GETTIMEOFDAY(ptv) gettimeofday(ptv)
#else
#define GETTIMEOFDAY(ptv) gettimeofday(ptv, (struct timezone *)NULL)
#endif
#define MICROSECONDS_TO_TIMESPEC(microseconds, ts) \
do { \
struct timeval tv; \
GETTIMEOFDAY(&tv); \
tv.tv_usec += microseconds % 1000000; \
tv.tv_sec += microseconds / 1000000; \
tv.tv_sec += tv.tv_usec / 1000000; \
tv.tv_usec %= 1000000; \
ts.tv_sec = tv.tv_sec; \
ts.tv_nsec = tv.tv_usec * 1000; \
} while(0)
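/* Editor's sketch (not part of the vendored file): the macro above converts a
   *relative* timeout into an *absolute* deadline based on the current wall
   clock, which is what sem_timedwait() and pthread_cond_timedwait() expect.
   demo_make_deadline is a hypothetical name. */
static void
demo_make_deadline(PY_TIMEOUT_T microseconds, struct timespec *deadline)
{
    struct timespec ts;
    MICROSECONDS_TO_TIMESPEC(microseconds, ts);  /* ts = now + microseconds */
    *deadline = ts;
}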
/* A pthread mutex isn't sufficient to model the Python lock type
* because, according to Draft 5 of the docs (P1003.4a/D5), both of the
* following are undefined:
* -> a thread tries to lock a mutex it already has locked
* -> a thread tries to unlock a mutex locked by a different thread
* pthread mutexes are designed for serializing threads over short pieces
* of code anyway, so wouldn't be an appropriate implementation of
* Python's locks regardless.
*
* The pthread_lock struct implements a Python lock as a "locked?" bit
* and a <condition, mutex> pair. In general, if the bit can be acquired
* instantly, it is, else the pair is used to block the thread until the
* bit is cleared. 9 May 1994 tim@ksr.com
*/
typedef struct {
char locked; /* 0=unlocked, 1=locked */
/* a <cond, mutex> pair to handle an acquire of a locked lock */
pthread_cond_t lock_released;
pthread_mutex_t mut;
} pthread_lock;
#define CHECK_STATUS(name) if (status != 0) { perror(name); error = 1; }
#define CHECK_STATUS_PTHREAD(name) if (status != 0) { fprintf(stderr, \
"%s: %s\n", name, strerror(status)); error = 1; }
/*
* Initialization.
*/
#if defined(_HAVE_BSDI)
static
void _noop(void)
{
}
static void
PyThread__init_thread(void)
{
/* DO AN INIT BY STARTING THE THREAD */
static int dummy = 0;
pthread_t thread1;
pthread_create(&thread1, NULL, (void *) _noop, &dummy);
pthread_join(thread1, NULL);
}
#else /* !_HAVE_BSDI */
static void
PyThread__init_thread(void)
{
#if defined(_AIX) && defined(__GNUC__)
extern void pthread_init(void);
pthread_init();
#endif
}
#endif /* !_HAVE_BSDI */
/*
* Thread support.
*/
/* bpo-33015: pythread_callback struct and pythread_wrapper() cast
"void func(void *)" to "void* func(void *)": always return NULL.
PyThread_start_new_thread() uses "void func(void *)" type, whereas
pthread_create() requires a void* return value. */
typedef struct {
void (*func) (void *);
void *arg;
} pythread_callback;
static void *
pythread_wrapper(void *arg)
{
/* copy func and func_arg and free the temporary structure */
pythread_callback *callback = arg;
void (*func)(void *) = callback->func;
void *func_arg = callback->arg;
PyMem_RawFree(arg);
func(func_arg);
return NULL;
}
long
PyThread_start_new_thread(void (*func)(void *), void *arg)
{
pthread_t th;
int status;
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
pthread_attr_t attrs;
#endif
#if defined(THREAD_STACK_SIZE)
size_t tss;
#endif
dprintf(("PyThread_start_new_thread called\n"));
if (!initialized)
PyThread_init_thread();
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
if (pthread_attr_init(&attrs) != 0)
return -1;
#endif
#if defined(THREAD_STACK_SIZE)
tss = (_pythread_stacksize != 0) ? _pythread_stacksize
: THREAD_STACK_SIZE;
if (tss != 0) {
if (pthread_attr_setstacksize(&attrs, tss) != 0) {
pthread_attr_destroy(&attrs);
return -1;
}
}
#endif
#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
#endif
pythread_callback *callback = PyMem_RawMalloc(sizeof(pythread_callback));
if (callback == NULL) {
return -1;
}
callback->func = func;
callback->arg = arg;
status = pthread_create(&th,
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
&attrs,
#else
(pthread_attr_t*)NULL,
#endif
pythread_wrapper, callback);
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
pthread_attr_destroy(&attrs);
#endif
if (status != 0) {
PyMem_RawFree(callback);
return -1;
}
pthread_detach(th);
#if SIZEOF_PTHREAD_T <= SIZEOF_LONG
return (long) th;
#else
return (long) *(long *) &th;
#endif
}
/* XXX This implementation is considered (to quote Tim Peters) "inherently
hosed" because:
- It does not guarantee the promise that a non-zero integer is returned.
- The cast to long is inherently unsafe.
- It is not clear that the 'volatile' (for AIX?) is any longer necessary.
*/
long
PyThread_get_thread_ident(void)
{
volatile pthread_t threadid;
if (!initialized)
PyThread_init_thread();
threadid = pthread_self();
return (long) threadid;
}
void
PyThread_exit_thread(void)
{
dprintf(("PyThread_exit_thread called\n"));
if (!initialized)
exit(0);
pthread_exit(0);
}
#ifdef USE_SEMAPHORES
/*
* Lock support.
*/
PyThread_type_lock
PyThread_allocate_lock(void)
{
sem_t *lock;
int status, error = 0;
dprintf(("PyThread_allocate_lock called\n"));
if (!initialized)
PyThread_init_thread();
lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));
if (lock) {
status = sem_init(lock,0,1);
CHECK_STATUS("sem_init");
if (error) {
PyMem_RawFree((void *)lock);
lock = NULL;
}
}
dprintf(("PyThread_allocate_lock() -> %p\n", lock));
return (PyThread_type_lock)lock;
}
void
PyThread_free_lock(PyThread_type_lock lock)
{
sem_t *thelock = (sem_t *)lock;
int status, error = 0;
(void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_free_lock(%p) called\n", lock));
if (!thelock)
return;
status = sem_destroy(thelock);
CHECK_STATUS("sem_destroy");
PyMem_RawFree((void *)thelock);
}
/*
* As of February 2002, Cygwin thread implementations mistakenly report error
* codes in the return value of the sem_ calls (like the pthread_ functions).
* Correct implementations return -1 and put the code in errno. This supports
* either.
*/
static int
fix_status(int status)
{
return (status == -1) ? errno : status;
}
PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
int intr_flag)
{
PyLockStatus success;
sem_t *thelock = (sem_t *)lock;
int status, error = 0;
struct timespec ts;
(void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
lock, microseconds, intr_flag));
if (microseconds > 0)
MICROSECONDS_TO_TIMESPEC(microseconds, ts);
do {
if (microseconds > 0)
status = fix_status(sem_timedwait(thelock, &ts));
else if (microseconds == 0)
status = fix_status(sem_trywait(thelock));
else
status = fix_status(sem_wait(thelock));
/* Retry if interrupted by a signal, unless the caller wants to be
notified. */
} while (!intr_flag && status == EINTR);
/* Don't check the status if we're stopping because of an interrupt. */
if (!(intr_flag && status == EINTR)) {
if (microseconds > 0) {
if (status != ETIMEDOUT)
CHECK_STATUS("sem_timedwait");
}
else if (microseconds == 0) {
if (status != EAGAIN)
CHECK_STATUS("sem_trywait");
}
else {
CHECK_STATUS("sem_wait");
}
}
if (status == 0) {
success = PY_LOCK_ACQUIRED;
} else if (intr_flag && status == EINTR) {
success = PY_LOCK_INTR;
} else {
success = PY_LOCK_FAILURE;
}
dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
lock, microseconds, intr_flag, success));
return success;
}
void
PyThread_release_lock(PyThread_type_lock lock)
{
sem_t *thelock = (sem_t *)lock;
int status, error = 0;
(void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_release_lock(%p) called\n", lock));
status = sem_post(thelock);
CHECK_STATUS("sem_post");
}
#else /* USE_SEMAPHORES */
/*
* Lock support.
*/
PyThread_type_lock
PyThread_allocate_lock(void)
{
pthread_lock *lock;
int status, error = 0;
dprintf(("PyThread_allocate_lock called\n"));
if (!initialized)
PyThread_init_thread();
lock = (pthread_lock *) PyMem_RawMalloc(sizeof(pthread_lock));
if (lock) {
memset((void *)lock, '\0', sizeof(pthread_lock));
lock->locked = 0;
status = pthread_mutex_init(&lock->mut,
pthread_mutexattr_default);
CHECK_STATUS_PTHREAD("pthread_mutex_init");
/* Mark the pthread mutex underlying a Python mutex as
pure happens-before. We can't simply mark the
Python-level mutex as a mutex because it can be
acquired and released in different threads, which
will cause errors. */
_Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);
status = pthread_cond_init(&lock->lock_released,
pthread_condattr_default);
CHECK_STATUS_PTHREAD("pthread_cond_init");
if (error) {
PyMem_RawFree((void *)lock);
lock = 0;
}
}
dprintf(("PyThread_allocate_lock() -> %p\n", lock));
return (PyThread_type_lock) lock;
}
void
PyThread_free_lock(PyThread_type_lock lock)
{
pthread_lock *thelock = (pthread_lock *)lock;
int status, error = 0;
(void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_free_lock(%p) called\n", lock));
/* some pthread-like implementations tie the mutex to the cond
* and must have the cond destroyed first.
*/
status = pthread_cond_destroy( &thelock->lock_released );
CHECK_STATUS_PTHREAD("pthread_cond_destroy");
status = pthread_mutex_destroy( &thelock->mut );
CHECK_STATUS_PTHREAD("pthread_mutex_destroy");
PyMem_RawFree((void *)thelock);
}
PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
int intr_flag)
{
PyLockStatus success = PY_LOCK_FAILURE;
pthread_lock *thelock = (pthread_lock *)lock;
int status, error = 0;
dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) called\n",
lock, microseconds, intr_flag));
if (microseconds == 0) {
status = pthread_mutex_trylock( &thelock->mut );
if (status != EBUSY)
CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
}
else {
status = pthread_mutex_lock( &thelock->mut );
CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
}
if (status == 0) {
if (thelock->locked == 0) {
success = PY_LOCK_ACQUIRED;
}
else if (microseconds != 0) {
struct timespec ts;
if (microseconds > 0)
MICROSECONDS_TO_TIMESPEC(microseconds, ts);
/* continue trying until we get the lock */
/* mut must be locked by me -- part of the condition
* protocol */
while (success == PY_LOCK_FAILURE) {
if (microseconds > 0) {
status = pthread_cond_timedwait(
&thelock->lock_released,
&thelock->mut, &ts);
if (status == ETIMEDOUT)
break;
CHECK_STATUS_PTHREAD("pthread_cond_timed_wait");
}
else {
status = pthread_cond_wait(
&thelock->lock_released,
&thelock->mut);
CHECK_STATUS_PTHREAD("pthread_cond_wait");
}
if (intr_flag && status == 0 && thelock->locked) {
/* We were woken up, but didn't get the lock. We probably received
* a signal. Return PY_LOCK_INTR to allow the caller to handle
* it and retry. */
success = PY_LOCK_INTR;
break;
}
else if (status == 0 && !thelock->locked) {
success = PY_LOCK_ACQUIRED;
}
}
}
if (success == PY_LOCK_ACQUIRED) thelock->locked = 1;
status = pthread_mutex_unlock( &thelock->mut );
CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");
}
if (error) success = PY_LOCK_FAILURE;
dprintf(("PyThread_acquire_lock_timed(%p, %lld, %d) -> %d\n",
lock, microseconds, intr_flag, success));
return success;
}
void
PyThread_release_lock(PyThread_type_lock lock)
{
pthread_lock *thelock = (pthread_lock *)lock;
int status, error = 0;
(void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_release_lock(%p) called\n", lock));
status = pthread_mutex_lock( &thelock->mut );
CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");
thelock->locked = 0;
/* wake up someone (anyone, if any) waiting on the lock */
status = pthread_cond_signal( &thelock->lock_released );
CHECK_STATUS_PTHREAD("pthread_cond_signal");
status = pthread_mutex_unlock( &thelock->mut );
CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
}
#endif /* USE_SEMAPHORES */
int
PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
{
return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
}
/* set the thread stack size.
* Return 0 if size is valid, -1 if size is invalid,
* -2 if setting stack size is not supported.
*/
static int
_pythread_pthread_set_stacksize(size_t size)
{
#if defined(THREAD_STACK_SIZE)
pthread_attr_t attrs;
size_t tss_min;
int rc = 0;
#endif
/* set to default */
if (size == 0) {
_pythread_stacksize = 0;
return 0;
}
#if defined(THREAD_STACK_SIZE)
#if defined(PTHREAD_STACK_MIN)
tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
: THREAD_STACK_MIN;
#else
tss_min = THREAD_STACK_MIN;
#endif
if (size >= tss_min) {
/* validate stack size by setting thread attribute */
if (pthread_attr_init(&attrs) == 0) {
rc = pthread_attr_setstacksize(&attrs, size);
pthread_attr_destroy(&attrs);
if (rc == 0) {
_pythread_stacksize = size;
return 0;
}
}
}
return -1;
#else
return -2;
#endif
}
#define THREAD_SET_STACKSIZE(x) _pythread_pthread_set_stacksize(x)
#define Py_HAVE_NATIVE_TLS
int
PyThread_create_key(void)
{
pthread_key_t key;
int fail = pthread_key_create(&key, NULL);
if (fail)
return -1;
if (key > INT_MAX) {
/* Issue #22206: handle integer overflow */
pthread_key_delete(key);
errno = ENOMEM;
return -1;
}
return (int)key;
}
void
PyThread_delete_key(int key)
{
pthread_key_delete(key);
}
void
PyThread_delete_key_value(int key)
{
pthread_setspecific(key, NULL);
}
int
PyThread_set_key_value(int key, void *value)
{
int fail;
fail = pthread_setspecific(key, value);
return fail ? -1 : 0;
}
void *
PyThread_get_key_value(int key)
{
return pthread_getspecific(key);
}
void
PyThread_ReInitTLS(void)
{}

826
third_party/python/Python/traceback.c vendored Normal file
View file

@ -0,0 +1,826 @@
/* Traceback implementation */
#include "Python.h"
#include "code.h"
#include "frameobject.h"
#include "structmember.h"
#include "osdefs.h"
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#define OFF(x) offsetof(PyTracebackObject, x)
#define PUTS(fd, str) _Py_write_noraise(fd, str, (int)strlen(str))
#define MAX_STRING_LENGTH 500
#define MAX_FRAME_DEPTH 100
#define MAX_NTHREADS 100
/* Function from Parser/tokenizer.c */
extern char * PyTokenizer_FindEncodingFilename(int, PyObject *);
_Py_IDENTIFIER(TextIOWrapper);
_Py_IDENTIFIER(close);
_Py_IDENTIFIER(open);
_Py_IDENTIFIER(path);
static PyObject *
tb_dir(PyTracebackObject *self)
{
return Py_BuildValue("[ssss]", "tb_frame", "tb_next",
"tb_lasti", "tb_lineno");
}
static PyMethodDef tb_methods[] = {
{"__dir__", (PyCFunction)tb_dir, METH_NOARGS},
{NULL, NULL, 0, NULL},
};
static PyMemberDef tb_memberlist[] = {
{"tb_next", T_OBJECT, OFF(tb_next), READONLY},
{"tb_frame", T_OBJECT, OFF(tb_frame), READONLY},
{"tb_lasti", T_INT, OFF(tb_lasti), READONLY},
{"tb_lineno", T_INT, OFF(tb_lineno), READONLY},
{NULL} /* Sentinel */
};
static void
tb_dealloc(PyTracebackObject *tb)
{
PyObject_GC_UnTrack(tb);
Py_TRASHCAN_SAFE_BEGIN(tb)
Py_XDECREF(tb->tb_next);
Py_XDECREF(tb->tb_frame);
PyObject_GC_Del(tb);
Py_TRASHCAN_SAFE_END(tb)
}
static int
tb_traverse(PyTracebackObject *tb, visitproc visit, void *arg)
{
Py_VISIT(tb->tb_next);
Py_VISIT(tb->tb_frame);
return 0;
}
static int
tb_clear(PyTracebackObject *tb)
{
Py_CLEAR(tb->tb_next);
Py_CLEAR(tb->tb_frame);
return 0;
}
PyTypeObject PyTraceBack_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
"traceback",
sizeof(PyTracebackObject),
0,
(destructor)tb_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_reserved*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
PyObject_GenericGetAttr, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
0, /* tp_doc */
(traverseproc)tb_traverse, /* tp_traverse */
(inquiry)tb_clear, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
tb_methods, /* tp_methods */
tb_memberlist, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
};
static PyTracebackObject *
newtracebackobject(PyTracebackObject *next, PyFrameObject *frame)
{
PyTracebackObject *tb;
if ((next != NULL && !PyTraceBack_Check(next)) ||
frame == NULL || !PyFrame_Check(frame)) {
PyErr_BadInternalCall();
return NULL;
}
tb = PyObject_GC_New(PyTracebackObject, &PyTraceBack_Type);
if (tb != NULL) {
Py_XINCREF(next);
tb->tb_next = next;
Py_XINCREF(frame);
tb->tb_frame = frame;
tb->tb_lasti = frame->f_lasti;
tb->tb_lineno = PyFrame_GetLineNumber(frame);
PyObject_GC_Track(tb);
}
return tb;
}
int
PyTraceBack_Here(PyFrameObject *frame)
{
PyObject *exc, *val, *tb, *newtb;
PyErr_Fetch(&exc, &val, &tb);
newtb = (PyObject *)newtracebackobject((PyTracebackObject *)tb, frame);
if (newtb == NULL) {
_PyErr_ChainExceptions(exc, val, tb);
return -1;
}
PyErr_Restore(exc, val, newtb);
Py_XDECREF(tb);
return 0;
}
/* Insert a frame into the traceback for (funcname, filename, lineno). */
void _PyTraceback_Add(const char *funcname, const char *filename, int lineno)
{
PyObject *globals;
PyCodeObject *code;
PyFrameObject *frame;
PyObject *exc, *val, *tb;
/* Save and clear the current exception. Python functions must not be
called with an exception set. Calling Python functions happens when
the codec of the filesystem encoding is implemented in pure Python. */
PyErr_Fetch(&exc, &val, &tb);
globals = PyDict_New();
if (!globals)
goto error;
code = PyCode_NewEmpty(filename, funcname, lineno);
if (!code) {
Py_DECREF(globals);
goto error;
}
frame = PyFrame_New(PyThreadState_Get(), code, globals, NULL);
Py_DECREF(globals);
Py_DECREF(code);
if (!frame)
goto error;
frame->f_lineno = lineno;
PyErr_Restore(exc, val, tb);
PyTraceBack_Here(frame);
Py_DECREF(frame);
return;
error:
_PyErr_ChainExceptions(exc, val, tb);
}
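/* Editor's sketch (not part of the vendored file): a typical call site.  After
   a low-level failure, attach a synthetic frame so the Python traceback names
   a useful (function, file, line).  demo_codec_step and "demo.c" are
   hypothetical. */
static int
demo_codec_step(void)
{
    PyErr_SetString(PyExc_RuntimeError, "low-level decode failure");
    _PyTraceback_Add("demo_codec_step", "demo.c", 123);
    return -1;
}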
static PyObject *
_Py_FindSourceFile(PyObject *filename, char* namebuf, size_t namelen, PyObject *io)
{
Py_ssize_t i;
PyObject *binary;
PyObject *v;
Py_ssize_t npath;
size_t taillen;
PyObject *syspath;
PyObject *path;
const char* tail;
PyObject *filebytes;
const char* filepath;
Py_ssize_t len;
PyObject* result;
filebytes = PyUnicode_EncodeFSDefault(filename);
if (filebytes == NULL) {
PyErr_Clear();
return NULL;
}
filepath = PyBytes_AS_STRING(filebytes);
/* Search tail of filename in sys.path before giving up */
tail = strrchr(filepath, SEP);
if (tail == NULL)
tail = filepath;
else
tail++;
taillen = strlen(tail);
syspath = _PySys_GetObjectId(&PyId_path);
if (syspath == NULL || !PyList_Check(syspath))
goto error;
npath = PyList_Size(syspath);
for (i = 0; i < npath; i++) {
v = PyList_GetItem(syspath, i);
if (v == NULL) {
PyErr_Clear();
break;
}
if (!PyUnicode_Check(v))
continue;
path = PyUnicode_EncodeFSDefault(v);
if (path == NULL) {
PyErr_Clear();
continue;
}
len = PyBytes_GET_SIZE(path);
if (len + 1 + (Py_ssize_t)taillen >= (Py_ssize_t)namelen - 1) {
Py_DECREF(path);
continue; /* Too long */
}
strcpy(namebuf, PyBytes_AS_STRING(path));
Py_DECREF(path);
if (strlen(namebuf) != (size_t)len)
continue; /* v contains '\0' */
if (len > 0 && namebuf[len-1] != SEP)
namebuf[len++] = SEP;
strcpy(namebuf+len, tail);
binary = _PyObject_CallMethodId(io, &PyId_open, "ss", namebuf, "rb");
if (binary != NULL) {
result = binary;
goto finally;
}
PyErr_Clear();
}
goto error;
error:
result = NULL;
finally:
Py_DECREF(filebytes);
return result;
}
int
_Py_DisplaySourceLine(PyObject *f, PyObject *filename, int lineno, int indent)
{
int err = 0;
int fd;
int i;
char *found_encoding;
char *encoding;
PyObject *io;
PyObject *binary;
PyObject *fob = NULL;
PyObject *lineobj = NULL;
PyObject *res;
char buf[MAXPATHLEN+1];
int kind;
void *data;
/* open the file */
if (filename == NULL)
return 0;
io = PyImport_ImportModuleNoBlock("io");
if (io == NULL)
return -1;
binary = _PyObject_CallMethodId(io, &PyId_open, "Os", filename, "rb");
if (binary == NULL) {
PyErr_Clear();
binary = _Py_FindSourceFile(filename, buf, sizeof(buf), io);
if (binary == NULL) {
Py_DECREF(io);
return -1;
}
}
/* use the right encoding to decode the file as unicode */
fd = PyObject_AsFileDescriptor(binary);
if (fd < 0) {
Py_DECREF(io);
Py_DECREF(binary);
return 0;
}
found_encoding = PyTokenizer_FindEncodingFilename(fd, filename);
if (found_encoding == NULL)
PyErr_Clear();
encoding = (found_encoding != NULL) ? found_encoding : "utf-8";
/* Reset position */
if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
Py_DECREF(io);
Py_DECREF(binary);
PyMem_FREE(found_encoding);
return 0;
}
fob = _PyObject_CallMethodId(io, &PyId_TextIOWrapper, "Os", binary, encoding);
Py_DECREF(io);
PyMem_FREE(found_encoding);
if (fob == NULL) {
PyErr_Clear();
res = _PyObject_CallMethodId(binary, &PyId_close, NULL);
Py_DECREF(binary);
if (res)
Py_DECREF(res);
else
PyErr_Clear();
return 0;
}
Py_DECREF(binary);
/* get the line number lineno */
for (i = 0; i < lineno; i++) {
Py_XDECREF(lineobj);
lineobj = PyFile_GetLine(fob, -1);
if (!lineobj) {
PyErr_Clear();
err = -1;
break;
}
}
res = _PyObject_CallMethodId(fob, &PyId_close, NULL);
if (res)
Py_DECREF(res);
else
PyErr_Clear();
Py_DECREF(fob);
if (!lineobj || !PyUnicode_Check(lineobj)) {
Py_XDECREF(lineobj);
return err;
}
/* remove the indentation of the line */
kind = PyUnicode_KIND(lineobj);
data = PyUnicode_DATA(lineobj);
for (i=0; i < PyUnicode_GET_LENGTH(lineobj); i++) {
Py_UCS4 ch = PyUnicode_READ(kind, data, i);
if (ch != ' ' && ch != '\t' && ch != '\014')
break;
}
if (i) {
PyObject *truncated;
truncated = PyUnicode_Substring(lineobj, i, PyUnicode_GET_LENGTH(lineobj));
if (truncated) {
Py_DECREF(lineobj);
lineobj = truncated;
} else {
PyErr_Clear();
}
}
/* Write some spaces before the line */
strcpy(buf, "          ");
assert (strlen(buf) == 10);
while (indent > 0) {
if (indent < 10)
buf[indent] = '\0';
err = PyFile_WriteString(buf, f);
if (err != 0)
break;
indent -= 10;
}
/* finally display the line */
if (err == 0)
err = PyFile_WriteObject(lineobj, f, Py_PRINT_RAW);
Py_DECREF(lineobj);
if (err == 0)
err = PyFile_WriteString("\n", f);
return err;
}
static int
tb_displayline(PyObject *f, PyObject *filename, int lineno, PyObject *name)
{
int err;
PyObject *line;
if (filename == NULL || name == NULL)
return -1;
line = PyUnicode_FromFormat(" File \"%U\", line %d, in %U\n",
filename, lineno, name);
if (line == NULL)
return -1;
err = PyFile_WriteObject(line, f, Py_PRINT_RAW);
Py_DECREF(line);
if (err != 0)
return err;
/* ignore errors since we can't report them, can we? */
if (_Py_DisplaySourceLine(f, filename, lineno, 4))
PyErr_Clear();
return err;
}
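/* Consecutive traceback entries that share the same file, line and function
name are printed at most TB_RECURSIVE_CUTOFF times; the remaining repetitions
are summarized by tb_print_line_repeated() below. */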
static const int TB_RECURSIVE_CUTOFF = 3; // Also hardcoded in traceback.py.
static int
tb_print_line_repeated(PyObject *f, long cnt)
{
cnt -= TB_RECURSIVE_CUTOFF;
PyObject *line = PyUnicode_FromFormat(
(cnt > 1)
? " [Previous line repeated %ld more times]\n"
: " [Previous line repeated %ld more time]\n",
cnt);
if (line == NULL) {
return -1;
}
int err = PyFile_WriteObject(line, f, Py_PRINT_RAW);
Py_DECREF(line);
return err;
}
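/* Print at most 'limit' entries of the traceback 'tb' to 'f', skipping the
oldest frames first and collapsing long runs of identical entries as
described above. */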
static int
tb_printinternal(PyTracebackObject *tb, PyObject *f, long limit)
{
int err = 0;
Py_ssize_t depth = 0;
PyObject *last_file = NULL;
int last_line = -1;
PyObject *last_name = NULL;
long cnt = 0;
PyTracebackObject *tb1 = tb;
while (tb1 != NULL) {
depth++;
tb1 = tb1->tb_next;
}
while (tb != NULL && depth > limit) {
depth--;
tb = tb->tb_next;
}
while (tb != NULL && err == 0) {
if (last_file == NULL ||
tb->tb_frame->f_code->co_filename != last_file ||
last_line == -1 || tb->tb_lineno != last_line ||
last_name == NULL || tb->tb_frame->f_code->co_name != last_name) {
if (cnt > TB_RECURSIVE_CUTOFF) {
err = tb_print_line_repeated(f, cnt);
}
last_file = tb->tb_frame->f_code->co_filename;
last_line = tb->tb_lineno;
last_name = tb->tb_frame->f_code->co_name;
cnt = 0;
}
cnt++;
if (err == 0 && cnt <= TB_RECURSIVE_CUTOFF) {
err = tb_displayline(f,
tb->tb_frame->f_code->co_filename,
tb->tb_lineno,
tb->tb_frame->f_code->co_name);
if (err == 0) {
err = PyErr_CheckSignals();
}
}
tb = tb->tb_next;
}
if (err == 0 && cnt > TB_RECURSIVE_CUTOFF) {
err = tb_print_line_repeated(f, cnt);
}
return err;
}
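/* Public entry point: write the traceback object 'v' to the file object 'f'.
A positive sys.tracebacklimit bounds the number of entries shown; a value of
zero or less suppresses the output entirely. */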
#define PyTraceBack_LIMIT 1000
int
PyTraceBack_Print(PyObject *v, PyObject *f)
{
int err;
PyObject *limitv;
long limit = PyTraceBack_LIMIT;
if (v == NULL)
return 0;
if (!PyTraceBack_Check(v)) {
PyErr_BadInternalCall();
return -1;
}
limitv = PySys_GetObject("tracebacklimit");
if (limitv && PyLong_Check(limitv)) {
int overflow;
limit = PyLong_AsLongAndOverflow(limitv, &overflow);
if (overflow > 0) {
limit = LONG_MAX;
}
else if (limit <= 0) {
return 0;
}
}
err = PyFile_WriteString("Traceback (most recent call last):\n", f);
if (!err)
err = tb_printinternal((PyTracebackObject *)v, f, limit);
return err;
}
/* Format an unsigned long as decimal and write it into the file fd.
This function is signal safe. */
void
_Py_DumpDecimal(int fd, unsigned long value)
{
/* maximum number of characters required to print an unsigned long in decimal.
We need at most ceil(log10(256) * sizeof(unsigned long)) digits,
plus 1 for the null byte. 53/22 is an upper bound for log10(256). */
char buffer[1 + (sizeof(unsigned long)*53-1) / 22 + 1];
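/* For example, with 8-byte longs the buffer is 1 + (8*53-1)/22 + 1 = 21 bytes:
room for the 20 decimal digits of 2**64-1 plus the terminating NUL. */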
char *ptr, *end;
end = &buffer[Py_ARRAY_LENGTH(buffer) - 1];
ptr = end;
*ptr = '\0';
do {
--ptr;
assert(ptr >= buffer);
*ptr = '0' + (value % 10);
value /= 10;
} while (value);
_Py_write_noraise(fd, ptr, end - ptr);
}
/* Format an unsigned long as hexadecimal with at least 'width' digits (capped
at the number of hex digits an unsigned long can hold) and write it into the
file fd.
This function is signal safe. */
void
_Py_DumpHexadecimal(int fd, unsigned long value, Py_ssize_t width)
{
char buffer[sizeof(unsigned long) * 2 + 1], *ptr, *end;
const Py_ssize_t size = Py_ARRAY_LENGTH(buffer) - 1;
if (width > size)
width = size;
/* it's ok if width is negative */
end = &buffer[size];
ptr = end;
*ptr = '\0';
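/* Emit hex digits least-significant first, walking backwards from the end of
the buffer; keep going until the value is exhausted and at least 'width'
digits (zero-padded) have been written. */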
do {
--ptr;
assert(ptr >= buffer);
*ptr = Py_hexdigits[value & 15];
value >>= 4;
} while ((end - ptr) < width || value);
_Py_write_noraise(fd, ptr, end - ptr);
}
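/* Write the Unicode object 'text' to fd as ASCII: printable characters are
written verbatim, everything else as \xHH, \uHHHH or \UHHHHHHHH escapes; at
most MAX_STRING_LENGTH characters are dumped, with "..." marking truncation.
This function is signal safe. */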
void
_Py_DumpASCII(int fd, PyObject *text)
{
PyASCIIObject *ascii = (PyASCIIObject *)text;
Py_ssize_t i, size;
int truncated;
int kind;
void *data = NULL;
wchar_t *wstr = NULL;
Py_UCS4 ch;
if (!PyUnicode_Check(text))
return;
size = ascii->length;
kind = ascii->state.kind;
if (kind == PyUnicode_WCHAR_KIND) {
wstr = ((PyASCIIObject *)text)->wstr;
if (wstr == NULL)
return;
size = ((PyCompactUnicodeObject *)text)->wstr_length;
}
else if (ascii->state.compact) {
if (ascii->state.ascii)
data = ((PyASCIIObject*)text) + 1;
else
data = ((PyCompactUnicodeObject*)text) + 1;
}
else {
data = ((PyUnicodeObject *)text)->data.any;
if (data == NULL)
return;
}
if (MAX_STRING_LENGTH < size) {
size = MAX_STRING_LENGTH;
truncated = 1;
}
else {
truncated = 0;
}
for (i=0; i < size; i++) {
if (kind != PyUnicode_WCHAR_KIND)
ch = PyUnicode_READ(kind, data, i);
else
ch = wstr[i];
if (' ' <= ch && ch <= 126) {
/* printable ASCII character */
char c = (char)ch;
_Py_write_noraise(fd, &c, 1);
}
else if (ch <= 0xff) {
PUTS(fd, "\\x");
_Py_DumpHexadecimal(fd, ch, 2);
}
else if (ch <= 0xffff) {
PUTS(fd, "\\u");
_Py_DumpHexadecimal(fd, ch, 4);
}
else {
PUTS(fd, "\\U");
_Py_DumpHexadecimal(fd, ch, 8);
}
}
if (truncated) {
PUTS(fd, "...");
}
}
/* Write a frame into the file fd: "File "xxx", line xxx in xxx".
This function is signal safe. */
static void
dump_frame(int fd, PyFrameObject *frame)
{
PyCodeObject *code;
int lineno;
code = frame->f_code;
PUTS(fd, " File ");
if (code != NULL && code->co_filename != NULL
&& PyUnicode_Check(code->co_filename))
{
PUTS(fd, "\"");
_Py_DumpASCII(fd, code->co_filename);
PUTS(fd, "\"");
} else {
PUTS(fd, "???");
}
/* PyFrame_GetLineNumber() was only introduced in Python 2.7.0 and 3.2.0,
so compute the line number from the code object directly */
lineno = PyCode_Addr2Line(code, frame->f_lasti);
PUTS(fd, ", line ");
if (lineno >= 0) {
_Py_DumpDecimal(fd, (unsigned long)lineno);
}
else {
PUTS(fd, "???");
}
PUTS(fd, " in ");
if (code != NULL && code->co_name != NULL
&& PyUnicode_Check(code->co_name)) {
_Py_DumpASCII(fd, code->co_name);
}
else {
PUTS(fd, "???");
}
PUTS(fd, "\n");
}
static void
dump_traceback(int fd, PyThreadState *tstate, int write_header)
{
PyFrameObject *frame;
unsigned int depth;
if (write_header)
PUTS(fd, "Stack (most recent call first):\n");
frame = _PyThreadState_GetFrame(tstate);
if (frame == NULL)
return;
depth = 0;
while (frame != NULL) {
if (MAX_FRAME_DEPTH <= depth) {
PUTS(fd, " ...\n");
break;
}
if (!PyFrame_Check(frame))
break;
dump_frame(fd, frame);
frame = frame->f_back;
depth++;
}
}
/* Dump the traceback of a Python thread into fd. Use write() to write the
traceback and retry if write() is interrupted by a signal (failed with
EINTR), but don't call the Python signal handler.
The caller is responsible for calling PyErr_CheckSignals() to run Python
signal handlers if signals were received. */
void
_Py_DumpTraceback(int fd, PyThreadState *tstate)
{
dump_traceback(fd, tstate, 1);
}
/* Write the thread identifier into the file 'fd': "Current thread 0xHHHH (most
recent call first):\n" if is_current is true, "Thread 0xHHHH (most recent call
first):\n" otherwise.
This function is signal safe. */
static void
write_thread_id(int fd, PyThreadState *tstate, int is_current)
{
if (is_current)
PUTS(fd, "Current thread 0x");
else
PUTS(fd, "Thread 0x");
_Py_DumpHexadecimal(fd,
(unsigned long)tstate->thread_id,
sizeof(unsigned long) * 2);
PUTS(fd, " (most recent call first):\n");
}
/* Dump the traceback of all Python threads into fd. Use write() to write the
traceback and retry if write() is interrupted by a signal (failed with
EINTR), but don't call the Python signal handler.
The caller is responsible for calling PyErr_CheckSignals() to run Python
signal handlers if signals were received. */
const char*
_Py_DumpTracebackThreads(int fd, PyInterpreterState *interp,
PyThreadState *current_tstate)
{
PyThreadState *tstate;
unsigned int nthreads;
#ifdef WITH_THREAD
if (current_tstate == NULL) {
/* _Py_DumpTracebackThreads() is called from signal handlers by
faulthandler.
SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL are synchronous signals
and are thus delivered to the thread that caused the fault. Get the
Python thread state of the current thread.
PyThreadState_Get() doesn't give the state of the thread that caused
the fault if the thread released the GIL, and so this function
cannot be used. Read the thread local storage (TLS) instead: call
PyGILState_GetThisThreadState(). */
current_tstate = PyGILState_GetThisThreadState();
}
if (interp == NULL) {
if (current_tstate == NULL) {
interp = _PyGILState_GetInterpreterStateUnsafe();
if (interp == NULL) {
/* We need the interpreter state to get Python threads */
return "unable to get the interpreter state";
}
}
else {
interp = current_tstate->interp;
}
}
#else
if (current_tstate == NULL) {
/* Call _PyThreadState_UncheckedGet() instead of PyThreadState_Get(), which
would issue a fatal error if the thread state is NULL. */
current_tstate = _PyThreadState_UncheckedGet();
}
if (interp == NULL) {
if (current_tstate == NULL) {
/* We need the interpreter state to get Python threads */
return "unable to get the interpreter state";
}
interp = current_tstate->interp;
}
#endif
assert(interp != NULL);
/* Get the head of the interpreter's list of thread states */
tstate = PyInterpreterState_ThreadHead(interp);
if (tstate == NULL)
return "unable to get the thread head state";
/* Dump the traceback of each thread */
tstate = PyInterpreterState_ThreadHead(interp);
nthreads = 0;
_Py_BEGIN_SUPPRESS_IPH
do
{
if (nthreads != 0)
PUTS(fd, "\n");
if (nthreads >= MAX_NTHREADS) {
PUTS(fd, "...\n");
break;
}
write_thread_id(fd, tstate, tstate == current_tstate);
dump_traceback(fd, tstate, 0);
tstate = PyThreadState_Next(tstate);
nthreads++;
} while (tstate != NULL);
_Py_END_SUPPRESS_IPH
return NULL;
}

View file

@ -0,0 +1,44 @@
/* This file contains code shared by the compiler and the peephole
optimizer.
*/
#ifdef WORDS_BIGENDIAN
# define PACKOPARG(opcode, oparg) ((_Py_CODEUNIT)(((opcode) << 8) | (oparg)))
#else
# define PACKOPARG(opcode, oparg) ((_Py_CODEUNIT)(((oparg) << 8) | (opcode)))
#endif
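/* Each instruction occupies one 16-bit _Py_CODEUNIT with the opcode in one
byte and the oparg in the other; e.g. packing opcode 100 with oparg 2 yields
0x0264 on a little-endian build and 0x6402 on a big-endian one. */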
/* Minimum number of code units necessary to encode an instruction with the
given oparg, counting the EXTENDED_ARGs it requires */
static int
instrsize(unsigned int oparg)
{
return oparg <= 0xff ? 1 :
oparg <= 0xffff ? 2 :
oparg <= 0xffffff ? 3 :
4;
}
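/* For example: instrsize(0x3f) == 1, instrsize(0x1234) == 2,
instrsize(0x123456) == 3 and instrsize(0x1234567) == 4; every code unit
beyond the first is an EXTENDED_ARG. */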
/* Write out the op/oparg pair using ilen code units. codestr should point at
the desired location of the first EXTENDED_ARG */
static void
write_op_arg(_Py_CODEUNIT *codestr, unsigned char opcode,
unsigned int oparg, int ilen)
{
switch (ilen) {
case 4:
*codestr++ = PACKOPARG(EXTENDED_ARG, (oparg >> 24) & 0xff);
/* fall through */
case 3:
*codestr++ = PACKOPARG(EXTENDED_ARG, (oparg >> 16) & 0xff);
/* fall through */
case 2:
*codestr++ = PACKOPARG(EXTENDED_ARG, (oparg >> 8) & 0xff);
/* fall through */
case 1:
*codestr++ = PACKOPARG(opcode, oparg & 0xff);
break;
default:
assert(0);
}
}
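/* For example, write_op_arg(codestr, op, 0x123456, 3) emits three code units:
EXTENDED_ARG 0x12, EXTENDED_ARG 0x34, and finally 'op' with oparg 0x56. */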