cosmopolitan/third_party/python/Tools/unicode/makeunicodedata.py
Justine Tunney 47a53e143b Productionize new APE loader and more
The APE_NO_MODIFY_SELF loader payload has been moved out of the examples
folder and improved so that it works on BSD systems, and permits general
elf program headers. This brings its quality up enough that it should be
acceptable to use by default for many programs, e.g. Python, Lua, and
SQLite. It's the responsibility of the user to define an appropriate
TMPDIR if /tmp is considered an adversarial environment. Mac OS shall be
supported by APE_NO_MODIFY_SELF soon.

Fixes and improvements have been made to program_executable_name as it's
now the one true way to get the absolute path of the executing image.

This change fixes a memory leak in linenoise history loading that was
introduced by the performance optimizations in 51904e2687. This change
also fixes a longstanding regression with Mach system calls, introduced
by 23ae9dfceb back in February, which impacted our sched_yield()
implementation; that's why no one noticed until now.

The Blinkenlights PC emulator has been improved. We fixed rendering on
XNU and BSD by no longer assuming the kernel terminal driver understands
UTF-8, since that seemed to break its internal modeling of \r\n; we now
address the cursor with \e[𝑦H instead. The paneling is more compact in
real mode, so you won't need to make your font as tiny if you're only
emulating an 8086 program. The CLMUL ISA is now emulated too.

This change also makes improvements to timekeeping. CLOCK_MONOTONIC now
does the right thing on Windows NT. The nanosecond time module functions
added in Python 3.7 have been backported.

This change doubles the performance of Argon2 password stretching simply
by not using its copy_block and xor_block helper functions: they were
trivial to inline, which means we now iterate over each 1024-byte block
four fewer times.

This change makes code size improvements. _PyUnicode_ToNumeric() was 64k
in size and is now 10k. The CJK codec lookup tables now use lazy delta
zigzag deflate (δzd) encoding, which reduces their size from 600k to
200k. The code bloat caused by macro abuse in _decimal.c has also been
addressed, so our fully-loaded statically-linked hermetically-sealed
Python virtual interpreter container is now 9.4 megs in the default
build mode and 5.5m in MODE=tiny, which leaves plenty of room for
chibicc.
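
For the curious, δzd amounts to delta coding, zigzag mapping, LEB128
byte packing, and DEFLATE. Here's a minimal sketch of the encoder
(dzd_encode is an illustrative name, not something in the tree; the
real generator lives in makeunicodedata.py below and emits raw
DEFLATE, i.e. -MAX_WBITS):

    import zlib

    def dzd_encode(values):
        out, prev = bytearray(), 0
        for v in values:
            d, prev = v - prev, v                        # delta
            z = (d << 1) if d >= 0 else ((-d << 1) - 1)  # zigzag
            while True:                                  # LEB128
                out.append((z & 127) | (128 if z > 127 else 0))
                z >>= 7
                if not z:
                    break
        return zlib.compress(bytes(out))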

The pydoc web server now accommodates the use case of people who work by
SSH'ing into a different machine, with python.com -m pydoc -p8080 -h0.0.0.0.

Finally, Python Capsulae delenda est; they won't be supported in the future.
2021-10-02 08:27:03 -07:00


#
# (re)generate unicode property and type databases
#
# this script converts a unicode 3.2 database file to
# Modules/unicodedata_db.h, Modules/unicodename_db.h,
# and Objects/unicodetype_db.h
#
# history:
# 2000-09-24 fl created (based on bits and pieces from unidb)
# 2000-09-25 fl merged tim's splitbin fixes, separate decomposition table
# 2000-09-25 fl added character type table
# 2000-09-26 fl added LINEBREAK, DECIMAL, and DIGIT flags/fields (2.0)
# 2000-11-03 fl expand first/last ranges
# 2001-01-19 fl added character name tables (2.1)
# 2001-01-21 fl added decomp compression; dynamic phrasebook threshold
# 2002-09-11 wd use string methods
# 2002-10-18 mvl update to Unicode 3.2
# 2002-10-22 mvl generate NFC tables
# 2002-11-24 mvl expand all ranges, sort names version-independently
# 2002-11-25 mvl add UNIDATA_VERSION
# 2004-05-29 perky add east asian width information
# 2006-03-10 mvl update to Unicode 4.1; add UCD 3.2 delta
# 2008-06-11 gb add PRINTABLE_MASK for Atsuo Ishimoto's ascii() patch
# 2011-10-21 ezio add support for name aliases and named sequences
# 2012-01 benjamin add full case mappings
#
# written by Fredrik Lundh (fredrik@pythonware.com)
#
import os
import sys
import bz2
import zlib
import zipfile
from textwrap import dedent
SCRIPT = sys.argv[0]
VERSION = "3.2"
# The Unicode Database
# --------------------
# When changing UCD version please update
# * Doc/library/stdtypes.rst,
# * Doc/library/unicodedata.rst, and
# * Doc/reference/lexical_analysis.rst (two occurrences)
UNIDATA_VERSION = "13.0.0"
UNICODE_DATA = "UnicodeData%s.txt"
COMPOSITION_EXCLUSIONS = "CompositionExclusions%s.txt"
EASTASIAN_WIDTH = "EastAsianWidth%s.txt"
UNIHAN = "Unihan%s.zip"
DERIVED_CORE_PROPERTIES = "DerivedCoreProperties%s.txt"
DERIVEDNORMALIZATION_PROPS = "DerivedNormalizationProps%s.txt"
LINE_BREAK = "LineBreak%s.txt"
NAME_ALIASES = "NameAliases%s.txt"
NAMED_SEQUENCES = "NamedSequences%s.txt"
SPECIAL_CASING = "SpecialCasing%s.txt"
CASE_FOLDING = "CaseFolding%s.txt"
# Private Use Areas -- in planes 1, 15, 16
PUA_1 = range(0xE000, 0xF900)
PUA_15 = range(0xF0000, 0xFFFFE)
PUA_16 = range(0x100000, 0x10FFFE)
# we use these ranges of PUA_15 to store name aliases and named sequences
NAME_ALIASES_START = 0xF0000
NAMED_SEQUENCES_START = 0xF0200
old_versions = ["3.2.0"]
CATEGORY_NAMES = [ "Cn", "Lu", "Ll", "Lt", "Mn", "Mc", "Me", "Nd",
"Nl", "No", "Zs", "Zl", "Zp", "Cc", "Cf", "Cs", "Co", "Cn", "Lm",
"Lo", "Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po", "Sm", "Sc", "Sk",
"So" ]
BIDIRECTIONAL_NAMES = [ "", "L", "LRE", "LRO", "R", "AL", "RLE", "RLO",
"PDF", "EN", "ES", "ET", "AN", "CS", "NSM", "BN", "B", "S", "WS",
"ON", "LRI", "RLI", "FSI", "PDI" ]
EASTASIANWIDTH_NAMES = [ "F", "H", "W", "Na", "A", "N" ]
MANDATORY_LINE_BREAKS = [ "BK", "CR", "LF", "NL" ]
# note: should match definitions in Objects/unicodectype.c
ALPHA_MASK = 0x01
DECIMAL_MASK = 0x02
DIGIT_MASK = 0x04
LOWER_MASK = 0x08
LINEBREAK_MASK = 0x10
SPACE_MASK = 0x20
TITLE_MASK = 0x40
UPPER_MASK = 0x80
XID_START_MASK = 0x100
XID_CONTINUE_MASK = 0x200
PRINTABLE_MASK = 0x400
NUMERIC_MASK = 0x800
CASE_IGNORABLE_MASK = 0x1000
CASED_MASK = 0x2000
EXTENDED_CASE_MASK = 0x4000
# these ranges need to match unicodedata.c:is_unified_ideograph
cjk_ranges = [
('3400', '4DB5'),
('4E00', '9FD5'),
('20000', '2A6D6'),
('2A700', '2B734'),
('2B740', '2B81D'),
('2B820', '2CEA1'),
]
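# bias() is the code point selection predicate handed to UnicodeData(); it
# decides which characters make it into the generated tables. Returning
# True unconditionally keeps the full repertoire; the commented-out checks
# below show how to restrict it (e.g. to the BMP plus the emoticons block)
# when smaller tables are wanted.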
def bias(c):
# if c <= 0xffff:
# return True
# if 0x1f600 <= c <= 0x1f64f:
# return True
return True
def maketables(trace=0):
if not os.path.isdir("third_party/python"):
print("please cd to cosmopolitan root")
sys.exit(1)
print("--- Reading", UNICODE_DATA % "", "...")
version = ""
unicode = UnicodeData(UNIDATA_VERSION, select=bias)
print(len(list(filter(None, unicode.table))), "characters")
for version in old_versions:
print("--- Reading", UNICODE_DATA % ("-"+version), "...")
old_unicode = UnicodeData(version, cjk_check=False, select=bias)
print(len(list(filter(None, old_unicode.table))), "characters")
merge_old_version(version, unicode, old_unicode)
with open("third_party/python/Modules/unicodedata_unidata.h", "w") as hdr:
print("""\
#ifndef COSMOPOLITAN_THIRD_PARTY_PYTHON_MODULES_UNICODEDATA_UNIDATA_H_
#define COSMOPOLITAN_THIRD_PARTY_PYTHON_MODULES_UNICODEDATA_UNIDATA_H_
#include "third_party/python/Modules/unicodedata.h"
COSMOPOLITAN_C_START_
/* GENERATED BY %s %s */""" % (SCRIPT, VERSION), file=hdr)
print('#define UNIDATA_VERSION "%s"' % UNIDATA_VERSION, file=hdr)
makeunicodename(hdr, unicode, trace)
makeunicodedata(hdr, unicode, trace)
makeunicodetype(hdr, unicode, trace)
hdr.write("""\
COSMOPOLITAN_C_END_
#endif /* COSMOPOLITAN_THIRD_PARTY_PYTHON_MODULES_UNICODEDATA_UNIDATA_H_ */
""")
def startfile(fp):
print('#include "libc/nexgen32e/kompressor.h"', file=fp)
print('#include "third_party/python/Modules/unicodedata.h"', file=fp)
print("/* clang-format off */", file=fp)
print("/* GENERATED BY %s %s */" % (SCRIPT, VERSION), file=fp)
print(file=fp)
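# emits a C string table. when every string is short we generate a
# fixed-width char[n][m] matrix (no pointer relocations); otherwise we
# fall back to an array of string pointers.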
def makestringarray(name, strings, fp, hdr):
ml = max(len(s) for s in strings)
if ml < 8:
print('extern const char %s[%d][%d];' % (name, len(strings), ml+1), file=hdr)
print("const char %s[%d][%d] = {" % (name, len(strings), ml+1), file=fp)
else:
print('extern const char *const %s[%d];' % (name, len(strings)), file=hdr)
print("const char *const %s[%d] = {" % (name, len(strings)), file=fp)
for s in strings:
print(" \"%s\"," % (s), file=fp)
print("};", file=fp)
# --------------------------------------------------------------------
# unicode character properties
def makeunicodedata(hdr, unicode, trace):
dummy = (0, 0, 0, 0, 0, 0)
table = [dummy]
cache = {0: dummy}
index = [0] * len(unicode.chars)
# 1) database properties
for char in unicode.chars:
record = unicode.table[char]
if record:
# extract database properties
category = CATEGORY_NAMES.index(record[2])
combining = int(record[3])
bidirectional = BIDIRECTIONAL_NAMES.index(record[4])
mirrored = record[9] == "Y"
eastasianwidth = EASTASIANWIDTH_NAMES.index(record[15])
normalizationquickcheck = record[17]
item = (
category, combining, bidirectional, mirrored, eastasianwidth,
normalizationquickcheck
)
# add entry to index and item tables
i = cache.get(item)
if i is None:
cache[item] = i = len(table)
table.append(item)
index[char] = i
# 2) decomposition data
decomp_data = [0]
decomp_prefix = [""]
decomp_index = [0] * len(unicode.chars)
decomp_size = 0
comp_pairs = []
comp_first = [None] * len(unicode.chars)
comp_last = [None] * len(unicode.chars)
for char in unicode.chars:
record = unicode.table[char]
if record:
if record[5]:
decomp = record[5].split()
if len(decomp) > 19:
raise Exception("character %x has a decomposition too large for nfd_nfkd" % char)
# prefix
if decomp[0][0] == "<":
prefix = decomp.pop(0)
else:
prefix = ""
try:
i = decomp_prefix.index(prefix)
except ValueError:
i = len(decomp_prefix)
decomp_prefix.append(prefix)
prefix = i
assert prefix < 256
# content
decomp = [prefix + (len(decomp)<<8)] + [int(s, 16) for s in decomp]
# Collect NFC pairs
if not prefix and len(decomp) == 3 and \
char not in unicode.exclusions and \
unicode.table[decomp[1]][3] == "0":
p, l, r = decomp
comp_first[l] = 1
comp_last[r] = 1
comp_pairs.append((l,r,char))
try:
i = decomp_data.index(decomp)
except ValueError:
i = len(decomp_data)
decomp_data.extend(decomp)
decomp_size = decomp_size + len(decomp) * 2
else:
i = 0
decomp_index[char] = i
f = l = 0
comp_first_ranges = []
comp_last_ranges = []
prev_f = prev_l = None
for i in unicode.chars:
if comp_first[i] is not None:
comp_first[i] = f
f += 1
if prev_f is None:
prev_f = (i,i)
elif prev_f[1]+1 == i:
prev_f = prev_f[0],i
else:
comp_first_ranges.append(prev_f)
prev_f = (i,i)
if comp_last[i] is not None:
comp_last[i] = l
l += 1
if prev_l is None:
prev_l = (i,i)
elif prev_l[1]+1 == i:
prev_l = prev_l[0],i
else:
comp_last_ranges.append(prev_l)
prev_l = (i,i)
comp_first_ranges.append(prev_f)
comp_last_ranges.append(prev_l)
total_first = f
total_last = l
comp_data = [0]*(total_first*total_last)
for f,l,char in comp_pairs:
f = comp_first[f]
l = comp_last[l]
comp_data[f*total_last+l] = char
print(len(table), "unique properties")
print(len(decomp_prefix), "unique decomposition prefixes")
print(len(decomp_data), "unique decomposition entries:", end=' ')
print(decomp_size, "bytes")
print(total_first, "first characters in NFC")
print(total_last, "last characters in NFC")
print(len(comp_pairs), "NFC pairs")
# a list of unique records
with open("third_party/python/Modules/unicodedata_records.c", "w") as fp:
startfile(fp)
print("extern const _PyUnicode_Record _PyUnicode_Records[%d];" % (len(table)), file=hdr)
print("const _PyUnicode_Record _PyUnicode_Records[] = {", file=fp)
for item in table:
print(" {%3d, %3d, %3d, %3d, %3d, %3d}," % item, file=fp)
print("};", file=fp)
print(file=fp)
index1, index2, shift = splitbins(index, trace)
print("#define _PyUnicode_RecordsShift", shift, file=hdr)
Array("_PyUnicode_RecordsIndex1", index1, rle=True).dump(fp, hdr, trace)
Array("_PyUnicode_RecordsIndex2", index2, rle=True).dump(fp, hdr, trace)
print("#define UNIDATA_TOTAL_FIRST", total_first, file=hdr)
print("#define UNIDATA_TOTAL_LAST", total_last, file=hdr)
with open("third_party/python/Modules/unicodedata_nfcfirst.c", "w") as fp:
startfile(fp)
print("extern const _PyUnicode_Reindex _PyUnicode_NfcFirst[%d];" % (len(comp_first_ranges)), file=hdr)
print("const _PyUnicode_Reindex _PyUnicode_NfcFirst[] = {", file=fp)
for start,end in comp_first_ranges:
print(" {%#07x, %3d, %3d}," % (start,end-start,comp_first[start]), file=fp)
print(" {0}", file=fp)
print("};\n", file=fp)
with open("third_party/python/Modules/unicodedata_nfclast.c", "w") as fp:
startfile(fp)
print("extern const _PyUnicode_Reindex _PyUnicode_NfcLast[%d];" % (len(comp_last_ranges)), file=hdr)
print("const _PyUnicode_Reindex _PyUnicode_NfcLast[] = {", file=fp)
for start,end in comp_last_ranges:
print(" {%#07x, %3d, %3d}," % (start,end-start,comp_last[start]), file=fp)
print(" {0}", file=fp)
print("};\n", file=fp)
with open("third_party/python/Modules/unicodedata_categorynames.c", "w") as fp:
startfile(fp)
makestringarray("_PyUnicode_CategoryNames", CATEGORY_NAMES, fp, hdr)
with open("third_party/python/Modules/unicodedata_bidirectionalnames.c", "w") as fp:
startfile(fp)
makestringarray("_PyUnicode_BidirectionalNames", BIDIRECTIONAL_NAMES, fp, hdr)
with open("third_party/python/Modules/unicodedata_eastasianwidthnames.c", "w") as fp:
startfile(fp)
makestringarray("_PyUnicode_EastAsianWidthNames", EASTASIANWIDTH_NAMES, fp, hdr)
with open("third_party/python/Modules/unicodedata_decompprefix.c", "w") as fp:
startfile(fp)
makestringarray("_PyUnicode_DecompPrefix", decomp_prefix, fp, hdr)
with open("third_party/python/Modules/unicodedata_decomp.c", "w") as fp:
startfile(fp)
index1, index2, shift = splitbins(decomp_index, trace)
print("#define _PyUnicode_DecompShift", shift, file=hdr)
Array("_PyUnicode_Decomp", decomp_data, pack=True).dump(fp, hdr, trace)
Array("_PyUnicode_DecompIndex1", index1, rle=True).dump(fp, hdr, trace)
Array("_PyUnicode_DecompIndex2", index2).dump(fp, hdr, trace)
with open("third_party/python/Modules/unicodedata_comp.c", "w") as fp:
startfile(fp)
index, index2, shift = splitbins(comp_data, trace)
print("#define _PyUnicode_CompShift", shift, file=hdr)
Array("_PyUnicode_CompIndex", index, rle=True).dump(fp, hdr, trace)
Array("_PyUnicode_CompData", index2, pack=True).dump(fp, hdr, trace)
# Generate delta tables for old versions [because punycode is pinned to 3.2.0]
for version, table, normalization in unicode.changed:
with open("third_party/python/Modules/unicodedata_%s.c" % (version), "w") as fp:
startfile(fp)
cversion = version.replace(".","_")
records = [table[0]]
cache = {table[0]:0}
index = [0] * len(table)
for i, record in enumerate(table):
try:
index[i] = cache[record]
except KeyError:
index[i] = cache[record] = len(records)
records.append(record)
index1, index2, shift = splitbins(index, trace)
print("const _PyUnicode_ChangeRecord _PyUnicode_ChangeRecords_%s[] = {" % cversion, file=fp)
for record in records:
print("\t{ %s }," % ", ".join(map(str,record)), file=fp)
print("};", file=fp)
print(file=fp)
Array("_PyUnicode_ChangeIndex_%s" % cversion, index1, rle=True).dump(fp, hdr, trace)
Array("_PyUnicode_ChangeData_%s" % cversion, index2, rle=True).dump(fp, hdr, trace)
print("const _PyUnicode_ChangeRecord *_PyUnicode_GetChange_%s(Py_UCS4);" % cversion, file=hdr)
print("const _PyUnicode_ChangeRecord *_PyUnicode_GetChange_%s(Py_UCS4 n)" % cversion, file=fp)
print("{", file=fp)
print(" int i;", file=fp)
print(" if (n >= 0x110000) {", file=fp)
print(" i = 0;", file=fp)
print(" } else {", file=fp)
print(" i = _PyUnicode_ChangeIndex_%s[n>>%d];" % (cversion, shift), file=fp)
print(" i = _PyUnicode_ChangeData_%s[(i<<%d)+(n & %d)];" % (cversion, shift, ((1<<shift)-1)), file=fp)
print(" }", file=fp)
print(" return _PyUnicode_ChangeRecords_%s + i;" % cversion, file=fp)
print("}", file=fp)
print(file=fp)
print("Py_UCS4 _PyUnicode_Normalization_%s(Py_UCS4);" % (cversion), file=hdr)
print("Py_UCS4 _PyUnicode_Normalization_%s(Py_UCS4 n)" % (cversion), file=fp)
print("{", file=fp)
print(" switch(n) {", file=fp)
for k, v in normalization:
print(" case 0x%04x:" % (k), file=fp)
print(" return 0x%s;" % (v), file=fp)
print(" default:", file=fp)
print(" return 0;", file=fp)
print(" }", file=fp)
print("}", file=fp)
def GenerateToNumeric(db, fp):
dubble=[]
normal=[]
astral=[]
for k,v in db:
dubble.append(k)
i = len(dubble) - 1
for c in v:
if c < 0x10000:
normal.append((c, i))
else:
astral.append((c, i))
if len(dubble) < 255:
t = 'uint8_t'
else:
t = 'uint16_t'
print('static const double kNumeric[] = {', file=fp)
for d in dubble:
print(' %s.,' % (d), file=fp)
print('};', file=fp)
print(file=fp)
normal.sort()
print('static const uint32_t kNumericCodes[] = {', file=fp)
for c,i in normal:
print(' 0x%04x,' % (c), file=fp)
print('};', file=fp)
print(file=fp)
print('static const %s kNumericIndices[] = {' % (t), file=fp)
for c,i in normal:
print(' %d,' % (i), file=fp)
print('};', file=fp)
print(file=fp)
astral.sort()
print('static const uint32_t kNumericAstralCodes[] = {', file=fp)
for c,i in astral:
print(' 0x%05x,' % (c), file=fp)
print('};', file=fp)
print(file=fp)
print('static const %s kNumericAstralIndices[] = {' % (t), file=fp)
for c,i in astral:
print(' %d,' % (i), file=fp)
print('};', file=fp)
print("""
/* Returns the numeric value as double for Unicode characters
* having this property, -1.0 otherwise.
*/
double _PyUnicode_ToNumeric(Py_UCS4 c)
{
int l, m, r;
if (c <= 0xFFFF) {
l = 0;
r = sizeof(kNumericCodes) / sizeof(kNumericCodes[0]) - 1;
while (l <= r) {
m = (l + r) >> 1;
if (kNumericCodes[m] < c) {
l = m + 1;
} else if (kNumericCodes[m] > c) {
r = m - 1;
} else {
return kNumeric[kNumericIndices[m]];
}
}
} else {
l = 0;
r = sizeof(kNumericAstralCodes) / sizeof(kNumericAstralCodes[0]) - 1;
while (l <= r) {
m = (l + r) >> 1;
if (kNumericAstralCodes[m] < c) {
l = m + 1;
} else if (kNumericAstralCodes[m] > c) {
r = m - 1;
} else {
return kNumeric[kNumericAstralIndices[m]];
}
}
}
return -1;
}
""", file=fp)
# --------------------------------------------------------------------
# unicode character type tables
def makeunicodetype(hdr, unicode, trace):
# extract unicode types
dummy = (0, 0, 0, 0, 0, 0)
table = [dummy]
cache = {0: dummy}
index = [0] * len(unicode.chars)
numeric = {}
spaces = []
linebreaks = []
extra_casing = []
for char in unicode.chars:
record = unicode.table[char]
if record:
# extract database properties
category = record[2]
bidirectional = record[4]
properties = record[16]
flags = 0
delta = True
if category in ["Lm", "Lt", "Lu", "Ll", "Lo"]:
flags |= ALPHA_MASK
if "Lowercase" in properties:
flags |= LOWER_MASK
if 'Line_Break' in properties or bidirectional == "B":
flags |= LINEBREAK_MASK
linebreaks.append(char)
if category == "Zs" or bidirectional in ("WS", "B", "S"):
flags |= SPACE_MASK
spaces.append(char)
if category == "Lt":
flags |= TITLE_MASK
if "Uppercase" in properties:
flags |= UPPER_MASK
if char == ord(" ") or category[0] not in ("C", "Z"):
flags |= PRINTABLE_MASK
if "XID_Start" in properties:
flags |= XID_START_MASK
if "XID_Continue" in properties:
flags |= XID_CONTINUE_MASK
if "Cased" in properties:
flags |= CASED_MASK
if "Case_Ignorable" in properties:
flags |= CASE_IGNORABLE_MASK
sc = unicode.special_casing.get(char)
cf = unicode.case_folding.get(char, [char])
if record[12]:
upper = int(record[12], 16)
else:
upper = char
if record[13]:
lower = int(record[13], 16)
else:
lower = char
if record[14]:
title = int(record[14], 16)
else:
title = upper
if sc is None and cf != [lower]:
sc = ([lower], [title], [upper])
if sc is None:
if upper == lower == title:
upper = lower = title = 0
else:
upper = upper - char
lower = lower - char
title = title - char
assert (abs(upper) <= 2147483647 and
abs(lower) <= 2147483647 and
abs(title) <= 2147483647)
else:
# This happens either when some character maps to more than one
# character in uppercase, lowercase, or titlecase or the
# casefolded version of the character is different from the
# lowercase. The extra characters are stored in a different
# array.
flags |= EXTENDED_CASE_MASK
lower = len(extra_casing) | (len(sc[0]) << 24)
extra_casing.extend(sc[0])
if cf != sc[0]:
lower |= len(cf) << 20
extra_casing.extend(cf)
upper = len(extra_casing) | (len(sc[2]) << 24)
extra_casing.extend(sc[2])
# Title is probably equal to upper.
if sc[1] == sc[2]:
title = upper
else:
title = len(extra_casing) | (len(sc[1]) << 24)
extra_casing.extend(sc[1])
# decimal digit, integer digit
decimal = 0
if record[6]:
flags |= DECIMAL_MASK
decimal = int(record[6])
digit = 0
if record[7]:
flags |= DIGIT_MASK
digit = int(record[7])
if record[8]:
flags |= NUMERIC_MASK
numeric.setdefault(record[8], []).append(char)
item = (
upper, lower, title, decimal, digit, flags
)
# add entry to index and item tables
i = cache.get(item)
if i is None:
cache[item] = i = len(table)
table.append(item)
index[char] = i
print(len(table), "unique character type entries")
print(sum(map(len, numeric.values())), "numeric code points")
print(len(spaces), "whitespace code points")
print(len(linebreaks), "linebreak code points")
print(len(extra_casing), "extended case array")
with open("third_party/python/Modules/unicodedata_typerecords.c", "w") as fp:
startfile(fp)
print("extern const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[%d];" % (len(table)), file=hdr)
print("const _PyUnicode_TypeRecord _PyUnicode_TypeRecords[%d] = {" % (len(table)), file=fp)
for item in table:
print(" {%3d, %3d, %3d, %3d, %3d, %3d}," % item, file=fp)
print("};", file=fp)
index1, index2, shift = splitbins(index, trace)
print("#define _PyUnicode_TypeRecordsShift", shift, file=hdr)
Array("_PyUnicode_TypeRecordsIndex1", index1, rle=True).dump(fp, hdr, trace)
Array("_PyUnicode_TypeRecordsIndex2", index2, rle=True).dump(fp, hdr, trace)
with open("third_party/python/Modules/unicodedata_extendedcase.c", "w") as fp:
startfile(fp)
type_ = "char16_t"
for c in extra_casing:
if c > 0xffff:
type_ = "Py_UCS4"
break
print("extern const %s _PyUnicode_ExtendedCase[%d];" % (type_, len(extra_casing)), file=hdr)
print("const %s _PyUnicode_ExtendedCase[%d] = {" % (type_, len(extra_casing)), file=fp)
for c in extra_casing:
print(" %d," % c, file=fp)
print("};", file=fp)
with open("third_party/python/Modules/unicodedata_tonumeric.c", "w") as fp:
startfile(fp)
GenerateToNumeric(sorted(numeric.items()), fp)
with open("third_party/python/Modules/unicodedata_iswhitespace.c", "w") as fp:
startfile(fp)
print("/* Returns 1 for Unicode characters having the bidirectional", file=fp)
print(" * type 'WS', 'B' or 'S' or the category 'Zs', 0 otherwise.", file=fp)
print(" */", file=fp)
print('int _PyUnicode_IsWhitespace(Py_UCS4 ch)', file=fp)
print('{', file=fp)
print(' switch (ch) {', file=fp)
for codepoint in sorted(spaces):
print(' case 0x%04X:' % (codepoint,), file=fp)
print(' return 1;', file=fp)
print(' }', file=fp)
print(' return 0;', file=fp)
print('}', file=fp)
with open("third_party/python/Modules/unicodedata_islinebreak.c", "w") as fp:
startfile(fp)
print("/* Returns 1 for Unicode characters having the line break", file=fp)
print(" * property 'BK', 'CR', 'LF' or 'NL' or having bidirectional", file=fp)
print(" * type 'B', 0 otherwise.", file=fp)
print(" */", file=fp)
print('int _PyUnicode_IsLinebreak(Py_UCS4 ch)', file=fp)
print('{', file=fp)
print(' switch (ch) {', file=fp)
for codepoint in sorted(linebreaks):
print(' case 0x%04X:' % (codepoint,), file=fp)
print(' return 1;', file=fp)
print(' }', file=fp)
print(' return 0;', file=fp)
print('}', file=fp)
# --------------------------------------------------------------------
# unicode name database
def makeunicodename(hdr, unicode, trace):
# collect names
names = [None] * len(unicode.chars)
for char in unicode.chars:
record = unicode.table[char]
if record:
name = record[1].strip()
if name and name[0] != "<":
names[char] = name + chr(0)
print(len(list(n for n in names if n is not None)), "distinct names")
# collect unique words from names (note that we distinguish between
# words inside a sentence and words ending a sentence; the latter
# include the trailing null byte)
words = {}
n = b = 0
for char in unicode.chars:
name = names[char]
if name:
w = name.split()
b = b + len(name)
n = n + len(w)
for w in w:
l = words.get(w)
if l:
l.append(None)
else:
words[w] = [len(words)]
print(n, "words in text;", b, "bytes")
wordlist = list(words.items())
# sort on falling frequency, then by name
def word_key(a):
aword, alist = a
return -len(alist), aword
wordlist.sort(key=word_key)
# figure out how many phrasebook escapes we need
escapes = 0
while escapes * 256 < len(wordlist):
escapes = escapes + 1
print(escapes, "escapes")
short = 256 - escapes
assert short > 0
# [jart] is this right?
short = min(short, len(wordlist))
print(short, "short indexes in lexicon")
# statistics
n = 0
for i in range(short):
n = n + len(wordlist[i][1])
print(n, "short indexes in phrasebook")
# pick the most commonly used words, and sort the rest on falling
# length (to maximize overlap)
wordlist, wordtail = wordlist[:short], wordlist[short:]
wordtail.sort(key=lambda a: a[0], reverse=True)
wordlist.extend(wordtail)
# generate lexicon from words
lexicon_offset = [0]
lexicon = ""
words = {}
# build a lexicon string
offset = 0
for w, x in wordlist:
# encoding: bit 7 indicates last character in word (chr(128)
# indicates the last character in an entire string)
ww = w[:-1] + chr(ord(w[-1])+128)
# reuse string tails, when possible
o = lexicon.find(ww)
if o < 0:
o = offset
lexicon = lexicon + ww
offset = offset + len(w)
words[w] = len(lexicon_offset)
lexicon_offset.append(o)
lexicon = list(map(ord, lexicon))
# generate phrasebook from names and lexicon
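# each name is stored as a sequence of indices into the lexicon. an index
# below `short` fits in a single byte; anything else is stored big-endian
# in two bytes, with the first byte offset by `short` to flag the escape.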
phrasebook = [0]
phrasebook_offset = [0] * len(unicode.chars)
for char in unicode.chars:
name = names[char]
if name:
w = name.split()
phrasebook_offset[char] = len(phrasebook)
for w in w:
i = words[w]
if i < short:
phrasebook.append(i)
else:
# store as two bytes
phrasebook.append((i>>8) + short)
phrasebook.append(i&255)
assert getsize(phrasebook) == 1
#
# unicode name hash table
# extract names
data = []
for char in unicode.chars:
record = unicode.table[char]
if record:
name = record[1].strip()
if name and name[0] != "<":
data.append((name, char))
# the magic number 47 was chosen to minimize the number of
# collisions on the current data set. if you like, change it
# and see what happens...
codehash = Hash("_PyUnicode_Code", data, 47)
print("#define UNIDATA_NAME_MAXLEN", 256, file=hdr)
with open("third_party/python/Modules/unicodedata_lexicon.c", "w") as fp:
startfile(fp)
Array("_PyUnicode_Lexicon", lexicon).dump(fp, hdr, trace)
Array("_PyUnicode_LexiconOffset", lexicon_offset, pack=True).dump(fp, hdr, trace)
# split decomposition index table
offset1, offset2, shift = splitbins(phrasebook_offset, trace)
print("#define _PyUnicode_PhrasebookShift", shift, file=hdr)
print("#define _PyUnicode_PhrasebookShort", short, file=hdr)
with open("third_party/python/Modules/unicodedata_phrasebook.c", "w") as fp:
startfile(fp)
Array("_PyUnicode_Phrasebook", phrasebook).dump(fp, hdr, trace)
Array("_PyUnicode_PhrasebookOffset1", offset1, rle=True).dump(fp, hdr, trace)
Array("_PyUnicode_PhrasebookOffset2", offset2, pack=True).dump(fp, hdr, trace)
with open("third_party/python/Modules/unicodedata_codehash.c", "w") as fp:
startfile(fp)
codehash.dump(fp, hdr, trace)
print('#define _PyUnicode_AliasesStart %#x' % (NAME_ALIASES_START), file=hdr)
print('#define _PyUnicode_AliasesEnd %#x' % (NAME_ALIASES_START + len(unicode.aliases)), file=hdr)
print('extern const unsigned int _PyUnicode_NameAliases[%d];' % (len(unicode.aliases)), file=hdr)
with open("third_party/python/Modules/unicodedata_aliases.c", "w") as fp:
startfile(fp)
print('const unsigned int _PyUnicode_NameAliases[%d] = {' % (len(unicode.aliases)), file=fp)
for name, codepoint in unicode.aliases:
print(' 0x%04X,' % codepoint, file=fp)
print('};', file=fp)
print('#define _PyUnicode_NamedSequencesStart %#x' % (NAMED_SEQUENCES_START), file=hdr)
print('#define _PyUnicode_NamedSequencesEnd %#x' %
(NAMED_SEQUENCES_START + len(unicode.named_sequences)), file=hdr)
print('extern const _PyUnicode_NamedSequence _PyUnicode_NamedSequences[%d];' % (len(unicode.named_sequences)), file=hdr)
with open("third_party/python/Modules/unicodedata_namedsequences.c", "w") as fp:
startfile(fp)
print('const _PyUnicode_NamedSequence _PyUnicode_NamedSequences[%d] = {' % (len(unicode.named_sequences)), file=fp)
for name, sequence in unicode.named_sequences:
seq_str = ', '.join('0x%04X' % cp for cp in sequence)
print(' {%d, {%s}},' % (len(sequence), seq_str), file=fp)
print('};', file=fp)
def merge_old_version(version, new, old):
# Changes to exclusion file not implemented yet
if old.exclusions != new.exclusions:
raise NotImplementedError("exclusions differ")
# In these change records, 0xFF means "no change"
bidir_changes = [0xFF]*0x110000
category_changes = [0xFF]*0x110000
decimal_changes = [0xFF]*0x110000
mirrored_changes = [0xFF]*0x110000
east_asian_width_changes = [0xFF]*0x110000
# In numeric data, 0 means "no change",
# -1 means "did not have a numeric value"
numeric_changes = [0] * 0x110000
# normalization_changes is a list of key-value pairs
normalization_changes = []
for i in range(0x110000):
if new.table[i] is None:
# Characters unassigned in the new version ought to
# be unassigned in the old one
assert old.table[i] is None
continue
# check characters unassigned in the old version
if old.table[i] is None:
# category 0 is "unassigned"
category_changes[i] = 0
continue
# check characters that differ
if old.table[i] != new.table[i]:
for k in range(len(old.table[i])):
if old.table[i][k] != new.table[i][k]:
value = old.table[i][k]
if k == 1 and i in PUA_15:
# the name is not set in the old.table, but in the
# new.table we are using it for aliases and named seq
assert value == ''
elif k == 2:
#print "CATEGORY",hex(i), old.table[i][k], new.table[i][k]
category_changes[i] = CATEGORY_NAMES.index(value)
elif k == 4:
#print "BIDIR",hex(i), old.table[i][k], new.table[i][k]
bidir_changes[i] = BIDIRECTIONAL_NAMES.index(value)
elif k == 5:
#print "DECOMP",hex(i), old.table[i][k], new.table[i][k]
# We assume that all normalization changes are in 1:1 mappings
assert " " not in value
normalization_changes.append((i, value))
elif k == 6:
#print "DECIMAL",hex(i), old.table[i][k], new.table[i][k]
# we only support changes where the old value is a single digit
assert value in "0123456789"
decimal_changes[i] = int(value)
elif k == 8:
# print "NUMERIC",hex(i), `old.table[i][k]`, new.table[i][k]
# Since 0 encodes "no change", the old value had better not be 0
if not value:
numeric_changes[i] = -1
else:
numeric_changes[i] = float(value)
assert numeric_changes[i] not in (0, -1)
elif k == 9:
if value == 'Y':
mirrored_changes[i] = '1'
else:
mirrored_changes[i] = '0'
elif k == 11:
# change to ISO comment, ignore
pass
elif k == 12:
# change to simple uppercase mapping; ignore
pass
elif k == 13:
# change to simple lowercase mapping; ignore
pass
elif k == 14:
# change to simple titlecase mapping; ignore
pass
elif k == 15:
# change to east asian width
east_asian_width_changes[i] = EASTASIANWIDTH_NAMES.index(value)
elif k == 16:
# derived property changes; not yet
pass
elif k == 17:
# normalization quickchecks are not performed
# for older versions
pass
else:
class Difference(Exception):pass
raise Difference(hex(i), k, old.table[i], new.table[i])
new.changed.append((version, list(zip(bidir_changes, category_changes,
decimal_changes, mirrored_changes,
east_asian_width_changes,
numeric_changes)),
normalization_changes))
def open_data(template, version):
if not os.path.isdir('o/unicode'):
os.makedirs('o/unicode')
name = template % ('-'+version,)
local = os.path.join('o/unicode', name)
if not os.path.exists(local):
import urllib.request
if version == '3.2.0':
# irregular url structure
url = 'http://www.unicode.org/Public/3.2-Update/' + name
else:
url = ('http://www.unicode.org/Public/%s/ucd/'+template) % (version, '')
print('Downloading %s' % (url))
urllib.request.urlretrieve(url, filename=local)
if local.endswith('.txt'):
return open(local, encoding='utf-8')
else:
# Unihan.zip
return open(local, 'rb')
# --------------------------------------------------------------------
# the following support code is taken from the unidb utilities
# Copyright (c) 1999-2000 by Secret Labs AB
# load a unicode-data file from disk
class UnicodeData:
# Record structure:
# [ID, name, category, combining, bidi, decomp, (6)
# decimal, digit, numeric, bidi-mirrored, Unicode-1-name, (11)
# ISO-comment, uppercase, lowercase, titlecase, ea-width, (16)
# derived-props] (17)
def __init__(self, version,
linebreakprops=False,
expand=1,
cjk_check=True,
select=lambda c: True):
self.changed = []
table = [None] * 0x110000
with open_data(UNICODE_DATA, version) as file:
while 1:
s = file.readline()
if not s:
break
s = s.strip().split(";")
char = int(s[0], 16)
if select(char):
table[char] = s
cjk_ranges_found = []
cjk_ranger = [(a,b) for a,b in cjk_ranges
if select(int(a,16)) and select(int(b,16))]
# expand first-last ranges
if expand:
field = None
for i in range(0, 0x110000):
if not select(i):
continue
s = table[i]
if s:
if s[1][-6:] == "First>":
s[1] = ""
field = s
elif s[1][-5:] == "Last>":
if s[1].startswith("<CJK Ideograph"):
cjk_ranges_found.append((field[0],
s[0]))
s[1] = ""
field = None
elif field:
f2 = field[:]
f2[0] = "%X" % i
table[i] = f2
# if cjk_check and cjk_ranger != cjk_ranges_found:
# raise ValueError("CJK ranges deviate: have %r want %r" %
# (cjk_ranges_found, cjk_ranger))
# public attributes
self.filename = UNICODE_DATA % ''
self.table = table
self.chars = list(range(0x110000)) # unicode 3.2
# check for name aliases and named sequences, see #12753
# aliases and named sequences are not in 3.2.0
if version != '3.2.0':
self.aliases = []
# store aliases in the Private Use Area 15, in range U+F0000..U+F00FF,
# in order to take advantage of the compression and lookup
# algorithms used for the other characters
pua_index = NAME_ALIASES_START
with open_data(NAME_ALIASES, version) as file:
for s in file:
s = s.strip()
if not s or s.startswith('#'):
continue
char, name, abbrev = s.split(';')
char = int(char, 16)
if select(pua_index) and select(char):
self.aliases.append((name, char))
# also store the name in the PUA 1
self.table[pua_index][1] = name
pua_index += 1
assert pua_index - NAME_ALIASES_START == len(self.aliases)
self.named_sequences = []
# store named sequences in the PUA 1, in range U+F0100..,
# in order to take advantage of the compression and lookup
# algorithms used for the other characters.
assert pua_index < NAMED_SEQUENCES_START
pua_index = NAMED_SEQUENCES_START
if select(pua_index):
with open_data(NAMED_SEQUENCES, version) as file:
for s in file:
s = s.strip()
if not s or s.startswith('#'):
continue
name, chars = s.split(';')
chars = tuple(int(char, 16) for char in chars.split())
chars = tuple(c for c in chars if select(c))
# check that the structure defined in makeunicodename is OK
assert 2 <= len(chars) <= 4, "change the Py_UCS2 array size"
assert all(c <= 0xFFFF for c in chars), ("use Py_UCS4 in "
"the NamedSequence struct and in unicodedata_lookup")
self.named_sequences.append((name, chars))
# also store these in the PUA 1
self.table[pua_index][1] = name
pua_index += 1
assert pua_index - NAMED_SEQUENCES_START == len(self.named_sequences)
self.exclusions = {}
with open_data(COMPOSITION_EXCLUSIONS, version) as file:
for s in file:
s = s.strip()
if not s:
continue
if s[0] == '#':
continue
char = int(s.split()[0],16)
if select(char):
self.exclusions[char] = 1
widths = [None] * 0x110000
with open_data(EASTASIAN_WIDTH, version) as file:
for s in file:
s = s.strip()
if not s:
continue
if s[0] == '#':
continue
s = s.split()[0].split(';')
if '..' in s[0]:
first, last = [int(c, 16) for c in s[0].split('..')]
chars = list(range(first, last+1))
else:
chars = [int(s[0], 16)]
for char in chars:
if select(char):
widths[char] = s[1]
for i in range(0, 0x110000):
if table[i] is not None:
table[i].append(widths[i])
for i in range(0, 0x110000):
if table[i] is not None:
table[i].append(set())
with open_data(DERIVED_CORE_PROPERTIES, version) as file:
for s in file:
s = s.split('#', 1)[0].strip()
if not s:
continue
r, p = s.split(";")
r = r.strip()
p = p.strip()
if ".." in r:
first, last = [int(c, 16) for c in r.split('..')]
chars = list(range(first, last+1))
else:
chars = [int(r, 16)]
for char in chars:
if table[char]:
# Some properties (e.g. Default_Ignorable_Code_Point)
# apply to unassigned code points; ignore them
table[char][-1].add(p)
with open_data(LINE_BREAK, version) as file:
for s in file:
s = s.partition('#')[0]
s = [i.strip() for i in s.split(';')]
if len(s) < 2 or s[1] not in MANDATORY_LINE_BREAKS:
continue
if '..' not in s[0]:
first = last = int(s[0], 16)
else:
first, last = [int(c, 16) for c in s[0].split('..')]
for char in range(first, last+1):
if select(char):
table[char][-1].add('Line_Break')
# We only want the quickcheck properties
# Format: NF?_QC; Y(es)/N(o)/M(aybe)
# Yes is the default, hence only N and M occur
# In 3.2.0, the format was different (NF?_NO)
# The parsing will incorrectly determine these as
# "yes", however, unicodedata.c will not perform quickchecks
# for older versions, and no delta records will be created.
quickchecks = [0] * 0x110000
qc_order = 'NFD_QC NFKD_QC NFC_QC NFKC_QC'.split()
with open_data(DERIVEDNORMALIZATION_PROPS, version) as file:
for s in file:
if '#' in s:
s = s[:s.index('#')]
s = [i.strip() for i in s.split(';')]
if len(s) < 2 or s[1] not in qc_order:
continue
quickcheck = 'MN'.index(s[2]) + 1 # Maybe or No
quickcheck_shift = qc_order.index(s[1])*2
quickcheck <<= quickcheck_shift
if '..' not in s[0]:
first = last = int(s[0], 16)
else:
first, last = [int(c, 16) for c in s[0].split('..')]
for char in range(first, last+1):
if select(char):
assert not (quickchecks[char]>>quickcheck_shift)&3
quickchecks[char] |= quickcheck
for i in range(0, 0x110000):
if table[i] is not None:
table[i].append(quickchecks[i])
with open_data(UNIHAN, version) as file:
zip = zipfile.ZipFile(file)
if version == '3.2.0':
data = zip.open('Unihan-3.2.0.txt').read()
else:
data = zip.open('Unihan_NumericValues.txt').read()
for line in data.decode("utf-8").splitlines():
if not line.startswith('U+'):
continue
code, tag, value = line.split(None, 3)[:3]
if tag not in ('kAccountingNumeric', 'kPrimaryNumeric',
'kOtherNumeric'):
continue
value = value.strip().replace(',', '')
i = int(code[2:], 16)
# Patch the numeric field
if table[i] is not None:
table[i][8] = value
sc = self.special_casing = {}
with open_data(SPECIAL_CASING, version) as file:
for s in file:
s = s[:-1].split('#', 1)[0]
if not s:
continue
data = s.split("; ")
if data[4]:
# We ignore all conditionals (since they depend on
# languages) except for one, which is hardcoded. See
# handle_capital_sigma in unicodeobject.c.
continue
c = int(data[0], 16)
if select(c):
lower = [int(char, 16) for char in data[1].split() if select(int(char, 16))]
title = [int(char, 16) for char in data[2].split() if select(int(char, 16))]
upper = [int(char, 16) for char in data[3].split() if select(int(char, 16))]
sc[c] = (lower, title, upper)
cf = self.case_folding = {}
if version != '3.2.0':
with open_data(CASE_FOLDING, version) as file:
for s in file:
s = s[:-1].split('#', 1)[0]
if not s:
continue
data = s.split("; ")
if data[1] in "CF":
c = int(data[0], 16)
if select(c):
cf[c] = [int(char, 16) for char in data[2].split()]
def uselatin1(self):
# restrict character range to ISO Latin 1
self.chars = list(range(256))
# hash table tools
# this is a straight-forward reimplementation of Python's built-in
# dictionary type, using a static data structure, and a custom string
# hash algorithm.
def myhash(s, magic):
h = 0
for c in map(ord, s.upper()):
h = (h * magic) + c
ix = h & 0xff000000
if ix:
h = (h ^ ((ix>>24) & 0xff)) & 0x00ffffff
return h
SIZES = [
(4,3), (8,3), (16,3), (32,5), (64,3), (128,3), (256,29), (512,17),
(1024,9), (2048,5), (4096,83), (8192,27), (16384,43), (32768,3),
(65536,45), (131072,9), (262144,39), (524288,39), (1048576,9),
(2097152,5), (4194304,3), (8388608,33), (16777216,27)
]
class Hash:
def __init__(self, name, data, magic):
# turn a (key, value) list into a static hash table structure
# determine table size
for size, poly in SIZES:
if size > len(data):
poly = size + poly
break
else:
raise AssertionError("ran out of polynomials")
print(size, "slots in hash table")
table = [None] * size
mask = size-1
n = 0
hash = myhash
# initialize hash table
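# collisions are resolved by open addressing: the probe increment is
# derived from the hash, doubled on each miss, and folded back into the
# table with the polynomial so the probe sequence stays cyclic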
for key, value in data:
h = hash(key, magic)
i = (~h) & mask
v = table[i]
if v is None:
table[i] = value
continue
incr = (h ^ (h >> 3)) & mask
if not incr:
incr = mask
while 1:
n = n + 1
i = (i + incr) & mask
v = table[i]
if v is None:
table[i] = value
break
incr = incr << 1
if incr > mask:
incr = incr ^ poly
print(n, "collisions")
self.collisions = n
for i in range(len(table)):
if table[i] is None:
table[i] = 0
self.data = Array(name + "Hash", table, pack=True)
self.magic = magic
self.name = name
self.size = size
self.poly = poly
def dump(self, file, hdr, trace):
# write data to file, as a C array
self.data.dump(file, hdr, trace)
hdr.write("#define %sMagic %d\n" % (self.name, self.magic))
hdr.write("#define %sSize %d\n" % (self.name, self.size))
hdr.write("#define %sPoly %d\n" % (self.name, self.poly))
# stuff to deal with arrays of unsigned integers
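# pack() squeezes an array of unsigned ints into `word`-bit integers, with
# element 0 occupying the lowest bits of the first word; spack() does the
# same for signed values, reserving the top bit of each field as the sign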
def pack(data, bits, word=32):
assert 0 < bits < word
bitn = (bits * len(data) + word - 1) // word
bita = 0
for x in reversed(data):
bita <<= bits
bita |= x
for i in range(bitn):
yield bita & ((1 << word) - 1)
bita >>= word
def spack(data, bits, word=32):
assert 0 < bits < word
bitn = (bits * len(data) + word - 1) // word
bita = 0
sign = 1 << (bits - 1)
mask = sign - 1
for x in reversed(data):
assert -sign <= x < sign, "x=%d bits=%d" % (x, bits)
x = (x & mask) | (sign if x < 0 else 0)
bita <<= bits
bita |= x
for i in range(bitn):
yield bita & ((1 << word) - 1)
bita >>= word
def bzip(data):
return bz2.compress(data)
def deflate(data):
# z = zlib.compressobj(zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, zlib.Z_RLE)
z = zlib.compressobj(zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
b = z.compress(data)
b += z.flush(zlib.Z_FINISH)
return b
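# run-length encoding: collapse the input into (count, value) pairs, with
# counts capped at maxval so they fit the target integer width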
def rle(data, maxval):
i = 0
j = 0
for i,x in enumerate(data):
if j == 0:
y = x
j = 1
elif y == x and j < maxval:
j += 1
else:
yield (j, y)
y = x
j = 1
if j:
yield (j, y)
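# unsigned LEB128: seven payload bits per byte, high bit set on every
# byte except the last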
def uleb(a, x):
while True:
b = x & 127
x >>= 7
if x:
a.append(b | 128)
else:
a.append(b)
break
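# zigzag mapping: 0, -1, 1, -2, 2, ... becomes 0, 1, 2, 3, 4, ... so that
# small magnitudes of either sign stay small when LEB128 encoded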
def zig(x):
m = (2 << x.bit_length()) - 1
return ((x & (m >> 1)) << 1) ^ (m if x < 0 else 0)
def zleb(a, x):
return uleb(a, zig(x))
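# signed LEB128: emit seven bits at a time until what remains is pure
# sign extension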
def sleb(a, x):
t = 0
while not t:
b = x & 127
x >>= 7
if (x == 0 and not (b & 64)) or (x == -1 and (b & 64)):
t = 1
else:
b |= 128
a.append(b)
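# delta + signed LEB128: slowly varying sequences become streams of tiny
# byte-sized deltas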
def δleb(data):
p = 0
a = bytearray()
for x in data:
sleb(a, x - p)
p = x
return a
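# delta + zigzag + LEB128 + DEFLATE; returns the compressed bytes together
# with the uncompressed length needed to inflate at runtime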
def δzd(data):
p = 0
a = bytearray()
for x in data:
zleb(a, x - p)
p = x
return deflate(a), len(a)
def com(x):
return '{:,}'.format(x)
class Array:
def __init__(self, name, data, rle=False, pack=False, δzd=False):
self.name = name
self.data = data
self.pack = pack
self.rle = rle # adds 90µs latency to startup
self.δzd = δzd
def to_bytes(self, size, order):
return b''.join(i.to_bytes(size, order) for i in self.data)
def dump(self, file, hdr, trace=0):
# write data to f, as a C array
f = file
bits = max(x.bit_length() for x in self.data)
size = getsize(self.data)
if trace:
print("%s: %d bits" % (self.name, bits))
print("%s: size is %12s bytes" % (self.name, com(size*len(self.data))))
print("%s: packed size is %12s bytes" % (self.name, com((bits*len(self.data)+31)//32*4)))
print("%s: rle size is %12s bytes" % (self.name, com(len(tuple(rle(self.data, (1<<(8*size))-1)))*size*2)))
print("%s: deflate size is %12s bytes" % (self.name, com(len(deflate(self.to_bytes(size, 'little'))))))
print("%s: bz2 size is %12s bytes" % (self.name, com(len(bzip(self.to_bytes(size, 'little'))))))
print("%s: δleb size is %12s bytes" % (self.name, com(len(δleb(self.data)))))
print("%s: δzd size is %12s bytes" % (self.name, com(len(δzd(self.data)[0]))))
if self.pack:
hdr.write("#define %sBits %d\n" % (self.name, bits))
self.data = tuple(pack(self.data, bits))
size = 4
if self.δzd:
m = size
self.data, n = δzd(self.data)
size = 1
if size == 1:
t = "unsigned char"
elif size == 2:
t = "unsigned short"
else:
t = "unsigned int"
hdr.write("extern const %s %s[%d];\n" % (t, self.name, len(self.data)))
if self.rle:
codes = tuple(rle(self.data, (1<<(8*size))-1))
f.write("%s %s[%d];\n" % (t, self.name, len(self.data)))
f.write("static const %s %s_rodata[%d+1][2] = { /* %g%% profit */\n" % (t, self.name, len(codes), len(codes) * size * 2 / float(len(self.data) * size) * 100))
for a,b in codes:
f.write(" {%3d, 0x%02x},\n" % (a, b))
f.write(" {0},\n")
f.write("};\n")
f.write("static textstartup void %s_init(void) {\n" % (self.name));
if size == 1:
f.write(" rldecode2(%s, (void *)%s_rodata);\n" % (self.name, self.name));
else:
f.write(" int i, j, k;\n");
f.write(" for (k = i = 0; i < %d; ++i) {\n" % (len(codes)));
f.write(" for (j = 0; j < %s_rodata[i][0]; ++j) {\n" % (self.name));
f.write(" %s[k++] = %s_rodata[i][1];\n" % (self.name, self.name));
f.write(" }\n");
f.write(" }\n");
f.write("}\n");
f.write("const void *const %s_ctor[] initarray = {\n" % (self.name));
f.write(" %s_init,\n" % (self.name));
f.write("};\n");
f.write("\n");
else:
f.write("const %s %s[%d] = {\n" % (t, self.name, len(self.data)))
if self.data:
s = " "
for item in self.data:
i = str(item) + ", "
if len(s) + len(i) > 78:
f.write(s + "\n")
s = " " + i
else:
s = s + i
if s.strip():
f.write(s + "\n")
f.write("};\n\n")
if self.δzd:
f.write("/* %d %d */\n" % (n, m))
def getsize(data):
# return smallest possible integer size for the given array
maxdata = max(data)
if maxdata < 256:
return 1
elif maxdata < 65536:
return 2
else:
return 4
def splitbins(t, trace=0):
"""t, trace=0 -> (t1, t2, shift). Split a table to save space.
t is a sequence of ints. This function can be useful to save space if
many of the ints are the same. t1 and t2 are lists of ints, and shift
is an int, chosen to minimize the combined size of t1 and t2 (in C
code), and where for each i in range(len(t)),
t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
where mask is a bitmask isolating the last "shift" bits.
If optional arg trace is non-zero (default zero), progress info
is printed to sys.stderr. The higher the value, the more info
you'll get.
"""
if trace:
def dump(t1, t2, shift, bytes):
print("%d+%d bins at shift %d; %d bytes" % (
len(t1), len(t2), shift, bytes))
print("Size of original table:", len(t)*getsize(t), \
"bytes")
n = len(t)-1 # last valid index
maxshift = 0 # the most we can shift n and still have something left
if n > 0:
while n >> 1:
n >>= 1
maxshift += 1
del n
bytes = sys.maxsize # smallest total size so far
t = tuple(t) # so slices can be dict keys
for shift in range(maxshift + 1):
t1 = []
t2 = []
size = 2**shift
bincache = {}
for i in range(0, len(t), size):
bin = t[i:i+size]
index = bincache.get(bin)
if index is None:
index = len(t2)
bincache[bin] = index
t2.extend(bin)
t1.append(index >> shift)
# determine memory size
b = len(t1)*getsize(t1) + len(t2)*getsize(t2)
if trace > 1:
dump(t1, t2, shift, b)
if b < bytes:
best = t1, t2, shift
bytes = b
t1, t2, shift = best
if trace:
print("Best:", end=' ')
dump(t1, t2, shift, bytes)
if __debug__:
# exhaustively verify that the decomposition is correct
mask = ~((~0) << shift) # i.e., low-bit mask of shift bits
for i in range(len(t)):
assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
return best
if __name__ == "__main__":
maketables(1)