path (string, 14-112 chars) | content (string, 0-6.32M chars) | size (int64, 0-6.32M) | max_lines (int64, 1-100k) | repo_name (string, 2 values) | autogenerated (bool, 1 class)
---|---|---|---|---|---|
cosmopolitan/third_party/python/Tools/scripts/win_add2path.py | """Add Python to the search path on Windows
This is a simple script to add Python to the Windows search path. It
modifies the current user (HKCU) tree of the registry.
Copyright (c) 2008 by Christian Heimes <[email protected]>
Licensed to PSF under a Contributor Agreement.
"""
import sys
import site
import os
import winreg
HKCU = winreg.HKEY_CURRENT_USER
ENV = "Environment"
PATH = "PATH"
DEFAULT = "%PATH%"
def modify():
pythonpath = os.path.dirname(os.path.normpath(sys.executable))
scripts = os.path.join(pythonpath, "Scripts")
appdata = os.environ["APPDATA"]
if hasattr(site, "USER_SITE"):
usersite = site.USER_SITE.replace(appdata, "%APPDATA%")
userpath = os.path.dirname(usersite)
userscripts = os.path.join(userpath, "Scripts")
else:
userscripts = None
with winreg.CreateKey(HKCU, ENV) as key:
try:
envpath = winreg.QueryValueEx(key, PATH)[0]
except OSError:
envpath = DEFAULT
paths = [envpath]
for path in (pythonpath, scripts, userscripts):
if path and path not in envpath and os.path.isdir(path):
paths.append(path)
envpath = os.pathsep.join(paths)
winreg.SetValueEx(key, PATH, 0, winreg.REG_EXPAND_SZ, envpath)
return paths, envpath
def main():
paths, envpath = modify()
if len(paths) > 1:
print("Path(s) added:")
print('\n'.join(paths[1:]))
else:
print("No path was added")
print("\nPATH is now:\n%s\n" % envpath)
print("Expanded:")
print(winreg.ExpandEnvironmentStrings(envpath))
if __name__ == '__main__':
main()
| 1,658 | 59 | jart/cosmopolitan | false |
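A small companion sketch, not part of the dumped file and Windows-only: it reads the same HKCU\Environment PATH value that win_add2path.py rewrites, so the result can be inspected without touching the registry. The function name current_user_path is made up for illustration.

```python
import winreg

def current_user_path():
    # HKCU\Environment is where win_add2path.py writes the per-user PATH.
    with winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Environment") as key:
        try:
            value, _type = winreg.QueryValueEx(key, "PATH")
        except OSError:
            return None
        return value

if __name__ == "__main__":
    value = current_user_path()
    if value is None:
        print("no per-user PATH value set")
    else:
        print("raw:     ", value)
        print("expanded:", winreg.ExpandEnvironmentStrings(value))
```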
cosmopolitan/third_party/python/Tools/scripts/findlinksto.py | #! /usr/bin/env python3
# findlinksto
#
# find symbolic links to a path matching a regular expression
import os
import sys
import re
import getopt
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], '')
if len(args) < 2:
raise getopt.GetoptError('not enough arguments', None)
except getopt.GetoptError as msg:
sys.stdout = sys.stderr
print(msg)
print('usage: findlinksto pattern directory ...')
sys.exit(2)
pat, dirs = args[0], args[1:]
prog = re.compile(pat)
    for dirname in dirs:
        # os.walk() is a generator in Python 3; the callback-style
        # os.path.walk() interface this script was written for is gone,
        # so drive visit() from the generator instead.
        for root, subdirs, files in os.walk(dirname):
            if os.path.ismount(root):
                print('descend into', root)
            visit(prog, root, subdirs + files)
            # do not descend into symbolic links to directories
            subdirs[:] = [name for name in subdirs
                          if not os.path.islink(os.path.join(root, name))]
def visit(prog, dirname, names):
    for name in names:
        name = os.path.join(dirname, name)
        try:
            linkto = os.readlink(name)
            if prog.search(linkto) is not None:
                print(name, '->', linkto)
        except OSError:
            pass
if __name__ == '__main__':
main()
| 1,071 | 44 | jart/cosmopolitan | false |
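For reference, the same "symlinks whose target matches a regex" scan can be written directly against the os.walk generator; this is an illustrative sketch, not code from the repository.

```python
import os
import re
import sys

def find_links(pattern, *roots):
    prog = re.compile(pattern)
    for root in roots:
        for dirpath, dirnames, filenames in os.walk(root):
            for name in dirnames + filenames:
                path = os.path.join(dirpath, name)
                if os.path.islink(path):
                    target = os.readlink(path)
                    if prog.search(target):
                        print(path, '->', target)

if __name__ == '__main__':
    find_links(sys.argv[1], *sys.argv[2:])
```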
cosmopolitan/third_party/python/Tools/scripts/reindent-rst.py | #!/usr/bin/env python3
# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.
import sys
import patchcheck
def main(argv=sys.argv):
patchcheck.normalize_docs_whitespace(argv[1:])
if __name__ == '__main__':
sys.exit(main())
| 279 | 15 | jart/cosmopolitan | false |
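patchcheck is another Tools/scripts module that is not included in this dump. Assuming the goal really is just trailing-whitespace removal, as the comment says, a standalone approximation might look like this (strip_trailing_whitespace is a hypothetical name):

```python
import sys

def strip_trailing_whitespace(filenames):
    changed = []
    for filename in filenames:
        with open(filename, encoding="utf-8") as f:
            lines = f.readlines()
        new_lines = [line.rstrip() + "\n" for line in lines]
        if new_lines != lines:
            with open(filename, "w", encoding="utf-8") as f:
                f.writelines(new_lines)
            changed.append(filename)
    return changed

if __name__ == "__main__":
    for name in strip_trailing_whitespace(sys.argv[1:]):
        print("fixed", name)
```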
cosmopolitan/third_party/python/Tools/scripts/byteyears.py | #! /usr/bin/env python3
# Print the product of age and size of each file, in suitable units.
#
# Usage: byteyears [ -a | -m | -c ] file ...
#
# Options -[amc] select atime, mtime (default) or ctime as age.
import sys, os, time
from stat import *
def main():
# Use lstat() to stat files if it exists, else stat()
try:
statfunc = os.lstat
except AttributeError:
statfunc = os.stat
# Parse options
if sys.argv[1] == '-m':
itime = ST_MTIME
del sys.argv[1]
elif sys.argv[1] == '-c':
itime = ST_CTIME
del sys.argv[1]
elif sys.argv[1] == '-a':
        itime = ST_ATIME
del sys.argv[1]
else:
itime = ST_MTIME
secs_per_year = 365.0 * 24.0 * 3600.0 # Scale factor
now = time.time() # Current time, for age computations
status = 0 # Exit status, set to 1 on errors
# Compute max file name length
maxlen = 1
for filename in sys.argv[1:]:
maxlen = max(maxlen, len(filename))
# Process each argument in turn
for filename in sys.argv[1:]:
try:
st = statfunc(filename)
except OSError as msg:
sys.stderr.write("can't stat %r: %r\n" % (filename, msg))
status = 1
st = ()
if st:
anytime = st[itime]
size = st[ST_SIZE]
age = now - anytime
byteyears = float(size) * float(age) / secs_per_year
print(filename.ljust(maxlen), end=' ')
print(repr(int(byteyears)).rjust(8))
sys.exit(status)
if __name__ == '__main__':
main()
| 1,650 | 62 | jart/cosmopolitan | false |
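The quantity printed above is just size multiplied by age in years. A minimal restatement for a single path, using the named st_* attributes instead of indexed stat fields (byteyears here is an illustrative helper, not the script's own API):

```python
import os
import time

SECS_PER_YEAR = 365.0 * 24.0 * 3600.0

def byteyears(path, now=None):
    st = os.lstat(path)
    if now is None:
        now = time.time()
    return st.st_size * (now - st.st_mtime) / SECS_PER_YEAR

if __name__ == "__main__":
    import sys
    for name in sys.argv[1:]:
        print(name, int(byteyears(name)))
```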
cosmopolitan/third_party/python/Tools/scripts/lfcr.py | #! /usr/bin/env python3
"Replace LF with CRLF in argument files. Print names of changed files."
import sys, re, os
def main():
for filename in sys.argv[1:]:
if os.path.isdir(filename):
print(filename, "Directory!")
continue
with open(filename, "rb") as f:
data = f.read()
if b'\0' in data:
print(filename, "Binary!")
continue
newdata = re.sub(b"\r?\n", b"\r\n", data)
if newdata != data:
print(filename)
with open(filename, "wb") as f:
f.write(newdata)
if __name__ == '__main__':
main()
| 640 | 25 | jart/cosmopolitan | false |
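A quick sanity check of the substitution the script applies: a bare LF becomes CRLF, lines that already end in CRLF are left alone, and therefore a second run changes nothing.

```python
import re

def to_crlf(data: bytes) -> bytes:
    # Same pattern as lfcr.py: normalize LF or CRLF endings to CRLF.
    return re.sub(b"\r?\n", b"\r\n", data)

assert to_crlf(b"a\nb\r\nc\n") == b"a\r\nb\r\nc\r\n"
assert to_crlf(to_crlf(b"a\nb\n")) == to_crlf(b"a\nb\n")  # idempotent
```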
cosmopolitan/third_party/python/Tools/scripts/pathfix.py | #!/usr/bin/env python3
# Change the #! line occurring in Python scripts. The new interpreter
# pathname must be given with a -i option.
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments).
# The original file is kept as a back-up (with a "~" attached to its name),
# -n flag can be used to disable this.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixfile() you can turn this
# into a program for a different change to Python programs...
import sys
import re
import os
from stat import *
import getopt
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
new_interpreter = None
preserve_timestamps = False
create_backup = True
def main():
global new_interpreter
global preserve_timestamps
global create_backup
usage = ('usage: %s -i /interpreter -p -n file-or-directory ...\n' %
sys.argv[0])
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:pn')
except getopt.error as msg:
err(str(msg) + '\n')
err(usage)
sys.exit(2)
for o, a in opts:
if o == '-i':
new_interpreter = a.encode()
if o == '-p':
preserve_timestamps = True
if o == '-n':
create_backup = False
if not new_interpreter or not new_interpreter.startswith(b'/') or \
not args:
err('-i option or file-or-directory missing\n')
err(usage)
sys.exit(2)
bad = 0
for arg in args:
if os.path.isdir(arg):
if recursedown(arg): bad = 1
elif os.path.islink(arg):
err(arg + ': will not process symbolic links\n')
bad = 1
else:
if fix(arg): bad = 1
sys.exit(bad)
ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')
def ispython(name):
return bool(ispythonprog.match(name))
def recursedown(dirname):
dbg('recursedown(%r)\n' % (dirname,))
bad = 0
try:
names = os.listdir(dirname)
except OSError as msg:
err('%s: cannot list directory: %r\n' % (dirname, msg))
return 1
names.sort()
subdirs = []
for name in names:
if name in (os.curdir, os.pardir): continue
fullname = os.path.join(dirname, name)
if os.path.islink(fullname): pass
elif os.path.isdir(fullname):
subdirs.append(fullname)
elif ispython(name):
if fix(fullname): bad = 1
for fullname in subdirs:
if recursedown(fullname): bad = 1
return bad
def fix(filename):
## dbg('fix(%r)\n' % (filename,))
try:
f = open(filename, 'rb')
except IOError as msg:
err('%s: cannot open: %r\n' % (filename, msg))
return 1
line = f.readline()
fixed = fixline(line)
if line == fixed:
rep(filename+': no change\n')
f.close()
return
head, tail = os.path.split(filename)
tempname = os.path.join(head, '@' + tail)
try:
g = open(tempname, 'wb')
except IOError as msg:
f.close()
err('%s: cannot create: %r\n' % (tempname, msg))
return 1
rep(filename + ': updating\n')
g.write(fixed)
BUFSIZE = 8*1024
while 1:
buf = f.read(BUFSIZE)
if not buf: break
g.write(buf)
g.close()
f.close()
# Finishing touch -- move files
mtime = None
atime = None
# First copy the file's mode to the temp file
try:
statbuf = os.stat(filename)
mtime = statbuf.st_mtime
atime = statbuf.st_atime
os.chmod(tempname, statbuf[ST_MODE] & 0o7777)
except OSError as msg:
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
# Then make a backup of the original file as filename~
if create_backup:
try:
os.rename(filename, filename + '~')
except OSError as msg:
err('%s: warning: backup failed (%r)\n' % (filename, msg))
else:
try:
os.remove(filename)
except OSError as msg:
err('%s: warning: removing failed (%r)\n' % (filename, msg))
# Now move the temp file to the original file
try:
os.rename(tempname, filename)
except OSError as msg:
err('%s: rename failed (%r)\n' % (filename, msg))
return 1
if preserve_timestamps:
if atime and mtime:
try:
os.utime(filename, (atime, mtime))
except OSError as msg:
err('%s: reset of timestamp failed (%r)\n' % (filename, msg))
return 1
# Return success
return 0
def fixline(line):
if not line.startswith(b'#!'):
return line
if b"python" not in line:
return line
return b'#! ' + new_interpreter + b'\n'
if __name__ == '__main__':
main()
| 5,207 | 178 | jart/cosmopolitan | false |
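All of the decision logic sits in fixline(). Pulled out as a pure function for illustration (new_interpreter becomes a parameter rather than the module global the real script uses), its behavior on a few sample first lines:

```python
def fixline(line: bytes, new_interpreter: bytes) -> bytes:
    # Only "#!" lines that mention "python" are rewritten.
    if not line.startswith(b'#!'):
        return line
    if b"python" not in line:
        return line
    return b'#! ' + new_interpreter + b'\n'

assert fixline(b'#!/usr/bin/env python\n', b'/usr/bin/python3') == b'#! /usr/bin/python3\n'
assert fixline(b'#!/bin/sh\n', b'/usr/bin/python3') == b'#!/bin/sh\n'
assert fixline(b'print("hi")\n', b'/usr/bin/python3') == b'print("hi")\n'
```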
cosmopolitan/third_party/python/Tools/scripts/ndiff.py | #! /usr/bin/env python3
# Module ndiff version 1.7.0
# Released to the public domain 08-Dec-2000,
# by Tim Peters ([email protected]).
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
# ndiff.py is now simply a front-end to the difflib.ndiff() function.
# Originally, it contained the difflib.SequenceMatcher class as well.
# This completes the raiding of reusable code from this formerly
# self-contained script.
"""ndiff [-q] file1 file2
or
ndiff (-r1 | -r2) < ndiff_output > file1_or_file2
Print a human-friendly file difference report to stdout. Both inter-
and intra-line differences are noted. In the second form, recreate file1
(-r1) or file2 (-r2) on stdout, from an ndiff report on stdin.
In the first form, if -q ("quiet") is not specified, the first two lines
of output are
-: file1
+: file2
Each remaining line begins with a two-letter code:
"- " line unique to file1
"+ " line unique to file2
" " line common to both files
"? " line not present in either input file
Lines beginning with "? " attempt to guide the eye to intraline
differences, and were not present in either input file. These lines can be
confusing if the source files contain tab characters.
The first file can be recovered by retaining only lines that begin with
" " or "- ", and deleting those 2-character prefixes; use ndiff with -r1.
The second file can be recovered similarly, but by retaining only " " and
"+ " lines; use ndiff with -r2; or, on Unix, the second file can be
recovered by piping the output through
sed -n '/^[+ ] /s/^..//p'
"""
__version__ = 1, 7, 0
import difflib, sys
def fail(msg):
out = sys.stderr.write
out(msg + "\n\n")
out(__doc__)
return 0
# open a file & return the file object; gripe and return 0 if it
# couldn't be opened
def fopen(fname):
try:
return open(fname)
except IOError as detail:
return fail("couldn't open " + fname + ": " + str(detail))
# open two files & spray the diff to stdout; return false iff a problem
def fcompare(f1name, f2name):
f1 = fopen(f1name)
f2 = fopen(f2name)
if not f1 or not f2:
return 0
a = f1.readlines(); f1.close()
b = f2.readlines(); f2.close()
for line in difflib.ndiff(a, b):
print(line, end=' ')
return 1
# crack args (sys.argv[1:] is normal) & compare;
# return false iff a problem
def main(args):
import getopt
try:
opts, args = getopt.getopt(args, "qr:")
except getopt.error as detail:
return fail(str(detail))
noisy = 1
qseen = rseen = 0
for opt, val in opts:
if opt == "-q":
qseen = 1
noisy = 0
elif opt == "-r":
rseen = 1
whichfile = val
if qseen and rseen:
return fail("can't specify both -q and -r")
if rseen:
if args:
return fail("no args allowed with -r option")
if whichfile in ("1", "2"):
restore(whichfile)
return 1
return fail("-r value must be 1 or 2")
if len(args) != 2:
return fail("need 2 filename args")
f1name, f2name = args
if noisy:
print('-:', f1name)
print('+:', f2name)
return fcompare(f1name, f2name)
# read ndiff output from stdin, and print file1 (which=='1') or
# file2 (which=='2') to stdout
def restore(which):
restored = difflib.restore(sys.stdin.readlines(), which)
sys.stdout.writelines(restored)
if __name__ == '__main__':
args = sys.argv[1:]
if "-profile" in args:
import profile, pstats
args.remove("-profile")
statf = "ndiff.pro"
profile.run("main(args)", statf)
stats = pstats.Stats(statf)
stats.strip_dirs().sort_stats('time').print_stats()
else:
main(args)
| 3,820 | 134 | jart/cosmopolitan | false |
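Because the script is only a front-end, the diff-and-restore round trip it exposes can be reproduced directly with difflib; this sketch exercises the two functions it wraps.

```python
import difflib

a = ["one\n", "two\n", "three\n"]
b = ["one\n", "tree\n", "emu\n"]

delta = list(difflib.ndiff(a, b))
for line in delta:
    print(line, end="")

# restore(delta, 1) recovers the first sequence, restore(delta, 2) the second.
assert list(difflib.restore(delta, 1)) == a
assert list(difflib.restore(delta, 2)) == b
```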
cosmopolitan/third_party/python/Tools/scripts/ptags.py | #! /usr/bin/env python3
# ptags
#
# Create a tags file for Python programs, usable with vi.
# Tagged are:
# - functions (even inside other defs or classes)
# - classes
# - filenames
# Warns about files it cannot open.
# No warnings about duplicate tags.
import sys, re, os
tags = [] # Modified global variable!
def main():
args = sys.argv[1:]
for filename in args:
treat_file(filename)
if tags:
fp = open('tags', 'w')
tags.sort()
for s in tags: fp.write(s)
expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
matcher = re.compile(expr)
def treat_file(filename):
try:
fp = open(filename, 'r')
    except OSError:
sys.stderr.write('Cannot open %s\n' % filename)
return
base = os.path.basename(filename)
if base[-3:] == '.py':
base = base[:-3]
s = base + '\t' + filename + '\t' + '1\n'
tags.append(s)
while 1:
line = fp.readline()
if not line:
break
m = matcher.match(line)
if m:
content = m.group(0)
name = m.group(2)
s = name + '\t' + filename + '\t/^' + content + '/\n'
tags.append(s)
if __name__ == '__main__':
main()
| 1,227 | 54 | jart/cosmopolitan | false |
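What the module-level regex captures, and the vi tags line built from each match, shown on a few made-up source lines (example.py is a placeholder filename):

```python
import re

matcher = re.compile(r'^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]')

sample = [
    "class Spam:\n",
    "    def eggs(self):\n",
    "x = 1\n",                      # no match: not a def/class line
]
for line in sample:
    m = matcher.match(line)
    if m:
        print("%s\texample.py\t/^%s/" % (m.group(2), m.group(0)))
```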
cosmopolitan/third_party/python/Tools/scripts/nm2def.py | #! /usr/bin/env python3
"""nm2def.py
Helpers to extract symbols from Unix libs and auto-generate
Windows definition files from them. Depends on nm(1). Tested
on Linux and Solaris only (-p option to nm is for Solaris only).
By Marc-Andre Lemburg, Aug 1998.
Additional notes: the output of nm is supposed to look like this:
acceler.o:
000001fd T PyGrammar_AddAccelerators
U PyGrammar_FindDFA
00000237 T PyGrammar_RemoveAccelerators
U _IO_stderr_
U exit
U fprintf
U free
U malloc
U printf
grammar1.o:
00000000 T PyGrammar_FindDFA
00000034 T PyGrammar_LabelRepr
U _PyParser_TokenNames
U abort
U printf
U sprintf
...
Even if this isn't the default output of your nm, there is generally an
option to produce this format (since it is the original v7 Unix format).
"""
import os, sys
PYTHONLIB = 'libpython%d.%d.a' % sys.version_info[:2]
PC_PYTHONLIB = 'Python%d%d.dll' % sys.version_info[:2]
NM = 'nm -p -g %s' # For Linux, use "nm -g %s"
def symbols(lib=PYTHONLIB,types=('T','C','D')):
lines = os.popen(NM % lib).readlines()
lines = [s.strip() for s in lines]
symbols = {}
for line in lines:
if len(line) == 0 or ':' in line:
continue
items = line.split()
if len(items) != 3:
continue
address, type, name = items
if type not in types:
continue
symbols[name] = address,type
return symbols
def export_list(symbols):
data = []
code = []
for name,(addr,type) in symbols.items():
if type in ('C','D'):
data.append('\t'+name)
else:
code.append('\t'+name)
data.sort()
data.append('')
code.sort()
return ' DATA\n'.join(data)+'\n'+'\n'.join(code)
# Definition file template
DEF_TEMPLATE = """\
EXPORTS
%s
"""
# Special symbols that have to be included even though they don't
# pass the filter
SPECIALS = (
)
def filter_Python(symbols,specials=SPECIALS):
for name in list(symbols.keys()):
if name[:2] == 'Py' or name[:3] == '_Py':
pass
elif name not in specials:
del symbols[name]
def main():
s = symbols(PYTHONLIB)
filter_Python(s)
exports = export_list(s)
f = sys.stdout # open('PC/python_nt.def','w')
f.write(DEF_TEMPLATE % (exports))
f.close()
if __name__ == '__main__':
main()
| 2,454 | 104 | jart/cosmopolitan | false |
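The filtering rule inside symbols(), applied to a fragment of the sample nm output from the docstring: object-file headers (lines containing ':') and undefined symbols (only two fields) are skipped, and only T/C/D entries survive.

```python
sample = """\
acceler.o:
000001fd T PyGrammar_AddAccelerators
         U PyGrammar_FindDFA
00000237 T PyGrammar_RemoveAccelerators
         U exit
"""

symbols = {}
for line in sample.splitlines():
    line = line.strip()
    if not line or ':' in line:
        continue
    items = line.split()
    if len(items) != 3:
        continue
    address, symtype, name = items
    if symtype in ('T', 'C', 'D'):
        symbols[name] = (address, symtype)

assert sorted(symbols) == ['PyGrammar_AddAccelerators',
                           'PyGrammar_RemoveAccelerators']
```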
cosmopolitan/third_party/python/Tools/scripts/svneol.py | #! /usr/bin/env python3
r"""
SVN helper script.
Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
.h file in the directory tree rooted at the current directory.
Files with the svn:eol-style property already set (to anything) are skipped.
svn will itself refuse to set this property on a file that's not under SVN
control, or that has a binary mime-type property set. This script inherits
that behavior, and passes on whatever warning message the failing "svn
propset" command produces.
In the Python project, it's safe to invoke this script from the root of
a checkout.
No output is produced for files that are ignored. For a file that gets
svn:eol-style set, output looks like:
property 'svn:eol-style' set on 'Lib\ctypes\__init__.py'
For a file not under version control:
svn: warning: 'patch-finalizer.txt' is not under version control
and for a file with a binary mime-type property:
svn: File 'Lib\test\test_pep263.py' has binary mime type property
"""
import re
import os
import sys
import subprocess
def propfiles(root, fn):
default = os.path.join(root, ".svn", "props", fn + ".svn-work")
try:
format = int(open(os.path.join(root, ".svn", "format")).read().strip())
except IOError:
return []
if format in (8, 9):
# In version 8 and 9, committed props are stored in prop-base, local
# modifications in props
return [os.path.join(root, ".svn", "prop-base", fn + ".svn-base"),
os.path.join(root, ".svn", "props", fn + ".svn-work")]
raise ValueError("Unknown repository format")
def proplist(root, fn):
"""Return a list of property names for file fn in directory root."""
result = []
for path in propfiles(root, fn):
try:
f = open(path)
except IOError:
# no properties file: not under version control,
# or no properties set
continue
while True:
# key-value pairs, of the form
# K <length>
# <keyname>NL
# V length
# <value>NL
# END
line = f.readline()
if line.startswith("END"):
break
assert line.startswith("K ")
L = int(line.split()[1])
key = f.read(L)
result.append(key)
f.readline()
line = f.readline()
assert line.startswith("V ")
L = int(line.split()[1])
value = f.read(L)
f.readline()
f.close()
return result
def set_eol_native(path):
cmd = 'svn propset svn:eol-style native "{}"'.format(path)
propset = subprocess.Popen(cmd, shell=True)
propset.wait()
possible_text_file = re.compile(r"\.([hc]|py|txt|sln|vcproj)$").search
def main():
for arg in sys.argv[1:] or [os.curdir]:
if os.path.isfile(arg):
root, fn = os.path.split(arg)
if 'svn:eol-style' not in proplist(root, fn):
set_eol_native(arg)
elif os.path.isdir(arg):
for root, dirs, files in os.walk(arg):
if '.svn' in dirs:
dirs.remove('.svn')
for fn in files:
if possible_text_file(fn):
if 'svn:eol-style' not in proplist(root, fn):
path = os.path.join(root, fn)
set_eol_native(path)
if __name__ == '__main__':
main()
| 3,494 | 115 | jart/cosmopolitan | false |
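The K/V/END record format that proplist() parses out of .svn property files, demonstrated on an in-memory sample so the loop can be followed without an SVN working copy:

```python
import io

sample = io.StringIO(
    "K 13\n"
    "svn:eol-style\n"
    "V 6\n"
    "native\n"
    "END\n"
)

props = {}
while True:
    line = sample.readline()
    if line.startswith("END"):
        break
    assert line.startswith("K ")
    key = sample.read(int(line.split()[1]))
    sample.readline()                      # newline terminating the key
    line = sample.readline()
    assert line.startswith("V ")
    value = sample.read(int(line.split()[1]))
    sample.readline()                      # newline terminating the value
    props[key] = value

assert props == {"svn:eol-style": "native"}
```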
cosmopolitan/third_party/python/Tools/scripts/make_ctype.py | #!/usr/bin/env python3
"""Script that generates the ctype.h-replacement in stringobject.c."""
NAMES = ("LOWER", "UPPER", "ALPHA", "DIGIT", "XDIGIT", "ALNUM", "SPACE")
print("""
#define FLAG_LOWER 0x01
#define FLAG_UPPER 0x02
#define FLAG_ALPHA (FLAG_LOWER|FLAG_UPPER)
#define FLAG_DIGIT 0x04
#define FLAG_ALNUM (FLAG_ALPHA|FLAG_DIGIT)
#define FLAG_SPACE 0x08
#define FLAG_XDIGIT 0x10
static unsigned int ctype_table[256] = {""")
for i in range(128):
c = chr(i)
flags = []
for name in NAMES:
if name in ("ALPHA", "ALNUM"):
continue
if name == "XDIGIT":
method = lambda: c.isdigit() or c.upper() in "ABCDEF"
else:
method = getattr(c, "is" + name.lower())
if method():
flags.append("FLAG_" + name)
rc = repr(c)
if c == '\v':
rc = "'\\v'"
elif c == '\f':
rc = "'\\f'"
if not flags:
print(" 0, /* 0x%x %s */" % (i, rc))
else:
print(" %s, /* 0x%x %s */" % ("|".join(flags), i, rc))
for i in range(128, 256, 16):
print(" %s," % ", ".join(16*["0"]))
print("};")
print("")
for name in NAMES:
print("#define IS%s(c) (ctype_table[Py_CHARMASK(c)] & FLAG_%s)" %
(name, name))
print("")
for name in NAMES:
name = "is" + name.lower()
print("#undef %s" % name)
print("#define %s(c) undefined_%s(c)" % (name, name))
print("""
static unsigned char ctype_tolower[256] = {""")
for i in range(0, 256, 8):
values = []
for i in range(i, i+8):
if i < 128:
c = chr(i)
if c.isupper():
i = ord(c.lower())
values.append("0x%02x" % i)
print(" %s," % ", ".join(values))
print("};")
print("""
static unsigned char ctype_toupper[256] = {""")
for i in range(0, 256, 8):
values = []
for i in range(i, i+8):
if i < 128:
c = chr(i)
if c.islower():
i = ord(c.upper())
values.append("0x%02x" % i)
print(" %s," % ", ".join(values))
print("};")
print("""
#define TOLOWER(c) (ctype_tolower[Py_CHARMASK(c)])
#define TOUPPER(c) (ctype_toupper[Py_CHARMASK(c)])
#undef tolower
#define tolower(c) undefined_tolower(c)
#undef toupper
#define toupper(c) undefined_toupper(c)
""")
| 2,280 | 95 | jart/cosmopolitan | false |
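The per-character classification the generator performs, restated as a small helper and checked on a few sample characters (flag names only; the real script emits the corresponding C tables and macros):

```python
def flags_for(c):
    flags = []
    if c.islower():
        flags.append("FLAG_LOWER")
    if c.isupper():
        flags.append("FLAG_UPPER")
    if c.isdigit():
        flags.append("FLAG_DIGIT")
    if c.isdigit() or c.upper() in "ABCDEF":
        flags.append("FLAG_XDIGIT")
    if c.isspace():
        flags.append("FLAG_SPACE")
    return flags

assert flags_for("a") == ["FLAG_LOWER", "FLAG_XDIGIT"]
assert flags_for("7") == ["FLAG_DIGIT", "FLAG_XDIGIT"]
assert flags_for(" ") == ["FLAG_SPACE"]
```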
cosmopolitan/third_party/python/Tools/scripts/texi2html.py | #! /usr/bin/env python3
# Convert GNU texinfo files into HTML, one file per node.
# Based on Texinfo 2.14.
# Usage: texi2html [-d] [-d] [-c] inputfile outputdirectory
# The input file must be a complete texinfo file, e.g. emacs.texi.
# This creates many files (one per info node) in the output directory,
# overwriting existing files of the same name. All files created have
# ".html" as their extension.
# XXX To do:
# - handle @comment*** correctly
# - handle @xref {some words} correctly
# - handle @ftable correctly (items aren't indexed?)
# - handle @itemx properly
# - handle @exdent properly
# - add links directly to the proper line from indices
# - check against the definitive list of @-cmds; we still miss (among others):
# - @defindex (hard)
# - @c(omment) in the middle of a line (rarely used)
# - @this* (not really needed, only used in headers anyway)
# - @today{} (ever used outside title page?)
# More consistent handling of chapters/sections/etc.
# Lots of documentation
# Many more options:
# -top designate top node
# -links customize which types of links are included
# -split split at chapters or sections instead of nodes
# -name Allow different types of filename handling. Non unix systems
# will have problems with long node names
# ...
# Support the most recent texinfo version and take a good look at HTML 3.0
# More debugging output (customizable) and more flexible error handling
# How about icons ?
# rpyron 2002-05-07
# Robert Pyron <[email protected]>
# 1. BUGFIX: In function makefile(), strip blanks from the nodename.
# This is necessary to match the behavior of parser.makeref() and
# parser.do_node().
# 2. BUGFIX fixed KeyError in end_ifset (well, I may have just made
# it go away, rather than fix it)
# 3. BUGFIX allow @menu and menu items inside @ifset or @ifclear
# 4. Support added for:
# @uref URL reference
# @image image file reference (see note below)
# @multitable output an HTML table
# @vtable
# 5. Partial support for accents, to match MAKEINFO output
# 6. I added a new command-line option, '-H basename', to specify
# HTML Help output. This will cause three files to be created
# in the current directory:
# `basename`.hhp HTML Help Workshop project file
# `basename`.hhc Contents file for the project
# `basename`.hhk Index file for the project
# When fed into HTML Help Workshop, the resulting file will be
# named `basename`.chm.
# 7. A new class, HTMLHelp, to accomplish item 6.
# 8. Various calls to HTMLHelp functions.
# A NOTE ON IMAGES: Just as 'outputdirectory' must exist before
# running this program, all referenced images must already exist
# in outputdirectory.
import os
import sys
import string
import re
MAGIC = '\\input texinfo'
cmprog = re.compile('^@([a-z]+)([ \t]|$)') # Command (line-oriented)
blprog = re.compile('^[ \t]*$') # Blank line
kwprog = re.compile('@[a-z]+') # Keyword (embedded, usually
# with {} args)
spprog = re.compile('[\n@{}&<>]') # Special characters in
# running text
#
# menu item (Yuck!)
miprog = re.compile(r'^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
# group 1 = the menu entry label; group 2 = either ':' (the label doubles as
# the node name) or the node name plus trailing punctuation; group 3 = the
# node name alone; group 4 = the punctuation that follows it
class HTMLNode:
"""Some of the parser's functionality is separated into this class.
A Node accumulates its contents, takes care of links to other Nodes
and saves itself when it is finished and all links are resolved.
"""
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">'
type = 0
cont = ''
epilogue = '</BODY></HTML>\n'
def __init__(self, dir, name, topname, title, next, prev, up):
self.dirname = dir
self.name = name
if topname:
self.topname = topname
else:
self.topname = name
self.title = title
self.next = next
self.prev = prev
self.up = up
self.lines = []
def write(self, *lines):
for line in lines:
self.lines.append(line)
def flush(self):
fp = open(self.dirname + '/' + makefile(self.name), 'w')
fp.write(self.prologue)
fp.write(self.text)
fp.write(self.epilogue)
fp.close()
def link(self, label, nodename, rel=None, rev=None):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
title = ''
else:
addr = makefile(nodename)
title = ' TITLE="%s"' % nodename
self.write(label, ': <A HREF="', addr, '"', \
rel and (' REL=' + rel) or "", \
rev and (' REV=' + rev) or "", \
title, '>', nodename, '</A> \n')
def finalize(self):
length = len(self.lines)
self.text = ''.join(self.lines)
self.lines = []
self.open_links()
self.output_links()
self.close_links()
links = ''.join(self.lines)
self.lines = []
self.prologue = (
self.DOCTYPE +
'\n<HTML><HEAD>\n'
' <!-- Converted with texi2html and Python -->\n'
' <TITLE>' + self.title + '</TITLE>\n'
' <LINK REL=Next HREF="'
+ makefile(self.next) + '" TITLE="' + self.next + '">\n'
' <LINK REL=Previous HREF="'
+ makefile(self.prev) + '" TITLE="' + self.prev + '">\n'
' <LINK REL=Up HREF="'
+ makefile(self.up) + '" TITLE="' + self.up + '">\n'
'</HEAD><BODY>\n' +
links)
if length > 20:
self.epilogue = '<P>\n%s</BODY></HTML>\n' % links
def open_links(self):
self.write('<HR>\n')
def close_links(self):
self.write('<HR>\n')
def output_links(self):
if self.cont != self.next:
self.link(' Cont', self.cont)
self.link(' Next', self.next, rel='Next')
self.link(' Prev', self.prev, rel='Previous')
self.link(' Up', self.up, rel='Up')
if self.name != self.topname:
self.link(' Top', self.topname)
class HTML3Node(HTMLNode):
DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML Level 3//EN//3.0">'
def open_links(self):
self.write('<DIV CLASS=Navigation>\n <HR>\n')
def close_links(self):
self.write(' <HR>\n</DIV>\n')
class TexinfoParser:
COPYRIGHT_SYMBOL = "©"
FN_ID_PATTERN = "(%(id)s)"
FN_SOURCE_PATTERN = '<A NAME=footnoteref%(id)s' \
' HREF="#footnotetext%(id)s">' \
+ FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<A NAME=footnotetext%(id)s' \
' HREF="#footnoteref%(id)s">' \
+ FN_ID_PATTERN + '</A>\n%(text)s<P>\n'
FN_HEADER = '\n<P>\n<HR NOSHADE SIZE=1 WIDTH=200>\n' \
'<STRONG><EM>Footnotes</EM></STRONG>\n<P>'
Node = HTMLNode
# Initialize an instance
def __init__(self):
self.unknown = {} # statistics about unknown @-commands
self.filenames = {} # Check for identical filenames
self.debugging = 0 # larger values produce more output
self.print_headers = 0 # always print headers?
self.nodefp = None # open file we're writing to
self.nodelineno = 0 # Linenumber relative to node
self.links = None # Links from current node
self.savetext = None # If not None, save text head instead
self.savestack = [] # If not None, save text head instead
self.htmlhelp = None # html help data
self.dirname = 'tmp' # directory where files are created
self.includedir = '.' # directory to search @include files
self.nodename = '' # name of current node
self.topname = '' # name of top node (first node seen)
self.title = '' # title of this whole Texinfo tree
self.resetindex() # Reset all indices
self.contents = [] # Reset table of contents
self.numbering = [] # Reset section numbering counters
self.nofill = 0 # Normal operation: fill paragraphs
self.values={'html': 1} # Names that should be parsed in ifset
self.stackinfo={} # Keep track of state in the stack
# XXX The following should be reset per node?!
self.footnotes = [] # Reset list of footnotes
self.itemarg = None # Reset command used by @item
self.itemnumber = None # Reset number for @item in @enumerate
self.itemindex = None # Reset item index name
self.node = None
self.nodestack = []
self.cont = 0
self.includedepth = 0
# Set htmlhelp helper class
def sethtmlhelp(self, htmlhelp):
self.htmlhelp = htmlhelp
# Set (output) directory name
def setdirname(self, dirname):
self.dirname = dirname
# Set include directory name
def setincludedir(self, includedir):
self.includedir = includedir
# Parse the contents of an entire file
def parse(self, fp):
line = fp.readline()
lineno = 1
while line and (line[0] == '%' or blprog.match(line)):
line = fp.readline()
lineno = lineno + 1
if line[:len(MAGIC)] != MAGIC:
raise SyntaxError('file does not begin with %r' % (MAGIC,))
self.parserest(fp, lineno)
# Parse the contents of a file, not expecting a MAGIC header
def parserest(self, fp, initial_lineno):
lineno = initial_lineno
self.done = 0
self.skip = 0
self.stack = []
accu = []
while not self.done:
line = fp.readline()
self.nodelineno = self.nodelineno + 1
if not line:
if accu:
if not self.skip: self.process(accu)
accu = []
if initial_lineno > 0:
print('*** EOF before @bye')
break
lineno = lineno + 1
mo = cmprog.match(line)
if mo:
a, b = mo.span(1)
cmd = line[a:b]
if cmd in ('noindent', 'refill'):
accu.append(line)
else:
if accu:
if not self.skip:
self.process(accu)
accu = []
self.command(line, mo)
elif blprog.match(line) and \
'format' not in self.stack and \
'example' not in self.stack:
if accu:
if not self.skip:
self.process(accu)
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
accu = []
else:
# Append the line including trailing \n!
accu.append(line)
#
if self.skip:
print('*** Still skipping at the end')
if self.stack:
print('*** Stack not empty at the end')
print('***', self.stack)
if self.includedepth == 0:
while self.nodestack:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
# Start saving text in a buffer instead of writing it to a file
def startsaving(self):
if self.savetext is not None:
self.savestack.append(self.savetext)
# print '*** Recursively saving text, expect trouble'
self.savetext = ''
# Return the text saved so far and start writing to file again
def collectsavings(self):
savetext = self.savetext
if len(self.savestack) > 0:
self.savetext = self.savestack[-1]
del self.savestack[-1]
else:
self.savetext = None
return savetext or ''
# Write text to file, or save it in a buffer, or ignore it
def write(self, *args):
try:
text = ''.join(args)
except:
print(args)
raise TypeError
if self.savetext is not None:
self.savetext = self.savetext + text
elif self.nodefp:
self.nodefp.write(text)
elif self.node:
self.node.write(text)
# Complete the current node -- write footnotes and close file
def endnode(self):
if self.savetext is not None:
print('*** Still saving text at end of node')
dummy = self.collectsavings()
if self.footnotes:
self.writefootnotes()
if self.nodefp:
if self.nodelineno > 20:
self.write('<HR>\n')
[name, next, prev, up] = self.nodelinks[:4]
self.link('Next', next)
self.link('Prev', prev)
self.link('Up', up)
if self.nodename != self.topname:
self.link('Top', self.topname)
self.write('<HR>\n')
self.write('</BODY>\n')
self.nodefp.close()
self.nodefp = None
elif self.node:
if not self.cont and \
(not self.node.type or \
(self.node.next and self.node.prev and self.node.up)):
self.node.finalize()
self.node.flush()
else:
self.nodestack.append(self.node)
self.node = None
self.nodename = ''
# Process a list of lines, expanding embedded @-commands
# This mostly distinguishes between menus and normal text
def process(self, accu):
if self.debugging > 1:
print('!'*self.debugging, 'process:', self.skip, self.stack, end=' ')
if accu: print(accu[0][:30], end=' ')
if accu[0][30:] or accu[1:]: print('...', end=' ')
print()
if self.inmenu():
# XXX should be done differently
for line in accu:
mo = miprog.match(line)
if not mo:
line = line.strip() + '\n'
self.expand(line)
continue
bgn, end = mo.span(0)
a, b = mo.span(1)
c, d = mo.span(2)
e, f = mo.span(3)
g, h = mo.span(4)
label = line[a:b]
nodename = line[c:d]
if nodename[0] == ':': nodename = label
else: nodename = line[e:f]
punct = line[g:h]
self.write(' <LI><A HREF="',
makefile(nodename),
'">', nodename,
'</A>', punct, '\n')
self.htmlhelp.menuitem(nodename)
self.expand(line[end:])
else:
text = ''.join(accu)
self.expand(text)
# find 'menu' (we might be inside 'ifset' or 'ifclear')
def inmenu(self):
#if 'menu' in self.stack:
# print 'inmenu :', self.skip, self.stack, self.stackinfo
stack = self.stack
while stack and stack[-1] in ('ifset','ifclear'):
try:
if self.stackinfo[len(stack)]:
return 0
except KeyError:
pass
stack = stack[:-1]
return (stack and stack[-1] == 'menu')
# Write a string, expanding embedded @-commands
def expand(self, text):
stack = []
i = 0
n = len(text)
while i < n:
start = i
mo = spprog.search(text, i)
if mo:
i = mo.start()
else:
self.write(text[start:])
break
self.write(text[start:i])
c = text[i]
i = i+1
if c == '\n':
self.write('\n')
continue
if c == '<':
self.write('<')
continue
if c == '>':
self.write('>')
continue
if c == '&':
self.write('&')
continue
if c == '{':
stack.append('')
continue
if c == '}':
if not stack:
print('*** Unmatched }')
self.write('}')
continue
cmd = stack[-1]
del stack[-1]
try:
method = getattr(self, 'close_' + cmd)
except AttributeError:
self.unknown_close(cmd)
continue
method()
continue
if c != '@':
# Cannot happen unless spprog is changed
raise RuntimeError('unexpected funny %r' % c)
start = i
while i < n and text[i] in string.ascii_letters: i = i+1
if i == start:
# @ plus non-letter: literal next character
i = i+1
c = text[start:i]
if c == ':':
# `@:' means no extra space after
# preceding `.', `?', `!' or `:'
pass
else:
# `@.' means a sentence-ending period;
# `@@', `@{', `@}' quote `@', `{', `}'
self.write(c)
continue
cmd = text[start:i]
if i < n and text[i] == '{':
i = i+1
stack.append(cmd)
try:
method = getattr(self, 'open_' + cmd)
except AttributeError:
self.unknown_open(cmd)
continue
method()
continue
try:
method = getattr(self, 'handle_' + cmd)
except AttributeError:
self.unknown_handle(cmd)
continue
method()
if stack:
print('*** Stack not empty at para:', stack)
# --- Handle unknown embedded @-commands ---
def unknown_open(self, cmd):
print('*** No open func for @' + cmd + '{...}')
cmd = cmd + '{'
self.write('@', cmd)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_close(self, cmd):
print('*** No close func for @' + cmd + '{...}')
cmd = '}' + cmd
self.write('}')
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def unknown_handle(self, cmd):
print('*** No handler for @' + cmd)
self.write('@', cmd)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# XXX The following sections should be ordered as the texinfo docs
# --- Embedded @-commands without {} argument list --
def handle_noindent(self): pass
def handle_refill(self): pass
# --- Include file handling ---
def do_include(self, args):
file = args
file = os.path.join(self.includedir, file)
try:
fp = open(file, 'r')
except IOError as msg:
print('*** Can\'t open include file', repr(file))
return
print('!'*self.debugging, '--> file', repr(file))
save_done = self.done
save_skip = self.skip
save_stack = self.stack
self.includedepth = self.includedepth + 1
self.parserest(fp, 0)
self.includedepth = self.includedepth - 1
fp.close()
self.done = save_done
self.skip = save_skip
self.stack = save_stack
print('!'*self.debugging, '<-- file', repr(file))
# --- Special Insertions ---
def open_dmn(self): pass
def close_dmn(self): pass
def open_dots(self): self.write('...')
def close_dots(self): pass
def open_bullet(self): pass
def close_bullet(self): pass
def open_TeX(self): self.write('TeX')
def close_TeX(self): pass
def handle_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def open_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
def close_copyright(self): pass
def open_minus(self): self.write('-')
def close_minus(self): pass
# --- Accents ---
# rpyron 2002-05-07
# I would like to do at least as well as makeinfo when
# it is producing HTML output:
#
# input output
# @"o @"o umlaut accent
# @'o 'o acute accent
# @,{c} @,{c} cedilla accent
# @=o @=o macron/overbar accent
# @^o @^o circumflex accent
# @`o `o grave accent
# @~o @~o tilde accent
# @dotaccent{o} @dotaccent{o} overdot accent
# @H{o} @H{o} long Hungarian umlaut
# @ringaccent{o} @ringaccent{o} ring accent
# @tieaccent{oo} @tieaccent{oo} tie-after accent
# @u{o} @u{o} breve accent
# @ubaraccent{o} @ubaraccent{o} underbar accent
# @udotaccent{o} @udotaccent{o} underdot accent
# @v{o} @v{o} hacek or check accent
# @exclamdown{} ¡ upside-down !
# @questiondown{} ¿ upside-down ?
# @aa{},@AA{} å,Å a,A with circle
# @ae{},@AE{} æ,Æ ae,AE ligatures
# @dotless{i} @dotless{i} dotless i
# @dotless{j} @dotless{j} dotless j
# @l{},@L{} l/,L/ suppressed-L,l
# @o{},@O{} ø,Ø O,o with slash
# @oe{},@OE{} oe,OE oe,OE ligatures
# @ss{} ß es-zet or sharp S
#
# The following character codes and approximations have been
# copied from makeinfo's HTML output.
def open_exclamdown(self): self.write('¡') # upside-down !
def close_exclamdown(self): pass
def open_questiondown(self): self.write('¿') # upside-down ?
def close_questiondown(self): pass
def open_aa(self): self.write('å') # a with circle
def close_aa(self): pass
def open_AA(self): self.write('Å') # A with circle
def close_AA(self): pass
def open_ae(self): self.write('æ') # ae ligatures
def close_ae(self): pass
def open_AE(self): self.write('Æ') # AE ligatures
def close_AE(self): pass
def open_o(self): self.write('ø') # o with slash
def close_o(self): pass
def open_O(self): self.write('Ø') # O with slash
def close_O(self): pass
def open_ss(self): self.write('ß') # es-zet or sharp S
def close_ss(self): pass
def open_oe(self): self.write('oe') # oe ligatures
def close_oe(self): pass
def open_OE(self): self.write('OE') # OE ligatures
def close_OE(self): pass
def open_l(self): self.write('l/') # suppressed-l
def close_l(self): pass
def open_L(self): self.write('L/') # suppressed-L
def close_L(self): pass
# --- Special Glyphs for Examples ---
def open_result(self): self.write('=>')
def close_result(self): pass
def open_expansion(self): self.write('==>')
def close_expansion(self): pass
def open_print(self): self.write('-|')
def close_print(self): pass
def open_error(self): self.write('error-->')
def close_error(self): pass
def open_equiv(self): self.write('==')
def close_equiv(self): pass
def open_point(self): self.write('-!-')
def close_point(self): pass
# --- Cross References ---
def open_pxref(self):
self.write('see ')
self.startsaving()
def close_pxref(self):
self.makeref()
def open_xref(self):
self.write('See ')
self.startsaving()
def close_xref(self):
self.makeref()
def open_ref(self):
self.startsaving()
def close_ref(self):
self.makeref()
def open_inforef(self):
self.write('See info file ')
self.startsaving()
def close_inforef(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 3: args.append('')
node = args[0]
file = args[2]
self.write('`', file, '\', node `', node, '\'')
def makeref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
nodename = label = args[0]
if args[2]: label = args[2]
file = args[3]
title = args[4]
href = makefile(nodename)
if file:
href = '../' + file + '/' + href
self.write('<A HREF="', href, '">', label, '</A>')
# rpyron 2002-05-07 uref support
def open_uref(self):
self.startsaving()
def close_uref(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 2: args.append('')
href = args[0]
label = args[1]
if not label: label = href
self.write('<A HREF="', href, '">', label, '</A>')
# rpyron 2002-05-07 image support
# GNU makeinfo producing HTML output tries `filename.png'; if
# that does not exist, it tries `filename.jpg'. If that does
# not exist either, it complains. GNU makeinfo does not handle
# GIF files; however, I include GIF support here because
# MySQL documentation uses GIF files.
def open_image(self):
self.startsaving()
def close_image(self):
self.makeimage()
def makeimage(self):
text = self.collectsavings()
args = [s.strip() for s in text.split(',')]
while len(args) < 5: args.append('')
filename = args[0]
width = args[1]
height = args[2]
alt = args[3]
ext = args[4]
# The HTML output will have a reference to the image
# that is relative to the HTML output directory,
# which is what 'filename' gives us. However, we need
# to find it relative to our own current directory,
# so we construct 'imagename'.
imagelocation = self.dirname + '/' + filename
if os.path.exists(imagelocation+'.png'):
filename += '.png'
elif os.path.exists(imagelocation+'.jpg'):
filename += '.jpg'
elif os.path.exists(imagelocation+'.gif'): # MySQL uses GIF files
filename += '.gif'
else:
print("*** Cannot find image " + imagelocation)
#TODO: what is 'ext'?
self.write('<IMG SRC="', filename, '"', \
width and (' WIDTH="' + width + '"') or "", \
height and (' HEIGHT="' + height + '"') or "", \
alt and (' ALT="' + alt + '"') or "", \
'/>' )
self.htmlhelp.addimage(imagelocation)
# --- Marking Words and Phrases ---
# --- Other @xxx{...} commands ---
def open_(self): pass # Used by {text enclosed in braces}
def close_(self): pass
open_asis = open_
close_asis = close_
def open_cite(self): self.write('<CITE>')
def close_cite(self): self.write('</CITE>')
def open_code(self): self.write('<CODE>')
def close_code(self): self.write('</CODE>')
def open_t(self): self.write('<TT>')
def close_t(self): self.write('</TT>')
def open_dfn(self): self.write('<DFN>')
def close_dfn(self): self.write('</DFN>')
def open_emph(self): self.write('<EM>')
def close_emph(self): self.write('</EM>')
def open_i(self): self.write('<I>')
def close_i(self): self.write('</I>')
def open_footnote(self):
# if self.savetext is not None:
# print '*** Recursive footnote -- expect weirdness'
id = len(self.footnotes) + 1
self.write(self.FN_SOURCE_PATTERN % {'id': repr(id)})
self.startsaving()
def close_footnote(self):
id = len(self.footnotes) + 1
self.footnotes.append((id, self.collectsavings()))
def writefootnotes(self):
self.write(self.FN_HEADER)
for id, text in self.footnotes:
self.write(self.FN_TARGET_PATTERN
% {'id': repr(id), 'text': text})
self.footnotes = []
def open_file(self): self.write('<CODE>')
def close_file(self): self.write('</CODE>')
def open_kbd(self): self.write('<KBD>')
def close_kbd(self): self.write('</KBD>')
def open_key(self): self.write('<KEY>')
def close_key(self): self.write('</KEY>')
def open_r(self): self.write('<R>')
def close_r(self): self.write('</R>')
def open_samp(self): self.write('`<SAMP>')
def close_samp(self): self.write('</SAMP>\'')
def open_sc(self): self.write('<SMALLCAPS>')
def close_sc(self): self.write('</SMALLCAPS>')
def open_strong(self): self.write('<STRONG>')
def close_strong(self): self.write('</STRONG>')
def open_b(self): self.write('<B>')
def close_b(self): self.write('</B>')
def open_var(self): self.write('<VAR>')
def close_var(self): self.write('</VAR>')
def open_w(self): self.write('<NOBREAK>')
def close_w(self): self.write('</NOBREAK>')
def open_url(self): self.startsaving()
def close_url(self):
text = self.collectsavings()
self.write('<A HREF="', text, '">', text, '</A>')
def open_email(self): self.startsaving()
def close_email(self):
text = self.collectsavings()
self.write('<A HREF="mailto:', text, '">', text, '</A>')
open_titlefont = open_
close_titlefont = close_
def open_small(self): pass
def close_small(self): pass
def command(self, line, mo):
a, b = mo.span(1)
cmd = line[a:b]
args = line[b:].strip()
if self.debugging > 1:
print('!'*self.debugging, 'command:', self.skip, self.stack, \
'@' + cmd, args)
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
try:
func = getattr(self, 'bgn_' + cmd)
except AttributeError:
# don't complain if we are skipping anyway
if not self.skip:
self.unknown_cmd(cmd, args)
return
self.stack.append(cmd)
func(args)
return
if not self.skip or cmd == 'end':
func(args)
def unknown_cmd(self, cmd, args):
print('*** unknown', '@' + cmd, args)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
def do_end(self, args):
words = args.split()
if not words:
print('*** @end w/o args')
else:
cmd = words[0]
if not self.stack or self.stack[-1] != cmd:
print('*** @end', cmd, 'unexpected')
else:
del self.stack[-1]
try:
func = getattr(self, 'end_' + cmd)
except AttributeError:
self.unknown_end(cmd)
return
func()
def unknown_end(self, cmd):
cmd = 'end ' + cmd
print('*** unknown', '@' + cmd)
if cmd not in self.unknown:
self.unknown[cmd] = 1
else:
self.unknown[cmd] = self.unknown[cmd] + 1
# --- Comments ---
def do_comment(self, args): pass
do_c = do_comment
# --- Conditional processing ---
def bgn_ifinfo(self, args): pass
def end_ifinfo(self): pass
def bgn_iftex(self, args): self.skip = self.skip + 1
def end_iftex(self): self.skip = self.skip - 1
def bgn_ignore(self, args): self.skip = self.skip + 1
def end_ignore(self): self.skip = self.skip - 1
def bgn_tex(self, args): self.skip = self.skip + 1
def end_tex(self): self.skip = self.skip - 1
def do_set(self, args):
fields = args.split(' ')
key = fields[0]
if len(fields) == 1:
value = 1
else:
value = ' '.join(fields[1:])
self.values[key] = value
def do_clear(self, args):
self.values[args] = None
def bgn_ifset(self, args):
if args not in self.values or self.values[args] is None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifset(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print('*** end_ifset: KeyError :', len(self.stack) + 1)
def bgn_ifclear(self, args):
if args in self.values and self.values[args] is not None:
self.skip = self.skip + 1
self.stackinfo[len(self.stack)] = 1
else:
self.stackinfo[len(self.stack)] = 0
def end_ifclear(self):
try:
if self.stackinfo[len(self.stack) + 1]:
self.skip = self.skip - 1
del self.stackinfo[len(self.stack) + 1]
except KeyError:
print('*** end_ifclear: KeyError :', len(self.stack) + 1)
def open_value(self):
self.startsaving()
def close_value(self):
key = self.collectsavings()
if key in self.values:
self.write(self.values[key])
else:
print('*** Undefined value: ', key)
# --- Beginning a file ---
do_finalout = do_comment
do_setchapternewpage = do_comment
do_setfilename = do_comment
def do_settitle(self, args):
self.startsaving()
self.expand(args)
self.title = self.collectsavings()
def do_parskip(self, args): pass
# --- Ending a file ---
def do_bye(self, args):
self.endnode()
self.done = 1
# --- Title page ---
def bgn_titlepage(self, args): self.skip = self.skip + 1
def end_titlepage(self): self.skip = self.skip - 1
def do_shorttitlepage(self, args): pass
def do_center(self, args):
# Actually not used outside title page...
self.write('<H1>')
self.expand(args)
self.write('</H1>\n')
do_title = do_center
do_subtitle = do_center
do_author = do_center
do_vskip = do_comment
do_vfill = do_comment
do_smallbook = do_comment
do_paragraphindent = do_comment
do_setchapternewpage = do_comment
do_headings = do_comment
do_footnotestyle = do_comment
do_evenheading = do_comment
do_evenfooting = do_comment
do_oddheading = do_comment
do_oddfooting = do_comment
do_everyheading = do_comment
do_everyfooting = do_comment
# --- Nodes ---
def do_node(self, args):
self.endnode()
self.nodelineno = 0
parts = [s.strip() for s in args.split(',')]
while len(parts) < 4: parts.append('')
self.nodelinks = parts
[name, next, prev, up] = parts[:4]
file = self.dirname + '/' + makefile(name)
if file in self.filenames:
print('*** Filename already in use: ', file)
else:
if self.debugging: print('!'*self.debugging, '--- writing', file)
self.filenames[file] = 1
# self.nodefp = open(file, 'w')
self.nodename = name
if self.cont and self.nodestack:
self.nodestack[-1].cont = self.nodename
if not self.topname: self.topname = name
title = name
if self.title: title = title + ' -- ' + self.title
self.node = self.Node(self.dirname, self.nodename, self.topname,
title, next, prev, up)
self.htmlhelp.addnode(self.nodename,next,prev,up,file)
def link(self, label, nodename):
if nodename:
if nodename.lower() == '(dir)':
addr = '../dir.html'
else:
addr = makefile(nodename)
self.write(label, ': <A HREF="', addr, '" TYPE="',
label, '">', nodename, '</A> \n')
# --- Sectioning commands ---
def popstack(self, type):
if (self.node):
self.node.type = type
while self.nodestack:
if self.nodestack[-1].type > type:
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
elif self.nodestack[-1].type == type:
if not self.nodestack[-1].next:
self.nodestack[-1].next = self.node.name
if not self.node.prev:
self.node.prev = self.nodestack[-1].name
self.nodestack[-1].finalize()
self.nodestack[-1].flush()
del self.nodestack[-1]
else:
if type > 1 and not self.node.up:
self.node.up = self.nodestack[-1].name
break
def do_chapter(self, args):
self.heading('H1', args, 0)
self.popstack(1)
def do_unnumbered(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_appendix(self, args):
self.heading('H1', args, -1)
self.popstack(1)
def do_top(self, args):
self.heading('H1', args, -1)
def do_chapheading(self, args):
self.heading('H1', args, -1)
def do_majorheading(self, args):
self.heading('H1', args, -1)
def do_section(self, args):
self.heading('H1', args, 1)
self.popstack(2)
def do_unnumberedsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
def do_appendixsec(self, args):
self.heading('H1', args, -1)
self.popstack(2)
do_appendixsection = do_appendixsec
def do_heading(self, args):
self.heading('H1', args, -1)
def do_subsection(self, args):
self.heading('H2', args, 2)
self.popstack(3)
def do_unnumberedsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_appendixsubsec(self, args):
self.heading('H2', args, -1)
self.popstack(3)
def do_subheading(self, args):
self.heading('H2', args, -1)
def do_subsubsection(self, args):
self.heading('H3', args, 3)
self.popstack(4)
def do_unnumberedsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_appendixsubsubsec(self, args):
self.heading('H3', args, -1)
self.popstack(4)
def do_subsubheading(self, args):
self.heading('H3', args, -1)
def heading(self, type, args, level):
if level >= 0:
while len(self.numbering) <= level:
self.numbering.append(0)
del self.numbering[level+1:]
self.numbering[level] = self.numbering[level] + 1
x = ''
for i in self.numbering:
x = x + repr(i) + '.'
args = x + ' ' + args
self.contents.append((level, args, self.nodename))
self.write('<', type, '>')
self.expand(args)
self.write('</', type, '>\n')
if self.debugging or self.print_headers:
print('---', args)
def do_contents(self, args):
# pass
self.listcontents('Table of Contents', 999)
def do_shortcontents(self, args):
pass
# self.listcontents('Short Contents', 0)
do_summarycontents = do_shortcontents
def listcontents(self, title, maxlevel):
self.write('<H1>', title, '</H1>\n<UL COMPACT PLAIN>\n')
prevlevels = [0]
for level, title, node in self.contents:
if level > maxlevel:
continue
if level > prevlevels[-1]:
# can only advance one level at a time
self.write(' '*prevlevels[-1], '<UL PLAIN>\n')
prevlevels.append(level)
elif level < prevlevels[-1]:
# might drop back multiple levels
while level < prevlevels[-1]:
del prevlevels[-1]
self.write(' '*prevlevels[-1],
'</UL>\n')
self.write(' '*level, '<LI> <A HREF="',
makefile(node), '">')
self.expand(title)
self.write('</A>\n')
self.write('</UL>\n' * len(prevlevels))
# --- Page lay-out ---
# These commands are only meaningful in printed text
def do_page(self, args): pass
def do_need(self, args): pass
def bgn_group(self, args): pass
def end_group(self): pass
# --- Line lay-out ---
def do_sp(self, args):
if self.nofill:
self.write('\n')
else:
self.write('<P>\n')
def do_hline(self, args):
self.write('<HR>')
# --- Function and variable definitions ---
def bgn_deffn(self, args):
self.write('<DL>')
self.do_deffnx(args)
def end_deffn(self):
self.write('</DL>\n')
def do_deffnx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_defun(self, args): self.bgn_deffn('Function ' + args)
end_defun = end_deffn
def do_defunx(self, args): self.do_deffnx('Function ' + args)
def bgn_defmac(self, args): self.bgn_deffn('Macro ' + args)
end_defmac = end_deffn
def do_defmacx(self, args): self.do_deffnx('Macro ' + args)
def bgn_defspec(self, args): self.bgn_deffn('{Special Form} ' + args)
end_defspec = end_deffn
def do_defspecx(self, args): self.do_deffnx('{Special Form} ' + args)
def bgn_defvr(self, args):
self.write('<DL>')
self.do_defvrx(args)
end_defvr = end_deffn
def do_defvrx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@code{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('vr', name)
def bgn_defvar(self, args): self.bgn_defvr('Variable ' + args)
end_defvar = end_defvr
def do_defvarx(self, args): self.do_defvrx('Variable ' + args)
def bgn_defopt(self, args): self.bgn_defvr('{User Option} ' + args)
end_defopt = end_defvr
def do_defoptx(self, args): self.do_defvrx('{User Option} ' + args)
# --- Ditto for typed languages ---
def bgn_deftypefn(self, args):
self.write('<DL>')
self.do_deftypefnx(args)
end_deftypefn = end_deffn
def do_deftypefnx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypefun(self, args): self.bgn_deftypefn('Function ' + args)
end_deftypefun = end_deftypefn
def do_deftypefunx(self, args): self.do_deftypefnx('Function ' + args)
def bgn_deftypevr(self, args):
self.write('<DL>')
self.do_deftypevrx(args)
end_deftypevr = end_deftypefn
def do_deftypevrx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, datatype, name], rest = words[:3], words[3:]
self.expand('@code{%s} @b{%s}' % (datatype, name))
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('fn', name)
def bgn_deftypevar(self, args):
self.bgn_deftypevr('Variable ' + args)
end_deftypevar = end_deftypevr
def do_deftypevarx(self, args):
self.do_deftypevrx('Variable ' + args)
# --- Ditto for object-oriented languages ---
def bgn_defcv(self, args):
self.write('<DL>')
self.do_defcvx(args)
end_defcv = end_deftypevr
def do_defcvx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
# If there are too many arguments, show them
for word in rest: self.expand(' ' + word)
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('vr', '%s @r{on %s}' % (name, classname))
def bgn_defivar(self, args):
self.bgn_defcv('{Instance Variable} ' + args)
end_defivar = end_defcv
def do_defivarx(self, args):
self.do_defcvx('{Instance Variable} ' + args)
def bgn_defop(self, args):
self.write('<DL>')
self.do_defopx(args)
end_defop = end_defcv
def do_defopx(self, args):
self.write('<DT>')
words = splitwords(args, 3)
[category, classname, name], rest = words[:3], words[3:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + makevar(word))
#self.expand(' -- %s of @code{%s}' % (category, classname))
self.write('\n<DD>')
self.index('fn', '%s @r{on %s}' % (name, classname))
def bgn_defmethod(self, args):
self.bgn_defop('Method ' + args)
end_defmethod = end_defop
def do_defmethodx(self, args):
self.do_defopx('Method ' + args)
# --- Ditto for data types ---
def bgn_deftp(self, args):
self.write('<DL>')
self.do_deftpx(args)
end_deftp = end_defcv
def do_deftpx(self, args):
self.write('<DT>')
words = splitwords(args, 2)
[category, name], rest = words[:2], words[2:]
self.expand('@b{%s}' % name)
for word in rest: self.expand(' ' + word)
#self.expand(' -- ' + category)
self.write('\n<DD>')
self.index('tp', name)
# --- Making Lists and Tables
def bgn_enumerate(self, args):
if not args:
self.write('<OL>\n')
self.stackinfo[len(self.stack)] = '</OL>\n'
else:
self.itemnumber = args
self.write('<UL>\n')
self.stackinfo[len(self.stack)] = '</UL>\n'
def end_enumerate(self):
self.itemnumber = None
self.write(self.stackinfo[len(self.stack) + 1])
del self.stackinfo[len(self.stack) + 1]
def bgn_itemize(self, args):
self.itemarg = args
self.write('<UL>\n')
def end_itemize(self):
self.itemarg = None
self.write('</UL>\n')
def bgn_table(self, args):
self.itemarg = args
self.write('<DL>\n')
def end_table(self):
self.itemarg = None
self.write('</DL>\n')
def bgn_ftable(self, args):
self.itemindex = 'fn'
self.bgn_table(args)
def end_ftable(self):
self.itemindex = None
self.end_table()
def bgn_vtable(self, args):
self.itemindex = 'vr'
self.bgn_table(args)
def end_vtable(self):
self.itemindex = None
self.end_table()
def do_item(self, args):
if self.itemindex: self.index(self.itemindex, args)
if self.itemarg:
            if self.itemarg[0] == '@' and self.itemarg[1:] and \
               self.itemarg[1] in string.ascii_letters:
args = self.itemarg + '{' + args + '}'
else:
# some other character, e.g. '-'
args = self.itemarg + ' ' + args
if self.itemnumber is not None:
args = self.itemnumber + '. ' + args
self.itemnumber = increment(self.itemnumber)
if self.stack and self.stack[-1] == 'table':
self.write('<DT>')
self.expand(args)
self.write('\n<DD>')
elif self.stack and self.stack[-1] == 'multitable':
self.write('<TR><TD>')
self.expand(args)
self.write('</TD>\n</TR>\n')
else:
self.write('<LI>')
self.expand(args)
self.write(' ')
do_itemx = do_item # XXX Should suppress leading blank line
# rpyron 2002-05-07 multitable support
def bgn_multitable(self, args):
self.itemarg = None # should be handled by columnfractions
self.write('<TABLE BORDER="">\n')
def end_multitable(self):
self.itemarg = None
self.write('</TABLE>\n<BR>\n')
def handle_columnfractions(self):
# It would be better to handle this, but for now it's in the way...
self.itemarg = None
def handle_tab(self):
self.write('</TD>\n <TD>')
# --- Enumerations, displays, quotations ---
# XXX Most of these should increase the indentation somehow
def bgn_quotation(self, args): self.write('<BLOCKQUOTE>')
def end_quotation(self): self.write('</BLOCKQUOTE>\n')
def bgn_example(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>')
def end_example(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
bgn_lisp = bgn_example # Synonym when contents are executable lisp code
end_lisp = end_example
bgn_smallexample = bgn_example # XXX Should use smaller font
end_smallexample = end_example
bgn_smalllisp = bgn_lisp # Ditto
end_smalllisp = end_lisp
bgn_display = bgn_example
end_display = end_example
bgn_format = bgn_display
end_format = end_display
def do_exdent(self, args): self.expand(args + '\n')
# XXX Should really mess with indentation
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE>\n')
def end_flushleft(self):
self.write('</PRE>\n')
self.nofill = self.nofill - 1
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<DIR>\n')
self.write(' <STRONG><EM>Menu</EM></STRONG><P>\n')
self.htmlhelp.beginmenu()
def end_menu(self):
self.write('</DIR>\n')
self.htmlhelp.endmenu()
def bgn_cartouche(self, args): pass
def end_cartouche(self): pass
# --- Indices ---
def resetindex(self):
self.noncodeindices = ['cp']
self.indextitle = {}
self.indextitle['cp'] = 'Concept'
self.indextitle['fn'] = 'Function'
self.indextitle['ky'] = 'Keyword'
self.indextitle['pg'] = 'Program'
self.indextitle['tp'] = 'Type'
self.indextitle['vr'] = 'Variable'
#
self.whichindex = {}
for name in self.indextitle:
self.whichindex[name] = []
def user_index(self, name, args):
if name in self.whichindex:
self.index(name, args)
else:
print('*** No index named', repr(name))
def do_cindex(self, args): self.index('cp', args)
def do_findex(self, args): self.index('fn', args)
def do_kindex(self, args): self.index('ky', args)
def do_pindex(self, args): self.index('pg', args)
def do_tindex(self, args): self.index('tp', args)
def do_vindex(self, args): self.index('vr', args)
def index(self, name, args):
self.whichindex[name].append((args, self.nodename))
self.htmlhelp.index(args, self.nodename)
def do_synindex(self, args):
words = args.split()
if len(words) != 2:
print('*** bad @synindex', args)
return
[old, new] = words
if old not in self.whichindex or \
new not in self.whichindex:
print('*** bad key(s) in @synindex', args)
return
if old != new and \
self.whichindex[old] is not self.whichindex[new]:
inew = self.whichindex[new]
inew[len(inew):] = self.whichindex[old]
self.whichindex[old] = inew
do_syncodeindex = do_synindex # XXX Should use code font
def do_printindex(self, args):
words = args.split()
for name in words:
if name in self.whichindex:
self.prindex(name)
else:
print('*** No index named', repr(name))
def prindex(self, name):
iscodeindex = (name not in self.noncodeindices)
index = self.whichindex[name]
if not index: return
if self.debugging:
print('!'*self.debugging, '--- Generating', \
self.indextitle[name], 'index')
# The node already provides a title
index1 = []
junkprog = re.compile('^(@[a-z]+)?{')
for key, node in index:
sortkey = key.lower()
# Remove leading `@cmd{' from sort key
# -- don't bother about the matching `}'
oldsortkey = sortkey
while 1:
mo = junkprog.match(sortkey)
if not mo:
break
i = mo.end()
sortkey = sortkey[i:]
index1.append((sortkey, key, node))
del index[:]
index1.sort()
self.write('<DL COMPACT>\n')
prevkey = prevnode = None
for sortkey, key, node in index1:
if (key, node) == (prevkey, prevnode):
continue
if self.debugging > 1: print('!'*self.debugging, key, ':', node)
self.write('<DT>')
if iscodeindex: key = '@code{' + key + '}'
if key != prevkey:
self.expand(key)
self.write('\n<DD><A HREF="%s">%s</A>\n' % (makefile(node), node))
prevkey, prevnode = key, node
self.write('</DL>\n')
# --- Final error reports ---
def report(self):
if self.unknown:
print('--- Unrecognized commands ---')
cmds = sorted(self.unknown.keys())
for cmd in cmds:
print(cmd.ljust(20), self.unknown[cmd])
class TexinfoParserHTML3(TexinfoParser):
COPYRIGHT_SYMBOL = "©"
FN_ID_PATTERN = "[%(id)s]"
FN_SOURCE_PATTERN = '<A ID=footnoteref%(id)s ' \
'HREF="#footnotetext%(id)s">' + FN_ID_PATTERN + '</A>'
FN_TARGET_PATTERN = '<FN ID=footnotetext%(id)s>\n' \
'<P><A HREF="#footnoteref%(id)s">' + FN_ID_PATTERN \
+ '</A>\n%(text)s</P></FN>\n'
FN_HEADER = '<DIV CLASS=footnotes>\n <HR NOSHADE WIDTH=200>\n' \
' <STRONG><EM>Footnotes</EM></STRONG>\n <P>\n'
Node = HTML3Node
def bgn_quotation(self, args): self.write('<BQ>')
def end_quotation(self): self.write('</BQ>\n')
def bgn_example(self, args):
# this use of <CODE> would not be legal in HTML 2.0,
# but is in more recent DTDs.
self.nofill = self.nofill + 1
self.write('<PRE CLASS=example><CODE>')
def end_example(self):
self.write("</CODE></PRE>\n")
self.nofill = self.nofill - 1
def bgn_flushleft(self, args):
self.nofill = self.nofill + 1
self.write('<PRE CLASS=flushleft>\n')
def bgn_flushright(self, args):
self.nofill = self.nofill + 1
self.write('<DIV ALIGN=right CLASS=flushright><ADDRESS COMPACT>\n')
def end_flushright(self):
self.write('</ADDRESS></DIV>\n')
self.nofill = self.nofill - 1
def bgn_menu(self, args):
self.write('<UL PLAIN CLASS=menu>\n')
self.write(' <LH>Menu</LH>\n')
def end_menu(self):
self.write('</UL>\n')
# rpyron 2002-05-07
class HTMLHelp:
"""
This class encapsulates support for HTML Help. Node names,
file names, menu items, index items, and image file names are
accumulated until a call to finalize(). At that time, three
output files are created in the current directory:
`helpbase`.hhp is a HTML Help Workshop project file.
It contains various information, some of
which I do not understand; I just copied
the default project info from a fresh
installation.
`helpbase`.hhc is the Contents file for the project.
`helpbase`.hhk is the Index file for the project.
When these files are used as input to HTML Help Workshop,
the resulting file will be named:
`helpbase`.chm
If none of the defaults in `helpbase`.hhp are changed,
the .CHM file will have Contents, Index, Search, and
Favorites tabs.
"""
codeprog = re.compile('@code{(.*?)}')
def __init__(self,helpbase,dirname):
self.helpbase = helpbase
self.dirname = dirname
self.projectfile = None
self.contentfile = None
self.indexfile = None
self.nodelist = []
self.nodenames = {} # nodename : index
self.nodeindex = {}
self.filenames = {} # filename : filename
self.indexlist = [] # (args,nodename) == (key,location)
self.current = ''
self.menudict = {}
self.dumped = {}
def addnode(self,name,next,prev,up,filename):
node = (name,next,prev,up,filename)
# add this file to dict
# retrieve list with self.filenames.values()
self.filenames[filename] = filename
# add this node to nodelist
self.nodeindex[name] = len(self.nodelist)
self.nodelist.append(node)
# set 'current' for menu items
self.current = name
self.menudict[self.current] = []
def menuitem(self,nodename):
menu = self.menudict[self.current]
menu.append(nodename)
def addimage(self,imagename):
self.filenames[imagename] = imagename
def index(self, args, nodename):
self.indexlist.append((args,nodename))
def beginmenu(self):
pass
def endmenu(self):
pass
def finalize(self):
if not self.helpbase:
return
# generate interesting filenames
resultfile = self.helpbase + '.chm'
projectfile = self.helpbase + '.hhp'
contentfile = self.helpbase + '.hhc'
indexfile = self.helpbase + '.hhk'
# generate a reasonable title
title = self.helpbase
# get the default topic file
(topname,topnext,topprev,topup,topfile) = self.nodelist[0]
defaulttopic = topfile
# PROJECT FILE
try:
fp = open(projectfile,'w')
print('[OPTIONS]', file=fp)
print('Auto Index=Yes', file=fp)
print('Binary TOC=No', file=fp)
print('Binary Index=Yes', file=fp)
print('Compatibility=1.1', file=fp)
print('Compiled file=' + resultfile + '', file=fp)
print('Contents file=' + contentfile + '', file=fp)
print('Default topic=' + defaulttopic + '', file=fp)
print('Error log file=ErrorLog.log', file=fp)
print('Index file=' + indexfile + '', file=fp)
print('Title=' + title + '', file=fp)
print('Display compile progress=Yes', file=fp)
print('Full-text search=Yes', file=fp)
print('Default window=main', file=fp)
print('', file=fp)
print('[WINDOWS]', file=fp)
print('main=,"' + contentfile + '","' + indexfile
+ '","","",,,,,0x23520,222,0x1046,[10,10,780,560],'
'0xB0000,,,,,,0', file=fp)
print('', file=fp)
print('[FILES]', file=fp)
print('', file=fp)
self.dumpfiles(fp)
fp.close()
except IOError as msg:
print(projectfile, ':', msg)
sys.exit(1)
# CONTENT FILE
try:
fp = open(contentfile,'w')
print('<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">', file=fp)
print('<!-- This file defines the table of contents -->', file=fp)
print('<HTML>', file=fp)
print('<HEAD>', file=fp)
print('<meta name="GENERATOR" '
'content="Microsoft® HTML Help Workshop 4.1">', file=fp)
print('<!-- Sitemap 1.0 -->', file=fp)
print('</HEAD>', file=fp)
print('<BODY>', file=fp)
print(' <OBJECT type="text/site properties">', file=fp)
print(' <param name="Window Styles" value="0x800025">', file=fp)
print(' <param name="comment" value="title:">', file=fp)
print(' <param name="comment" value="base:">', file=fp)
print(' </OBJECT>', file=fp)
self.dumpnodes(fp)
print('</BODY>', file=fp)
print('</HTML>', file=fp)
fp.close()
except IOError as msg:
print(contentfile, ':', msg)
sys.exit(1)
# INDEX FILE
try:
fp = open(indexfile ,'w')
print('<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">', file=fp)
print('<!-- This file defines the index -->', file=fp)
print('<HTML>', file=fp)
print('<HEAD>', file=fp)
print('<meta name="GENERATOR" '
'content="Microsoft® HTML Help Workshop 4.1">', file=fp)
print('<!-- Sitemap 1.0 -->', file=fp)
print('</HEAD>', file=fp)
print('<BODY>', file=fp)
print('<OBJECT type="text/site properties">', file=fp)
print('</OBJECT>', file=fp)
self.dumpindex(fp)
print('</BODY>', file=fp)
print('</HTML>', file=fp)
fp.close()
except IOError as msg:
print(indexfile , ':', msg)
sys.exit(1)
def dumpfiles(self, outfile=sys.stdout):
filelist = sorted(self.filenames.values())
for filename in filelist:
print(filename, file=outfile)
def dumpnodes(self, outfile=sys.stdout):
self.dumped = {}
if self.nodelist:
nodename, dummy, dummy, dummy, dummy = self.nodelist[0]
self.topnode = nodename
print('<UL>', file=outfile)
for node in self.nodelist:
self.dumpnode(node,0,outfile)
print('</UL>', file=outfile)
def dumpnode(self, node, indent=0, outfile=sys.stdout):
if node:
# Retrieve info for this node
(nodename,next,prev,up,filename) = node
self.current = nodename
# Have we been dumped already?
if nodename in self.dumped:
return
self.dumped[nodename] = 1
# Print info for this node
print(' '*indent, end=' ', file=outfile)
print('<LI><OBJECT type="text/sitemap">', end=' ', file=outfile)
print('<param name="Name" value="' + nodename +'">', end=' ', file=outfile)
print('<param name="Local" value="'+ filename +'">', end=' ', file=outfile)
print('</OBJECT>', file=outfile)
# Does this node have menu items?
try:
menu = self.menudict[nodename]
self.dumpmenu(menu,indent+2,outfile)
except KeyError:
pass
def dumpmenu(self, menu, indent=0, outfile=sys.stdout):
if menu:
currentnode = self.current
if currentnode != self.topnode: # XXX this is a hack
print(' '*indent + '<UL>', file=outfile)
indent += 2
for item in menu:
menunode = self.getnode(item)
self.dumpnode(menunode,indent,outfile)
if currentnode != self.topnode: # XXX this is a hack
print(' '*indent + '</UL>', file=outfile)
indent -= 2
def getnode(self, nodename):
try:
index = self.nodeindex[nodename]
return self.nodelist[index]
except KeyError:
return None
except IndexError:
return None
# (args,nodename) == (key,location)
def dumpindex(self, outfile=sys.stdout):
print('<UL>', file=outfile)
for (key,location) in self.indexlist:
key = self.codeexpand(key)
location = makefile(location)
location = self.dirname + '/' + location
print('<LI><OBJECT type="text/sitemap">', end=' ', file=outfile)
print('<param name="Name" value="' + key + '">', end=' ', file=outfile)
print('<param name="Local" value="' + location + '">', end=' ', file=outfile)
print('</OBJECT>', file=outfile)
print('</UL>', file=outfile)
def codeexpand(self, line):
co = self.codeprog.match(line)
if not co:
return line
bgn, end = co.span(0)
a, b = co.span(1)
line = line[:bgn] + line[a:b] + line[end:]
return line
# Put @var{} around alphabetic substrings
def makevar(str):
return '@var{'+str+'}'
# Split a string in "words" according to findwordend
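# For instance (derived from the code below), braces keep embedded spaces
# inside a single word:
#   splitwords('Function foo @var{arg one}', 2) == ['Function', 'foo', '@var{arg one}']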
def splitwords(str, minlength):
words = []
i = 0
n = len(str)
while i < n:
while i < n and str[i] in ' \t\n': i = i+1
if i >= n: break
start = i
i = findwordend(str, i, n)
words.append(str[start:i])
while len(words) < minlength: words.append('')
return words
# Find the end of a "word", matching braces and interpreting @@ @{ @}
fwprog = re.compile('[@{} ]')
def findwordend(str, i, n):
level = 0
while i < n:
mo = fwprog.search(str, i)
if not mo:
break
i = mo.start()
c = str[i]; i = i+1
if c == '@': i = i+1 # Next character is not special
elif c == '{': level = level+1
elif c == '}': level = level-1
elif c == ' ' and level <= 0: return i-1
return n
# Convert a node name into a file name
def makefile(nodename):
nodename = nodename.strip()
return fixfunnychars(nodename) + '.html'
# Characters that are perfectly safe in filenames and hyperlinks
goodchars = string.ascii_letters + string.digits + '!@-=+.'
# Replace characters that aren't perfectly safe by dashes
# Underscores are bad since Cern HTTPD treats them as delimiters for
# encoding times, so you get mismatches if you compress your files:
# a.html.gz will map to a_b.html.gz
def fixfunnychars(addr):
i = 0
while i < len(addr):
c = addr[i]
if c not in goodchars:
c = '-'
addr = addr[:i] + c + addr[i+1:]
i = i + len(c)
return addr
# Increment a string used as an enumeration
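# A few values it produces (derived from the code below):
#   increment('') == '1', increment('1') == '2', increment('9') == '10',
#   increment('z') == 'aa'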
def increment(s):
if not s:
return '1'
for sequence in string.digits, string.ascii_lowercase, string.ascii_uppercase:
lastc = s[-1]
if lastc in sequence:
i = sequence.index(lastc) + 1
if i >= len(sequence):
if len(s) == 1:
s = sequence[0]*2
if s == '00':
s = '10'
else:
s = increment(s[:-1]) + sequence[0]
else:
s = s[:-1] + sequence[i]
return s
return s # Don't increment
def test():
import sys
debugging = 0
print_headers = 0
cont = 0
html3 = 0
    helpbase = ''               # may be overridden by the -H option below
    htmlhelp = ''
    while sys.argv[1] == '-d':
debugging = debugging + 1
del sys.argv[1]
if sys.argv[1] == '-p':
print_headers = 1
del sys.argv[1]
if sys.argv[1] == '-c':
cont = 1
del sys.argv[1]
if sys.argv[1] == '-3':
html3 = 1
del sys.argv[1]
if sys.argv[1] == '-H':
helpbase = sys.argv[2]
del sys.argv[1:3]
if len(sys.argv) != 3:
print('usage: texi2hh [-d [-d]] [-p] [-c] [-3] [-H htmlhelp]', \
'inputfile outputdirectory')
sys.exit(2)
if html3:
parser = TexinfoParserHTML3()
else:
parser = TexinfoParser()
parser.cont = cont
parser.debugging = debugging
parser.print_headers = print_headers
file = sys.argv[1]
dirname = sys.argv[2]
parser.setdirname(dirname)
parser.setincludedir(os.path.dirname(file))
htmlhelp = HTMLHelp(helpbase, dirname)
parser.sethtmlhelp(htmlhelp)
try:
fp = open(file, 'r')
except IOError as msg:
print(file, ':', msg)
sys.exit(1)
parser.parse(fp)
fp.close()
parser.report()
htmlhelp.finalize()
if __name__ == "__main__":
test()
| 70,176 | 2,076 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/h2py.py | #! /usr/bin/env python3
# Read #define's and translate to Python code.
# Handle #include statements.
# Handle #define macros with one argument.
# Anything that isn't recognized or doesn't translate into valid
# Python is ignored.
# Without filename arguments, acts as a filter.
# If one or more filenames are given, output is written to corresponding
# filenames in the local directory, translated to all uppercase, with
# the extension replaced by ".py".
# By passing one or more options of the form "-i regular_expression"
# you can specify additional strings to be ignored. This is useful
# e.g. to ignore casts to u_long: simply specify "-i '(u_long)'".
# XXX To do:
# - turn trailing C comments into Python comments
# - turn C Boolean operators "&& || !" into Python "and or not"
# - what to do about #if(def)?
# - what to do about macros with multiple parameters?
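#
# Illustration (hypothetical input lines and the Python code this script
# would emit for them):
#   #define FIONREAD 0x541B        ->  FIONREAD = 0x541B
#   #define INCR(x) ((x)+1)        ->  def INCR(x): return ((x)+1)
#   #include <sys/ioctl.h>         ->  from IOCTL import *   (only if that
#                                       header was already converted in this run)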
import sys, re, getopt, os
p_define = re.compile(r'^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
p_macro = re.compile(
r'^[\t ]*#[\t ]*define[\t ]+'
r'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
p_include = re.compile(r'^[\t ]*#[\t ]*include[\t ]+<([^>\n]+)>')
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
p_cpp_comment = re.compile('//.*')
ignores = [p_comment, p_cpp_comment]
p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")
p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")
filedict = {}
importable = {}
try:
searchdirs=os.environ['include'].split(';')
except KeyError:
try:
searchdirs=os.environ['INCLUDE'].split(';')
except KeyError:
searchdirs=['/usr/include']
try:
searchdirs.insert(0, os.path.join('/usr/include',
os.environ['MULTIARCH']))
except KeyError:
pass
def main():
global filedict
opts, args = getopt.getopt(sys.argv[1:], 'i:')
for o, a in opts:
if o == '-i':
ignores.append(re.compile(a))
if not args:
args = ['-']
for filename in args:
if filename == '-':
sys.stdout.write('# Generated by h2py from stdin\n')
process(sys.stdin, sys.stdout)
else:
fp = open(filename, 'r')
outfile = os.path.basename(filename)
i = outfile.rfind('.')
if i > 0: outfile = outfile[:i]
modname = outfile.upper()
outfile = modname + '.py'
outfp = open(outfile, 'w')
outfp.write('# Generated by h2py from %s\n' % filename)
filedict = {}
for dir in searchdirs:
if filename[:len(dir)] == dir:
filedict[filename[len(dir)+1:]] = None # no '/' trailing
importable[filename[len(dir)+1:]] = modname
break
process(fp, outfp)
outfp.close()
fp.close()
def pytify(body):
# replace ignored patterns by spaces
for p in ignores:
body = p.sub(' ', body)
# replace char literals by ord(...)
body = p_char.sub("ord('\\1')", body)
# Compute negative hexadecimal constants
start = 0
UMAX = 2*(sys.maxsize+1)
while 1:
m = p_hex.search(body, start)
if not m: break
s,e = m.span()
val = int(body[slice(*m.span(1))], 16)
if val > sys.maxsize:
val -= UMAX
body = body[:s] + "(" + str(val) + ")" + body[e:]
start = s + 1
return body
def process(fp, outfp, env = {}):
lineno = 0
while 1:
line = fp.readline()
if not line: break
lineno = lineno + 1
match = p_define.match(line)
if match:
# gobble up continuation lines
while line[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: break
lineno = lineno + 1
line = line + nextline
name = match.group(1)
body = line[match.end():]
body = pytify(body)
ok = 0
stmt = '%s = %s\n' % (name, body.strip())
try:
exec(stmt, env)
except:
sys.stderr.write('Skipping: %s' % stmt)
else:
outfp.write(stmt)
match = p_macro.match(line)
if match:
macro, arg = match.group(1, 2)
body = line[match.end():]
body = pytify(body)
stmt = 'def %s(%s): return %s\n' % (macro, arg, body)
try:
exec(stmt, env)
except:
sys.stderr.write('Skipping: %s' % stmt)
else:
outfp.write(stmt)
match = p_include.match(line)
if match:
regs = match.regs
a, b = regs[1]
filename = line[a:b]
if filename in importable:
outfp.write('from %s import *\n' % importable[filename])
elif filename not in filedict:
filedict[filename] = None
inclfp = None
for dir in searchdirs:
try:
inclfp = open(dir + '/' + filename)
break
except IOError:
pass
if inclfp:
outfp.write(
'\n# Included from %s\n' % filename)
process(inclfp, outfp, env)
else:
sys.stderr.write('Warning - could not find file %s\n' %
filename)
if __name__ == '__main__':
main()
| 5,604 | 173 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/patchcheck.py | #!/usr/bin/env python3
"""Check proposed changes for common issues."""
import re
import sys
import shutil
import os.path
import subprocess
import sysconfig
import reindent
import untabify
# Excluded directories which are copies of external libraries:
# don't check their coding style
EXCLUDE_DIRS = [os.path.join('Modules', '_ctypes', 'libffi'),
os.path.join('Modules', '_ctypes', 'libffi_osx'),
os.path.join('Modules', '_ctypes', 'libffi_msvc'),
os.path.join('Modules', '_decimal', 'libmpdec'),
os.path.join('Modules', 'expat'),
os.path.join('Modules', 'zlib')]
SRCDIR = sysconfig.get_config_var('srcdir')
def n_files_str(count):
"""Return 'N file(s)' with the proper plurality on 'file'."""
return "{} file{}".format(count, "s" if count != 1 else "")
def status(message, modal=False, info=None):
"""Decorator to output status info to stdout."""
def decorated_fxn(fxn):
def call_fxn(*args, **kwargs):
sys.stdout.write(message + ' ... ')
sys.stdout.flush()
result = fxn(*args, **kwargs)
if not modal and not info:
print("done")
elif info:
print(info(result))
else:
print("yes" if result else "NO")
return result
return call_fxn
return decorated_fxn
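# Example of how @status is used (mirrors the checks defined further below):
#
#   @status("Misc/ACKS updated", modal=True)
#   def credit_given(file_paths): ...
#
# which prints "Misc/ACKS updated ... yes" (or "... NO") when the check runs.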
def get_git_branch():
"""Get the symbolic name for the current git branch"""
cmd = "git rev-parse --abbrev-ref HEAD".split()
try:
return subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR)
except subprocess.CalledProcessError:
return None
def get_git_upstream_remote():
"""Get the remote name to use for upstream branches
Uses "upstream" if it exists, "origin" otherwise
"""
cmd = "git remote get-url upstream".split()
try:
subprocess.check_output(cmd,
stderr=subprocess.DEVNULL,
cwd=SRCDIR)
except subprocess.CalledProcessError:
return "origin"
return "upstream"
@status("Getting base branch for PR",
info=lambda x: x if x is not None else "not a PR branch")
def get_base_branch():
if not os.path.exists(os.path.join(SRCDIR, '.git')):
# Not a git checkout, so there's no base branch
return None
version = sys.version_info
if version.releaselevel == 'alpha':
base_branch = "master"
else:
base_branch = "{0.major}.{0.minor}".format(version)
this_branch = get_git_branch()
if this_branch is None or this_branch == base_branch:
# Not on a git PR branch, so there's no base branch
return None
upstream_remote = get_git_upstream_remote()
return upstream_remote + "/" + base_branch
@status("Getting the list of files that have been added/changed",
info=lambda x: n_files_str(len(x)))
def changed_files(base_branch=None):
"""Get the list of changed or added files from git."""
if os.path.exists(os.path.join(SRCDIR, '.git')):
# We just use an existence check here as:
# directory = normal git checkout/clone
# file = git worktree directory
if base_branch:
cmd = 'git diff --name-status ' + base_branch
else:
cmd = 'git status --porcelain'
filenames = []
with subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
cwd=SRCDIR) as st:
for line in st.stdout:
line = line.decode().rstrip()
status_text, filename = line.split(maxsplit=1)
status = set(status_text)
# modified, added or unmerged files
if not status.intersection('MAU'):
continue
if ' -> ' in filename:
# file is renamed
filename = filename.split(' -> ', 2)[1].strip()
filenames.append(filename)
else:
sys.exit('need a git checkout to get modified files')
filenames2 = []
for filename in filenames:
# Normalize the path to be able to match using .startswith()
filename = os.path.normpath(filename)
if any(filename.startswith(path) for path in EXCLUDE_DIRS):
# Exclude the file
continue
filenames2.append(filename)
return filenames2
def report_modified_files(file_paths):
count = len(file_paths)
if count == 0:
return n_files_str(count)
else:
lines = ["{}:".format(n_files_str(count))]
for path in file_paths:
lines.append(" {}".format(path))
return "\n".join(lines)
@status("Fixing Python file whitespace", info=report_modified_files)
def normalize_whitespace(file_paths):
"""Make sure that the whitespace for .py files have been normalized."""
reindent.makebackup = False # No need to create backups.
fixed = [path for path in file_paths if path.endswith('.py') and
reindent.check(os.path.join(SRCDIR, path))]
return fixed
@status("Fixing C file whitespace", info=report_modified_files)
def normalize_c_whitespace(file_paths):
"""Report if any C files """
fixed = []
for path in file_paths:
abspath = os.path.join(SRCDIR, path)
with open(abspath, 'r') as f:
if '\t' not in f.read():
continue
untabify.process(abspath, 8, verbose=False)
fixed.append(path)
return fixed
ws_re = re.compile(br'\s+(\r?\n)$')
@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
fixed = []
for path in file_paths:
abspath = os.path.join(SRCDIR, path)
try:
with open(abspath, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(br'\1', line) for line in lines]
if new_lines != lines:
shutil.copyfile(abspath, abspath + '.bak')
with open(abspath, 'wb') as f:
f.writelines(new_lines)
fixed.append(path)
except Exception as err:
print('Cannot fix %s: %s' % (path, err))
return fixed
@status("Docs modified", modal=True)
def docs_modified(file_paths):
"""Report if any file in the Doc directory has been changed."""
return bool(file_paths)
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
"""Check if Misc/ACKS has been changed."""
return os.path.join('Misc', 'ACKS') in file_paths
@status("Misc/NEWS.d updated with `blurb`", modal=True)
def reported_news(file_paths):
"""Check if Misc/NEWS.d has been changed."""
return any(p.startswith(os.path.join('Misc', 'NEWS.d', 'next'))
for p in file_paths)
@status("configure regenerated", modal=True, info=str)
def regenerated_configure(file_paths):
"""Check if configure has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'configure' in file_paths else "no"
else:
return "not needed"
@status("pyconfig.h.in regenerated", modal=True, info=str)
def regenerated_pyconfig_h_in(file_paths):
"""Check if pyconfig.h.in has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'pyconfig.h.in' in file_paths else "no"
else:
return "not needed"
def travis(pull_request):
if pull_request == 'false':
print('Not a pull request; skipping')
return
base_branch = get_base_branch()
file_paths = changed_files(base_branch)
python_files = [fn for fn in file_paths if fn.endswith('.py')]
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
fn.endswith(('.rst', '.inc'))]
fixed = []
fixed.extend(normalize_whitespace(python_files))
fixed.extend(normalize_c_whitespace(c_files))
fixed.extend(normalize_docs_whitespace(doc_files))
if not fixed:
print('No whitespace issues found')
else:
print(f'Please fix the {len(fixed)} file(s) with whitespace issues')
print('(on UNIX you can run `make patchcheck` to make the fixes)')
sys.exit(1)
def main():
base_branch = get_base_branch()
file_paths = changed_files(base_branch)
python_files = [fn for fn in file_paths if fn.endswith('.py')]
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
fn.endswith(('.rst', '.inc'))]
misc_files = {p for p in file_paths if p.startswith('Misc')}
# PEP 8 whitespace rules enforcement.
normalize_whitespace(python_files)
# C rules enforcement.
normalize_c_whitespace(c_files)
# Doc whitespace enforcement.
normalize_docs_whitespace(doc_files)
# Docs updated.
docs_modified(doc_files)
# Misc/ACKS changed.
credit_given(misc_files)
# Misc/NEWS changed.
reported_news(misc_files)
# Regenerated configure, if necessary.
regenerated_configure(file_paths)
# Regenerated pyconfig.h.in, if necessary.
regenerated_pyconfig_h_in(file_paths)
# Test suite run and passed.
if python_files or c_files:
end = " and check for refleaks?" if c_files else "?"
print()
print("Did you run the test suite" + end)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--travis',
help='Perform pass/fail checks')
args = parser.parse_args()
if args.travis:
travis(args.travis)
else:
main()
| 9,850 | 287 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/pysource.py | #!/usr/bin/env python3
"""\
List python source files.
There are three functions to check whether a file is a Python source, listed
here with increasing complexity:
- has_python_ext() checks whether a file name ends in '.py[w]'.
- look_like_python() checks whether the file is not binary and either has
the '.py[w]' extension or the first line contains the word 'python'.
- can_be_compiled() checks whether the file can be compiled by compile().
The file also must be of appropriate size - not bigger than a megabyte.
walk_python_files() recursively lists all Python files under the given directories.
"""
__author__ = "Oleg Broytmann, Georg Brandl"
__all__ = ["has_python_ext", "looks_like_python", "can_be_compiled", "walk_python_files"]
import os, re
binary_re = re.compile(br'[\x00-\x08\x0E-\x1F\x7F]')
debug = False
def print_debug(msg):
if debug: print(msg)
def _open(fullpath):
try:
size = os.stat(fullpath).st_size
except OSError as err: # Permission denied - ignore the file
print_debug("%s: permission denied: %s" % (fullpath, err))
return None
if size > 1024*1024: # too big
print_debug("%s: the file is too big: %d bytes" % (fullpath, size))
return None
try:
return open(fullpath, "rb")
except IOError as err: # Access denied, or a special file - ignore it
print_debug("%s: access denied: %s" % (fullpath, err))
return None
def has_python_ext(fullpath):
return fullpath.endswith(".py") or fullpath.endswith(".pyw")
def looks_like_python(fullpath):
infile = _open(fullpath)
if infile is None:
return False
with infile:
line = infile.readline()
if binary_re.search(line):
# file appears to be binary
print_debug("%s: appears to be binary" % fullpath)
return False
if fullpath.endswith(".py") or fullpath.endswith(".pyw"):
return True
elif b"python" in line:
# disguised Python script (e.g. CGI)
return True
return False
def can_be_compiled(fullpath):
infile = _open(fullpath)
if infile is None:
return False
with infile:
code = infile.read()
try:
compile(code, fullpath, "exec")
except Exception as err:
print_debug("%s: cannot compile: %s" % (fullpath, err))
return False
return True
def walk_python_files(paths, is_python=looks_like_python, exclude_dirs=None):
"""\
Recursively yield all Python source files below the given paths.
paths: a list of files and/or directories to be checked.
is_python: a function that takes a file name and checks whether it is a
Python source file
exclude_dirs: a list of directory base names that should be excluded in
the search
"""
if exclude_dirs is None:
exclude_dirs=[]
for path in paths:
print_debug("testing: %s" % path)
if os.path.isfile(path):
if is_python(path):
yield path
elif os.path.isdir(path):
print_debug(" it is a directory")
for dirpath, dirnames, filenames in os.walk(path):
for exclude in exclude_dirs:
if exclude in dirnames:
dirnames.remove(exclude)
for filename in filenames:
fullpath = os.path.join(dirpath, filename)
print_debug("testing: %s" % fullpath)
if is_python(fullpath):
yield fullpath
else:
print_debug(" unknown type")
if __name__ == "__main__":
# Two simple examples/tests
for fullpath in walk_python_files(['.']):
print(fullpath)
print("----------")
for fullpath in walk_python_files(['.'], is_python=can_be_compiled):
print(fullpath)
| 3,864 | 131 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/combinerefs.py | #! /usr/bin/env python3
"""
combinerefs path
A helper for analyzing PYTHONDUMPREFS output.
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
time Py_FinalizeEx() prints the list of all live objects twice: first it
prints the repr() of each object while the interpreter is still fully intact.
After cleaning up everything it can, it prints all remaining live objects
again, but the second time just prints their addresses, refcounts, and type
names (because the interpreter has been torn down, calling repr methods at
this point can get into infinite loops or blow up).
Save all this output into a file, then run this script passing the path to
that file. The script finds both output chunks, combines them, then prints
a line of output for each object still alive at the end:
address refcnt typename repr
address is the address of the object, in whatever format the platform C
produces for a %p format code.
refcnt is of the form
"[" ref "]"
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
or
"[" ref_before "->" ref_after "]"
if the refcount changed.
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
output block.
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
CAUTION: If object is a container type, it may not actually contain all the
objects shown in the repr: the repr was captured from the first output block,
and some of the containees may have been released since then. For example,
it's common for the line showing the dict of interned strings to display
strings that no longer exist at the end of Py_FinalizeEx; this can be recognized
(albeit painfully) because such containees don't have a line of their own.
The objects are listed in allocation order, with most-recently allocated
printed first, and the first object allocated printed last.
Simple examples:
00857060 [14] str '__len__'
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
output blocks said there were 14 references to it. This is probably due to
C modules that intern the string "__len__" and keep a reference to it in a
file static.
00857038 [46->5] tuple ()
46-5 = 41 references to the empty tuple were removed by the cleanup actions
between the times PYTHONDUMPREFS produced output.
00858028 [1025->1456] str '<dummy key>'
The string '<dummy key>', which is used in dictobject.c to overwrite a real
key that gets deleted, grew several hundred references during cleanup. It
suggests that stuff did get removed from dicts by cleanup, but that the dicts
themselves are staying alive for some reason. """
import re
import sys
# Generate lines from fileiter. If whilematch is true, continue reading
# while the regexp object pat matches line. If whilematch is false, lines
# are read so long as pat doesn't match them. In any case, the first line
# that doesn't match pat (when whilematch is true), or that does match pat
# (when whilematch is false), is lost, and fileiter will resume at the line
# following it.
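# For example, read(fileiter, re.compile(r'^Remaining objects:$'), False)
# yields every line up to the first "Remaining objects:" line, which is how
# combine() below skips over the preamble of the dump.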
def read(fileiter, pat, whilematch):
for line in fileiter:
if bool(pat.match(line)) == whilematch:
yield line
else:
break
def combine(fname):
f = open(fname)
fi = iter(f)
for line in read(fi, re.compile(r'^Remaining objects:$'), False):
pass
crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
addr2rc = {}
addr2guts = {}
before = 0
for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
m = crack.match(line)
if m:
addr, addr2rc[addr], addr2guts[addr] = m.groups()
before += 1
else:
print('??? skipped:', line)
after = 0
for line in read(fi, crack, True):
after += 1
m = crack.match(line)
assert m
addr, rc, guts = m.groups() # guts is type name here
if addr not in addr2rc:
print('??? new object created while tearing down:', line.rstrip())
continue
print(addr, end=' ')
if rc == addr2rc[addr]:
print('[%s]' % rc, end=' ')
else:
print('[%s->%s]' % (addr2rc[addr], rc), end=' ')
print(guts, addr2guts[addr])
f.close()
print("%d objects before, %d after" % (before, after))
if __name__ == '__main__':
combine(sys.argv[1])
| 4,418 | 129 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/find-uname.py | #!/usr/bin/env python3
"""
For each argument on the command line, look for it in the set of all Unicode
names. Arguments are treated as case-insensitive regular expressions, e.g.:
% find-uname 'small letter a$' 'horizontal line'
*** small letter a$ matches ***
LATIN SMALL LETTER A (97)
COMBINING LATIN SMALL LETTER A (867)
CYRILLIC SMALL LETTER A (1072)
PARENTHESIZED LATIN SMALL LETTER A (9372)
CIRCLED LATIN SMALL LETTER A (9424)
FULLWIDTH LATIN SMALL LETTER A (65345)
*** horizontal line matches ***
HORIZONTAL LINE EXTENSION (9135)
"""
import unicodedata
import sys
import re
def main(args):
unicode_names = []
for ix in range(sys.maxunicode+1):
try:
unicode_names.append((ix, unicodedata.name(chr(ix))))
except ValueError: # no name for the character
pass
for arg in args:
pat = re.compile(arg, re.I)
matches = [(y,x) for (x,y) in unicode_names
if pat.search(y) is not None]
if matches:
print("***", arg, "matches", "***")
for match in matches:
print("%s (%d)" % match)
if __name__ == "__main__":
main(sys.argv[1:])
| 1,207 | 41 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/serve.py | #!/usr/bin/env python3
'''
Small wsgiref based web server. Takes a path to serve from and an
optional port number (defaults to 8000), then tries to serve files.
Mime types are guessed from the file names, 404 errors are raised
if the file is not found. Used for the make serve target in Doc.
'''
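# Example invocation (path and port are illustrative):
#   python3 serve.py build/html 8000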
import sys
import os
import mimetypes
from wsgiref import simple_server, util
def app(environ, respond):
fn = os.path.join(path, environ['PATH_INFO'][1:])
if '.' not in fn.split(os.path.sep)[-1]:
fn = os.path.join(fn, 'index.html')
type = mimetypes.guess_type(fn)[0]
if os.path.exists(fn):
respond('200 OK', [('Content-Type', type)])
return util.FileWrapper(open(fn, "rb"))
else:
respond('404 Not Found', [('Content-Type', 'text/plain')])
return [b'not found']
if __name__ == '__main__':
path = sys.argv[1]
port = int(sys.argv[2]) if len(sys.argv) > 2 else 8000
httpd = simple_server.make_server('', port, app)
print("Serving {} on port {}, control-C to stop".format(path, port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\b\bShutting down.")
| 1,161 | 36 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/pdeps.py | #! /usr/bin/env python3
# pdeps
#
# Find dependencies between a bunch of Python modules.
#
# Usage:
# pdeps file1.py file2.py ...
#
# Output:
# Four tables separated by lines like '--- Closure ---':
# 1) Direct dependencies, listing which module imports which other modules
# 2) The inverse of (1)
# 3) Indirect dependencies, or the closure of the above
# 4) The inverse of (3)
#
# To do:
# - command line options to select output type
# - option to automatically scan the Python library for referenced modules
# - option to limit output to particular modules
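#
# Example: for two hypothetical files a.py (containing "import b") and b.py,
# the "--- Uses ---" table prints "a : b", the "--- Used By ---" table prints
# "b : a", and the closure tables additionally list indirect dependencies.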
import sys
import re
import os
# Main program
#
def main():
args = sys.argv[1:]
if not args:
print('usage: pdeps file.py file.py ...')
return 2
#
table = {}
for arg in args:
process(arg, table)
#
print('--- Uses ---')
printresults(table)
#
print('--- Used By ---')
inv = inverse(table)
printresults(inv)
#
print('--- Closure of Uses ---')
reach = closure(table)
printresults(reach)
#
print('--- Closure of Used By ---')
invreach = inverse(reach)
printresults(invreach)
#
return 0
# Compiled regular expressions to search for import statements
#
m_import = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
m_from = re.compile('^[ \t]*import[ \t]+([^#]+)')
# Collect data from one file
#
def process(filename, table):
fp = open(filename, 'r')
mod = os.path.basename(filename)
if mod[-3:] == '.py':
mod = mod[:-3]
table[mod] = list = []
while 1:
line = fp.readline()
if not line: break
while line[-1:] == '\\':
nextline = fp.readline()
if not nextline: break
line = line[:-1] + nextline
m_found = m_import.match(line) or m_from.match(line)
if m_found:
(a, b), (a1, b1) = m_found.regs[:2]
else: continue
words = line[a1:b1].split(',')
# print '#', line, words
for word in words:
word = word.strip()
if word not in list:
list.append(word)
fp.close()
# Compute closure (this is in fact totally general)
#
def closure(table):
modules = list(table.keys())
#
# Initialize reach with a copy of table
#
reach = {}
for mod in modules:
reach[mod] = table[mod][:]
#
# Iterate until no more change
#
change = 1
while change:
change = 0
for mod in modules:
for mo in reach[mod]:
if mo in modules:
for m in reach[mo]:
if m not in reach[mod]:
reach[mod].append(m)
change = 1
#
return reach
# Invert a table (this is again totally general).
# All keys of the original table are made keys of the inverse,
# so there may be empty lists in the inverse.
#
def inverse(table):
inv = {}
for key in table.keys():
if key not in inv:
inv[key] = []
for item in table[key]:
store(inv, item, key)
return inv
# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
if key in dict:
dict[key].append(item)
else:
dict[key] = [item]
# Tabulate results neatly
#
def printresults(table):
modules = sorted(table.keys())
maxlen = 0
for mod in modules: maxlen = max(maxlen, len(mod))
for mod in modules:
list = sorted(table[mod])
print(mod.ljust(maxlen), ':', end=' ')
if mod in list:
print('(*)', end=' ')
for ref in list:
print(ref, end=' ')
print()
# Call main and honor exit status
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
| 3,915 | 166 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/mailerdaemon.py | #!/usr/bin/env python3
"""Classes to parse mailer-daemon messages."""
import calendar
import email.message
import re
import os
import sys
class Unparseable(Exception):
pass
class ErrorMessage(email.message.Message):
def __init__(self):
email.message.Message.__init__(self)
self.sub = ''
def is_warning(self):
sub = self.get('Subject')
if not sub:
return 0
sub = sub.lower()
if sub.startswith('waiting mail'):
return 1
if 'warning' in sub:
return 1
self.sub = sub
return 0
def get_errors(self):
for p in EMPARSERS:
self.rewindbody()
try:
return p(self.fp, self.sub)
except Unparseable:
pass
raise Unparseable
# List of re's or tuples of re's.
# If a re, it should contain at least a group (?P<email>...) which
# should refer to the email address. The re can also contain a group
# (?P<reason>...) which should refer to the reason (error message).
# If no reason is present, the emparse_list_reason list is used to
# find a reason.
# If a tuple, the tuple should contain 2 re's. The first re finds a
# location, the second re is repeated one or more times to find
# multiple email addresses. The second re is matched (not searched)
# where the previous match ended.
# The re's are compiled using the re module.
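# For example, the tuple entry below whose first pattern matches the banner
# '----- The following addresses had permanent fatal errors -----' then
# repeatedly applies its second pattern, picking up one address per line
# until the lines stop matching.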
emparse_list_list = [
'error: (?P<reason>unresolvable): (?P<email>.+)',
('----- The following addresses had permanent fatal errors -----\n',
'(?P<email>[^ \n].*)\n( .*\n)?'),
'remote execution.*\n.*rmail (?P<email>.+)',
('The following recipients did not receive your message:\n\n',
' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
'------- Failure Reasons --------\n\n(?P<reason>.*)\n(?P<email>.*)',
'^<(?P<email>.*)>:\n(?P<reason>.*)',
'^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
'^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
'^Original-Recipient: rfc822;(?P<email>.*)',
'^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
'^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
'^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
'^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
'^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
]
# compile the re's in the list and store them in-place.
for i in range(len(emparse_list_list)):
x = emparse_list_list[i]
if type(x) is type(''):
x = re.compile(x, re.MULTILINE)
else:
xl = []
for x in x:
xl.append(re.compile(x, re.MULTILINE))
x = tuple(xl)
del xl
emparse_list_list[i] = x
del x
del i
# list of re's used to find reasons (error messages).
# if a string, "<>" is replaced by a copy of the email address.
# The expressions are searched for in order. After the first match,
# no more expressions are searched for. So, order is important.
emparse_list_reason = [
r'^5\d{2} <>\.\.\. (?P<reason>.*)',
r'<>\.\.\. (?P<reason>.*)',
re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
]
emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
def emparse_list(fp, sub):
data = fp.read()
res = emparse_list_from.search(data)
if res is None:
from_index = len(data)
else:
from_index = res.start(0)
errors = []
emails = []
reason = None
for regexp in emparse_list_list:
if type(regexp) is type(()):
res = regexp[0].search(data, 0, from_index)
if res is not None:
try:
reason = res.group('reason')
except IndexError:
pass
while 1:
res = regexp[1].match(data, res.end(0), from_index)
if res is None:
break
emails.append(res.group('email'))
break
else:
res = regexp.search(data, 0, from_index)
if res is not None:
emails.append(res.group('email'))
try:
reason = res.group('reason')
except IndexError:
pass
break
if not emails:
raise Unparseable
if not reason:
reason = sub
if reason[:15] == 'returned mail: ':
reason = reason[15:]
for regexp in emparse_list_reason:
if type(regexp) is type(''):
for i in range(len(emails)-1,-1,-1):
email = emails[i]
exp = re.compile(re.escape(email).join(regexp.split('<>')), re.MULTILINE)
res = exp.search(data)
if res is not None:
errors.append(' '.join((email.strip()+': '+res.group('reason')).split()))
del emails[i]
continue
res = regexp.search(data)
if res is not None:
reason = res.group('reason')
break
for email in emails:
errors.append(' '.join((email.strip()+': '+reason).split()))
return errors
EMPARSERS = [emparse_list]
def sort_numeric(a, b):
a = int(a)
b = int(b)
if a < b:
return -1
elif a > b:
return 1
else:
return 0
def parsedir(dir, modify):
os.chdir(dir)
pat = re.compile('^[0-9]*$')
errordict = {}
errorfirst = {}
errorlast = {}
nok = nwarn = nbad = 0
# find all numeric file names and sort them
files = list(filter(lambda fn, pat=pat: pat.match(fn) is not None, os.listdir('.')))
    files.sort(key=int)   # sort numerically; Python 3's sort() takes no cmp function
for fn in files:
# Lets try to parse the file.
fp = open(fn)
m = email.message_from_file(fp, _class=ErrorMessage)
sender = m.getaddr('From')
print('%s\t%-40s\t'%(fn, sender[1]), end=' ')
if m.is_warning():
fp.close()
print('warning only')
nwarn = nwarn + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
continue
try:
errors = m.get_errors()
except Unparseable:
print('** Not parseable')
nbad = nbad + 1
fp.close()
continue
print(len(errors), 'errors')
# Remember them
for e in errors:
try:
mm, dd = m.getdate('date')[1:1+2]
date = '%s %02d' % (calendar.month_abbr[mm], dd)
except:
date = '??????'
if e not in errordict:
errordict[e] = 1
errorfirst[e] = '%s (%s)' % (fn, date)
else:
errordict[e] = errordict[e] + 1
errorlast[e] = '%s (%s)' % (fn, date)
fp.close()
nok = nok + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
print('--------------')
print(nok, 'files parsed,',nwarn,'files warning-only,', end=' ')
print(nbad,'files unparseable')
print('--------------')
list = []
for e in errordict.keys():
list.append((errordict[e], errorfirst[e], errorlast[e], e))
list.sort()
for num, first, last, e in list:
print('%d %s - %s\t%s' % (num, first, last, e))
def main():
modify = 0
if len(sys.argv) > 1 and sys.argv[1] == '-d':
modify = 1
del sys.argv[1]
if len(sys.argv) > 1:
for folder in sys.argv[1:]:
parsedir(folder, modify)
else:
parsedir('/ufs/jack/Mail/errorsinbox', modify)
if __name__ == '__main__' or sys.argv[0] == __name__:
main()
| 8,040 | 247 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/db2pickle.py | #!/usr/bin/env python3
"""
Synopsis: %(prog)s [-h|-g|-b|-r|-a] dbfile [ picklefile ]
Convert the database file given on the command line to a pickle
representation. The optional flags indicate the type of the database:
-a - open using dbm (any supported format)
-b - open as bsddb btree file
-d - open as dbm file
-g - open as gdbm file
-h - open as bsddb hash file
-r - open as bsddb recno file
The default is hash. If a pickle file is named it is opened for write
access (deleting any existing data). If no pickle file is named, the pickle
output is written to standard output.
"""
import getopt
try:
import bsddb
except ImportError:
bsddb = None
try:
import dbm.ndbm as dbm
except ImportError:
dbm = None
try:
import dbm.gnu as gdbm
except ImportError:
gdbm = None
try:
    import dbm as anydbm
except ImportError:
    anydbm = None
import sys
import pickle
prog = sys.argv[0]
def usage():
sys.stderr.write(__doc__ % globals())
def main(args):
try:
opts, args = getopt.getopt(args, "hbrdag",
["hash", "btree", "recno", "dbm",
"gdbm", "anydbm"])
except getopt.error:
usage()
return 1
if len(args) == 0 or len(args) > 2:
usage()
return 1
elif len(args) == 1:
dbfile = args[0]
pfile = sys.stdout
else:
dbfile = args[0]
try:
pfile = open(args[1], 'wb')
except IOError:
sys.stderr.write("Unable to open %s\n" % args[1])
return 1
dbopen = None
for opt, arg in opts:
if opt in ("-h", "--hash"):
try:
dbopen = bsddb.hashopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-b", "--btree"):
try:
dbopen = bsddb.btopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-r", "--recno"):
try:
dbopen = bsddb.rnopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-a", "--anydbm"):
try:
dbopen = anydbm.open
except AttributeError:
sys.stderr.write("dbm module unavailable.\n")
return 1
elif opt in ("-g", "--gdbm"):
try:
dbopen = gdbm.open
except AttributeError:
sys.stderr.write("dbm.gnu module unavailable.\n")
return 1
elif opt in ("-d", "--dbm"):
try:
dbopen = dbm.open
except AttributeError:
sys.stderr.write("dbm.ndbm module unavailable.\n")
return 1
if dbopen is None:
if bsddb is None:
sys.stderr.write("bsddb module unavailable - ")
sys.stderr.write("must specify dbtype.\n")
return 1
else:
dbopen = bsddb.hashopen
try:
db = dbopen(dbfile, 'r')
except bsddb.error:
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1
for k in db.keys():
pickle.dump((k, db[k]), pfile, 1==1)
db.close()
pfile.close()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 3,630 | 136 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/gprof2html.py | #! /usr/bin/env python3
"""Transform gprof(1) output into useful HTML."""
import html
import os
import re
import sys
import webbrowser
header = """\
<html>
<head>
<title>gprof output (%s)</title>
</head>
<body>
<pre>
"""
trailer = """\
</pre>
</body>
</html>
"""
def add_escapes(filename):
with open(filename) as fp:
for line in fp:
yield html.escape(line)
def main():
filename = "gprof.out"
if sys.argv[1:]:
filename = sys.argv[1]
outputfilename = filename + ".html"
input = add_escapes(filename)
output = open(outputfilename, "w")
output.write(header % filename)
for line in input:
output.write(line)
if line.startswith(" time"):
break
labels = {}
for line in input:
m = re.match(r"(.* )(\w+)\n", line)
if not m:
output.write(line)
break
stuff, fname = m.group(1, 2)
labels[fname] = fname
output.write('%s<a name="flat:%s" href="#call:%s">%s</a>\n' %
(stuff, fname, fname, fname))
for line in input:
output.write(line)
if line.startswith("index % time"):
break
for line in input:
m = re.match(r"(.* )(\w+)(( <cycle.*>)? \[\d+\])\n", line)
if not m:
output.write(line)
if line.startswith("Index by function name"):
break
continue
prefix, fname, suffix = m.group(1, 2, 3)
if fname not in labels:
output.write(line)
continue
if line.startswith("["):
output.write('%s<a name="call:%s" href="#flat:%s">%s</a>%s\n' %
(prefix, fname, fname, fname, suffix))
else:
output.write('%s<a href="#call:%s">%s</a>%s\n' %
(prefix, fname, fname, suffix))
for line in input:
for part in re.findall(r"(\w+(?:\.c)?|\W+)", line):
if part in labels:
part = '<a href="#call:%s">%s</a>' % (part, part)
output.write(part)
output.write(trailer)
output.close()
webbrowser.open("file:" + os.path.abspath(outputfilename))
if __name__ == '__main__':
main()
| 2,229 | 86 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/generate_opcode_h.py | # This script generates the opcode.h header file.
import sys
header = """/* Auto-generated by Tools/scripts/generate_opcode_h.py */
#ifndef Py_OPCODE_H
#define Py_OPCODE_H
#ifdef __cplusplus
extern "C" {
#endif
/* Instruction opcodes for compiled code */
"""
footer = """
/* EXCEPT_HANDLER is a special, implicit block type which is created when
entering an except handler. It is not an opcode but we define it here
as we want it to be available to both frameobject.c and ceval.c, while
remaining private.*/
#define EXCEPT_HANDLER 257
enum cmp_op {PyCmp_LT=Py_LT, PyCmp_LE=Py_LE, PyCmp_EQ=Py_EQ, PyCmp_NE=Py_NE,
PyCmp_GT=Py_GT, PyCmp_GE=Py_GE, PyCmp_IN, PyCmp_NOT_IN,
PyCmp_IS, PyCmp_IS_NOT, PyCmp_EXC_MATCH, PyCmp_BAD};
#define HAS_ARG(op) ((op) >= HAVE_ARGUMENT)
#ifdef __cplusplus
}
#endif
#endif /* !Py_OPCODE_H */
"""
def main(opcode_py, outfile='Include/opcode.h'):
opcode = {}
exec(open(opcode_py).read(), opcode)
opmap = opcode['opmap']
with open(outfile, 'w') as fobj:
fobj.write(header)
for name in opcode['opname']:
if name in opmap:
fobj.write("#define %-23s %3s\n" % (name, opmap[name]))
if name == 'POP_EXCEPT': # Special entry for HAVE_ARGUMENT
fobj.write("#define %-23s %3d\n" %
('HAVE_ARGUMENT', opcode['HAVE_ARGUMENT']))
fobj.write(footer)
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
| 1,501 | 53 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/ifdef.py | #! /usr/bin/env python3
# Selectively preprocess #ifdef / #ifndef statements.
# Usage:
# ifdef [-Dname] ... [-Uname] ... [file] ...
#
# This scans the file(s), looking for #ifdef and #ifndef preprocessor
# commands that test for one of the names mentioned in the -D and -U
# options. On standard output it writes a copy of the input file(s)
# minus those code sections that are suppressed by the selected
# combination of defined/undefined symbols. The #if(n)def/#else/#else
# lines themselves (if the #if(n)def tests for one of the mentioned
# names) are removed as well.
# Features: Arbitrary nesting of recognized and unrecognized
# preprocessor statements works correctly. Unrecognized #if* commands
# are left in place, so it will never remove too much, only too
# little. It does accept whitespace around the '#' character.
# Restrictions: There should be no comments or other symbols on the
# #if(n)def lines. The effect of #define/#undef commands in the input
# file or in included files is not taken into account. Tests using
# #if and the defined() pseudo function are not recognized. The #elif
# command is not recognized. Improperly nesting is not detected.
# Lines that look like preprocessor commands but which are actually
# part of comments or string literals will be mistaken for
# preprocessor commands.
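# A minimal example (hypothetical file name): given foo.c containing
#
#     #ifdef FEATURE
#     int x = 1;
#     #else
#     int x = 0;
#     #endif
#
# running "ifdef.py -DFEATURE foo.c" prints only "int x = 1;"; the #ifdef,
# #else and #endif lines are stripped because FEATURE was named on the
# command line.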
import sys
import getopt
defs = []
undefs = []
def main():
opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
for o, a in opts:
if o == '-D':
defs.append(a)
if o == '-U':
undefs.append(a)
if not args:
args = ['-']
for filename in args:
if filename == '-':
process(sys.stdin, sys.stdout)
else:
f = open(filename, 'r')
process(f, sys.stdout)
f.close()
def process(fpi, fpo):
keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
ok = 1
stack = []
while 1:
line = fpi.readline()
if not line: break
while line[-2:] == '\\\n':
nextline = fpi.readline()
if not nextline: break
line = line + nextline
tmp = line.strip()
if tmp[:1] != '#':
if ok: fpo.write(line)
continue
tmp = tmp[1:].strip()
words = tmp.split()
keyword = words[0]
if keyword not in keywords:
if ok: fpo.write(line)
continue
if keyword in ('ifdef', 'ifndef') and len(words) == 2:
if keyword == 'ifdef':
ko = 1
else:
ko = 0
word = words[1]
if word in defs:
stack.append((ok, ko, word))
if not ko: ok = 0
elif word in undefs:
stack.append((ok, not ko, word))
if ko: ok = 0
else:
stack.append((ok, -1, word))
if ok: fpo.write(line)
elif keyword == 'if':
stack.append((ok, -1, ''))
if ok: fpo.write(line)
elif keyword == 'else' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
else:
s_ko = not s_ko
ok = s_ok
if not s_ko: ok = 0
stack[-1] = s_ok, s_ko, s_word
elif keyword == 'endif' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
del stack[-1]
ok = s_ok
else:
sys.stderr.write('Unknown keyword %s\n' % keyword)
if stack:
sys.stderr.write('stack: %s\n' % stack)
if __name__ == '__main__':
main()
| 3,720 | 113 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/fixdiv.py | #! /usr/bin/env python3
"""fixdiv - tool to fix division operators.
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
This runs the script `yourscript.py' while writing warning messages
about all uses of the classic division operator to the file
`warnings'. The warnings look like this:
<file>:<line>: DeprecationWarning: classic <type> division
The warnings are written to stderr, so you must use `2>' for the I/O
redirect. I know of no way to redirect stderr on Windows in a DOS
box, so you will have to modify the script to set sys.stderr to some
kind of log file if you want to do this on Windows.
The warnings are not limited to the script; modules imported by the
script may also trigger warnings. In fact a useful technique is to
write a test script specifically intended to exercise all code in a
particular module or set of modules.
Then run `python fixdiv.py warnings'. This first reads the warnings,
looking for classic division warnings, and sorts them by file name and
line number. Then, for each file that received at least one warning,
it parses the file and tries to match the warnings up to the division
operators found in the source code. If it is successful, it writes
its findings to stdout, preceded by a line of dashes and a line of the
form:
Index: <file>
If the only findings found are suggestions to change a / operator into
a // operator, the output is acceptable input for the Unix 'patch'
program.
Here are the possible messages on stdout (N stands for a line number):
- A plain-diff-style change ('NcN', a line marked by '<', a line
containing '---', and a line marked by '>'):
A / operator was found that should be changed to //. This is the
recommendation when only int and/or long arguments were seen.
- 'True division / operator at line N' and a line marked by '=':
A / operator was found that can remain unchanged. This is the
recommendation when only float and/or complex arguments were seen.
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
A / operator was found for which int or long as well as float or
complex arguments were seen. This is highly unlikely; if it occurs,
you may have to restructure the code to keep the classic semantics,
or maybe you don't care about the classic semantics.
- 'No conclusive evidence on line N', line marked by '*':
A / operator was found for which no warnings were seen. This could
be code that was never executed, or code that was only executed
with user-defined objects as arguments. You will have to
investigate further. Note that // can be overloaded separately from
/, using __floordiv__. True division can also be separately
overloaded, using __truediv__. Classic division should be the same
as either of those. (XXX should I add a warning for division on
user-defined objects, to disambiguate this case from code that was
never executed?)
- 'Phantom ... warnings for line N', line marked by '*':
A warning was seen for a line not containing a / operator. The most
likely cause is a warning about code executed by 'exec' or eval()
(see note below), or an indirect invocation of the / operator, for
example via the div() function in the operator module. It could
also be caused by a change to the file between the time the test
script was run to collect warnings and the time fixdiv was run.
- 'More than one / operator in line N'; or
'More than one / operator per statement in lines N-N':
The scanner found more than one / operator on a single line, or in a
statement split across multiple lines. Because the warnings
framework doesn't (and can't) show the offset within the line, and
the code generator doesn't always give the correct line number for
operations in a multi-line statement, we can't be sure whether all
operators in the statement were executed. To be on the safe side,
by default a warning is issued about this case. In practice, these
cases are usually safe, and the -m option suppresses these warnings.
- 'Can't find the / operator in line N', line marked by '*':
This really shouldn't happen. It means that the tokenize module
reported a '/' operator but the line it returns didn't contain a '/'
character at the indicated position.
- 'Bad warning for line N: XYZ', line marked by '*':
This really shouldn't happen. It means that a 'classic XYZ
division' warning was read with XYZ being something other than
'int', 'long', 'float', or 'complex'.
Notes:
- The augmented assignment operator /= is handled the same way as the
/ operator.
- This tool never looks at the // operator; no warnings are ever
generated for use of this operator.
- This tool never looks at the / operator when a future division
statement is in effect; no warnings are generated in this case, and
because the tool only looks at files for which at least one classic
division warning was seen, it will never look at files containing a
future division statement.
- Warnings may be issued for code not read from a file, but executed
using the exec() or eval() functions. These may have
<string> in the filename position, in which case the fixdiv script
will attempt and fail to open a file named '<string>' and issue a
warning about this failure; or these may be reported as 'Phantom'
warnings (see above). You're on your own to deal with these. You
could make all recommended changes and add a future division
statement to all affected files, and then re-run the test script; it
should not issue any warnings. If there are any, and you have a
hard time tracking down where they are generated, you can use the
-Werror option to force an error instead of a first warning,
generating a traceback.
- The tool should be run from the same directory as that from which
the original script was run, otherwise it won't be able to open
files given by relative pathnames.
"""
import sys
import getopt
import re
import tokenize
multi_ok = 0
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hm")
except getopt.error as msg:
usage(msg)
return 2
for o, a in opts:
if o == "-h":
print(__doc__)
return
if o == "-m":
global multi_ok
multi_ok = 1
if not args:
usage("at least one file argument is required")
return 2
if args[1:]:
sys.stderr.write("%s: extra file arguments ignored\n", sys.argv[0])
warnings = readwarnings(args[0])
if warnings is None:
return 1
files = list(warnings.keys())
if not files:
print("No classic division warnings read from", args[0])
return
files.sort()
exit = None
for filename in files:
x = process(filename, warnings[filename])
exit = exit or x
return exit
def usage(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
PATTERN = (r"^(.+?):(\d+): DeprecationWarning: "
r"classic (int|long|float|complex) division$")
def readwarnings(warningsfile):
prog = re.compile(PATTERN)
try:
f = open(warningsfile)
except IOError as msg:
sys.stderr.write("can't open: %s\n" % msg)
return
warnings = {}
while 1:
line = f.readline()
if not line:
break
m = prog.match(line)
if not m:
if line.find("division") >= 0:
sys.stderr.write("Warning: ignored input " + line)
continue
filename, lineno, what = m.groups()
list = warnings.get(filename)
if list is None:
warnings[filename] = list = []
list.append((int(lineno), sys.intern(what)))
f.close()
return warnings
def process(filename, list):
print("-"*70)
assert list # if this fails, readwarnings() is broken
try:
fp = open(filename)
except IOError as msg:
sys.stderr.write("can't open: %s\n" % msg)
return 1
print("Index:", filename)
f = FileContext(fp)
list.sort()
index = 0 # list[:index] has been processed, list[index:] is still to do
g = tokenize.generate_tokens(f.readline)
while 1:
startlineno, endlineno, slashes = lineinfo = scanline(g)
if startlineno is None:
break
assert startlineno <= endlineno is not None
orphans = []
while index < len(list) and list[index][0] < startlineno:
orphans.append(list[index])
index += 1
if orphans:
reportphantomwarnings(orphans, f)
warnings = []
while index < len(list) and list[index][0] <= endlineno:
warnings.append(list[index])
index += 1
if not slashes and not warnings:
pass
elif slashes and not warnings:
report(slashes, "No conclusive evidence")
elif warnings and not slashes:
reportphantomwarnings(warnings, f)
else:
if len(slashes) > 1:
if not multi_ok:
rows = []
lastrow = None
for (row, col), line in slashes:
if row == lastrow:
continue
rows.append(row)
lastrow = row
assert rows
if len(rows) == 1:
print("*** More than one / operator in line", rows[0])
else:
print("*** More than one / operator per statement", end=' ')
print("in lines %d-%d" % (rows[0], rows[-1]))
intlong = []
floatcomplex = []
bad = []
for lineno, what in warnings:
if what in ("int", "long"):
intlong.append(what)
elif what in ("float", "complex"):
floatcomplex.append(what)
else:
bad.append(what)
lastrow = None
for (row, col), line in slashes:
if row == lastrow:
continue
lastrow = row
line = chop(line)
if line[col:col+1] != "/":
print("*** Can't find the / operator in line %d:" % row)
print("*", line)
continue
if bad:
print("*** Bad warning for line %d:" % row, bad)
print("*", line)
elif intlong and not floatcomplex:
print("%dc%d" % (row, row))
print("<", line)
print("---")
print(">", line[:col] + "/" + line[col:])
elif floatcomplex and not intlong:
print("True division / operator at line %d:" % row)
print("=", line)
elif intlong and floatcomplex:
print("*** Ambiguous / operator (%s, %s) at line %d:" % (
"|".join(intlong), "|".join(floatcomplex), row))
print("?", line)
fp.close()
def reportphantomwarnings(warnings, f):
blocks = []
lastrow = None
lastblock = None
for row, what in warnings:
if row != lastrow:
lastblock = [row]
blocks.append(lastblock)
lastblock.append(what)
for block in blocks:
row = block[0]
whats = "/".join(block[1:])
print("*** Phantom %s warnings for line %d:" % (whats, row))
f.report(row, mark="*")
def report(slashes, message):
lastrow = None
for (row, col), line in slashes:
if row != lastrow:
print("*** %s on line %d:" % (message, row))
print("*", chop(line))
lastrow = row
class FileContext:
def __init__(self, fp, window=5, lineno=1):
self.fp = fp
        self.window = window
        self.lineno = lineno
self.eoflookahead = 0
self.lookahead = []
self.buffer = []
def fill(self):
while len(self.lookahead) < self.window and not self.eoflookahead:
line = self.fp.readline()
if not line:
self.eoflookahead = 1
break
self.lookahead.append(line)
def readline(self):
self.fill()
if not self.lookahead:
return ""
line = self.lookahead.pop(0)
self.buffer.append(line)
self.lineno += 1
return line
def __getitem__(self, index):
self.fill()
bufstart = self.lineno - len(self.buffer)
lookend = self.lineno + len(self.lookahead)
if bufstart <= index < self.lineno:
return self.buffer[index - bufstart]
if self.lineno <= index < lookend:
return self.lookahead[index - self.lineno]
raise KeyError
def report(self, first, last=None, mark="*"):
if last is None:
last = first
for i in range(first, last+1):
try:
                line = self[i]
except KeyError:
line = "<missing line>"
print(mark, chop(line))
def scanline(g):
slashes = []
startlineno = None
endlineno = None
for type, token, start, end, line in g:
endlineno = end[0]
if startlineno is None:
startlineno = endlineno
if token in ("/", "/="):
slashes.append((start, line))
if type == tokenize.NEWLINE:
break
return startlineno, endlineno, slashes
def chop(line):
if line.endswith("\n"):
return line[:-1]
else:
return line
if __name__ == "__main__":
sys.exit(main())
| 13,882 | 379 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/md5sum.py | #! /usr/bin/env python3
"""Python utility to print MD5 checksums of argument files.
"""
bufsize = 8096
fnfilter = None
rmode = 'rb'
usage = """
usage: md5sum.py [-b] [-t] [-l] [-s bufsize] [file ...]
-b : read files in binary mode (default)
-t : read files in text mode (you almost certainly don't want this!)
-l : print last pathname component only
-s bufsize: read buffer size (default %d)
file ... : files to sum; '-' or no files means stdin
""" % bufsize
import io
import sys
import os
import getopt
from hashlib import md5
def sum(*files):
sts = 0
if files and isinstance(files[-1], io.IOBase):
out, files = files[-1], files[:-1]
else:
out = sys.stdout
if len(files) == 1 and not isinstance(files[0], str):
files = files[0]
for f in files:
if isinstance(f, str):
if f == '-':
sts = printsumfp(sys.stdin, '<stdin>', out) or sts
else:
sts = printsum(f, out) or sts
else:
sts = sum(f, out) or sts
return sts
def printsum(filename, out=sys.stdout):
try:
fp = open(filename, rmode)
except IOError as msg:
sys.stderr.write('%s: Can\'t open: %s\n' % (filename, msg))
return 1
if fnfilter:
filename = fnfilter(filename)
sts = printsumfp(fp, filename, out)
fp.close()
return sts
def printsumfp(fp, filename, out=sys.stdout):
m = md5()
try:
while 1:
data = fp.read(bufsize)
if not data:
break
if isinstance(data, str):
data = data.encode(fp.encoding)
m.update(data)
except IOError as msg:
sys.stderr.write('%s: I/O error: %s\n' % (filename, msg))
return 1
out.write('%s %s\n' % (m.hexdigest(), filename))
return 0
def main(args = sys.argv[1:], out=sys.stdout):
global fnfilter, rmode, bufsize
try:
opts, args = getopt.getopt(args, 'blts:')
except getopt.error as msg:
sys.stderr.write('%s: %s\n%s' % (sys.argv[0], msg, usage))
return 2
for o, a in opts:
if o == '-l':
fnfilter = os.path.basename
elif o == '-b':
rmode = 'rb'
elif o == '-t':
rmode = 'r'
elif o == '-s':
bufsize = int(a)
if not args:
args = ['-']
return sum(args, out)
if __name__ == '__main__' or __name__ == sys.argv[0]:
sys.exit(main(sys.argv[1:], sys.stdout))
| 2,508 | 94 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/dutree.doc | Path: cwi.nl!sun4nl!mcsun!uunet!cs.utexas.edu!convex!usenet
From: [email protected] (Tom Christiansen)
Newsgroups: comp.lang.perl
Subject: Re: The problems of Perl (Re: Question (silly?))
Message-ID: <[email protected]>
Date: 17 Jan 92 05:31:15 GMT
References: <[email protected]> <[email protected]> <=#[email protected]>
Sender: [email protected] (news access account)
Reply-To: [email protected] (Tom Christiansen)
Organization: CONVEX Realtime Development, Colorado Springs, CO
Lines: 83
Nntp-Posting-Host: pixel.convex.com
From the keyboard of [email protected] (Felix Lee):
:And Perl is definitely awkward with data types. I haven't yet found a
:pleasant way of shoving non-trivial data types into Perl's grammar.
Yes, it's pretty aweful at that, alright. Sometimes I write perl programs
that need them, and sometimes it just takes a little creativity. But
sometimes it's not worth it. I actually wrote a C program the other day
(gasp) because I didn't want to deal with a game matrix with six links per node.
:Here's a very simple problem that's tricky to express in Perl: process
:the output of "du" to produce output that's indented to reflect the
:tree structure, and with each subtree sorted by size. Something like:
: 434 /etc
: | 344 .
: | 50 install
: | 35 uucp
: | 3 nserve
: | | 2 .
: | | 1 auth.info
: | 1 sm
: | 1 sm.bak
At first I thought I could just keep one local list around
at once, but this seems inherently recursive. Which means
I need a real recursive data structure. Maybe you could
do it with one of the %assoc arrays Larry uses in the begat
programs, but I broke down and got dirty. I think the hardest
part was matching Felix's desired output exactly. It's not
blazingly fast: I should probably inline the &childof routine,
but it *was* faster to write than I could have written the
equivalent C program.
--tom
--
"GUIs normally make it simple to accomplish simple actions and impossible
to accomplish complex actions." --Doug Gwyn (22/Jun/91 in comp.unix.wizards)
Tom Christiansen [email protected] convex!tchrist
| 2,237 | 55 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/mkreal.py | #! /usr/bin/env python3
# mkreal
#
# turn a symlink to a directory into a real directory
import sys
import os
from stat import *
join = os.path.join
error = 'mkreal error'
BUFSIZE = 32*1024
def mkrealfile(name):
st = os.stat(name) # Get the mode
mode = S_IMODE(st[ST_MODE])
linkto = os.readlink(name) # Make sure again it's a symlink
f_in = open(name, 'r') # This ensures it's a file
os.unlink(name)
f_out = open(name, 'w')
while 1:
buf = f_in.read(BUFSIZE)
if not buf: break
f_out.write(buf)
del f_out # Flush data to disk before changing mode
os.chmod(name, mode)
def mkrealdir(name):
st = os.stat(name) # Get the mode
mode = S_IMODE(st[ST_MODE])
linkto = os.readlink(name)
files = os.listdir(name)
os.unlink(name)
os.mkdir(name, mode)
os.chmod(name, mode)
linkto = join(os.pardir, linkto)
#
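    # Recreate each original entry as a symlink pointing back into the old
    # target one level up (linkto was prefixed with os.pardir above), so the
    # new real directory mirrors the contents of the former link target.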
for filename in files:
if filename not in (os.curdir, os.pardir):
os.symlink(join(linkto, filename), join(name, filename))
def main():
sys.stdout = sys.stderr
progname = os.path.basename(sys.argv[0])
if progname == '-c': progname = 'mkreal'
args = sys.argv[1:]
if not args:
print('usage:', progname, 'path ...')
sys.exit(2)
status = 0
for name in args:
if not os.path.islink(name):
print(progname+':', name+':', 'not a symlink')
status = 1
else:
if os.path.isdir(name):
mkrealdir(name)
else:
mkrealfile(name)
sys.exit(status)
if __name__ == '__main__':
main()
| 1,631 | 67 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/idle3 | #! /usr/bin/env python3
from idlelib.pyshell import main
if __name__ == '__main__':
main()
| 96 | 6 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/pickle2db.py | #!/usr/bin/env python3
"""
Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile
Read the given picklefile as a series of key/value pairs and write to a new
database. If the database already exists, any contents are deleted. The
optional flags indicate the type of the output database:
-a - open using dbm (open any supported format)
-b - open as bsddb btree file
-d - open as dbm.ndbm file
-g - open as dbm.gnu file
-h - open as bsddb hash file
-r - open as bsddb recno file
The default is hash. If a pickle file is named it is opened for read
access. If no pickle file is named, the pickle input is read from standard
input.
Note that recno databases can only contain integer keys, so you can't dump a
hash or btree database using db2pickle.py and reconstitute it to a recno
database with %(prog)s unless your keys are integers.
"""
import getopt
try:
import bsddb
except ImportError:
bsddb = None
try:
import dbm.ndbm as dbm
except ImportError:
dbm = None
try:
import dbm.gnu as gdbm
except ImportError:
gdbm = None
try:
    import dbm as anydbm
except ImportError:
anydbm = None
import sys
import pickle
prog = sys.argv[0]
def usage():
sys.stderr.write(__doc__ % globals())
def main(args):
try:
opts, args = getopt.getopt(args, "hbrdag",
["hash", "btree", "recno", "dbm", "anydbm",
"gdbm"])
except getopt.error:
usage()
return 1
if len(args) == 0 or len(args) > 2:
usage()
return 1
elif len(args) == 1:
pfile = sys.stdin
dbfile = args[0]
else:
try:
pfile = open(args[0], 'rb')
except IOError:
sys.stderr.write("Unable to open %s\n" % args[0])
return 1
dbfile = args[1]
dbopen = None
for opt, arg in opts:
if opt in ("-h", "--hash"):
try:
dbopen = bsddb.hashopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-b", "--btree"):
try:
dbopen = bsddb.btopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-r", "--recno"):
try:
dbopen = bsddb.rnopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-a", "--anydbm"):
try:
dbopen = anydbm.open
except AttributeError:
sys.stderr.write("dbm module unavailable.\n")
return 1
elif opt in ("-g", "--gdbm"):
try:
dbopen = gdbm.open
except AttributeError:
sys.stderr.write("dbm.gnu module unavailable.\n")
return 1
elif opt in ("-d", "--dbm"):
try:
dbopen = dbm.open
except AttributeError:
sys.stderr.write("dbm.ndbm module unavailable.\n")
return 1
if dbopen is None:
if bsddb is None:
sys.stderr.write("bsddb module unavailable - ")
sys.stderr.write("must specify dbtype.\n")
return 1
else:
dbopen = bsddb.hashopen
try:
db = dbopen(dbfile, 'c')
except bsddb.error:
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1
else:
for k in list(db.keys()):
del db[k]
while 1:
try:
(key, val) = pickle.load(pfile)
except EOFError:
break
db[key] = val
db.close()
pfile.close()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 4,021 | 148 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/checkpyc.py | #! /usr/bin/env python3
# Check that all ".pyc" files exist and are up-to-date
# Uses module 'os'
import sys
import os
from stat import ST_MTIME
import importlib.util
# PEP 3147 compatibility (PYC Repository Directories)
cache_from_source = (importlib.util.cache_from_source if sys.implementation.cache_tag
else lambda path: path + 'c')
def main():
if len(sys.argv) > 1:
verbose = (sys.argv[1] == '-v')
silent = (sys.argv[1] == '-s')
else:
verbose = silent = False
MAGIC = importlib.util.MAGIC_NUMBER
if not silent:
print('Using MAGIC word', repr(MAGIC))
for dirname in sys.path:
try:
names = os.listdir(dirname)
except OSError:
print('Cannot list directory', repr(dirname))
continue
if not silent:
print('Checking ', repr(dirname), '...')
for name in sorted(names):
if name.endswith('.py'):
name = os.path.join(dirname, name)
try:
st = os.stat(name)
except OSError:
print('Cannot stat', repr(name))
continue
if verbose:
print('Check', repr(name), '...')
name_c = cache_from_source(name)
try:
with open(name_c, 'rb') as f:
magic_str = f.read(4)
mtime_str = f.read(4)
except IOError:
print('Cannot open', repr(name_c))
continue
if magic_str != MAGIC:
print('Bad MAGIC word in ".pyc" file', end=' ')
print(repr(name_c))
continue
mtime = get_long(mtime_str)
if mtime in {0, -1}:
print('Bad ".pyc" file', repr(name_c))
elif mtime != st[ST_MTIME]:
print('Out-of-date ".pyc" file', end=' ')
print(repr(name_c))
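# Decode a 4-byte little-endian integer; the code above reads the 4 bytes
# that follow the magic number as the source-file mtime stored in the
# .pyc header.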
def get_long(s):
if len(s) != 4:
return -1
return s[0] + (s[1] << 8) + (s[2] << 16) + (s[3] << 24)
if __name__ == '__main__':
main()
| 2,215 | 70 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/pyvenv | #!/usr/bin/env python3
if __name__ == '__main__':
import sys
import pathlib
executable = pathlib.Path(sys.executable or 'python3').name
    print('WARNING: the pyvenv script is deprecated in favour of '
f'`{executable} -m venv`', file=sys.stderr)
rc = 1
try:
import venv
venv.main()
rc = 0
except Exception as e:
print('Error: %s' % e, file=sys.stderr)
sys.exit(rc)
| 437 | 18 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/finddiv.py | #! /usr/bin/env python3
"""finddiv - a grep-like tool that looks for division operators.
Usage: finddiv [-l] file_or_directory ...
For directory arguments, all files in the directory whose name ends in
.py are processed, and subdirectories are processed recursively.
This actually tokenizes the files to avoid false hits in comments or
strings literals.
By default, this prints all lines containing a / or /= operator, in
grep -n style. With the -l option specified, it prints the filename
of files that contain at least one / or /= operator.
"""
import os
import sys
import getopt
import tokenize
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "lh")
except getopt.error as msg:
usage(msg)
return 2
if not args:
usage("at least one file argument is required")
return 2
listnames = 0
for o, a in opts:
if o == "-h":
print(__doc__)
return
if o == "-l":
listnames = 1
exit = None
for filename in args:
x = process(filename, listnames)
exit = exit or x
return exit
def usage(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0])
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
def process(filename, listnames):
if os.path.isdir(filename):
return processdir(filename, listnames)
try:
fp = open(filename)
except IOError as msg:
sys.stderr.write("Can't open: %s\n" % msg)
return 1
g = tokenize.generate_tokens(fp.readline)
lastrow = None
for type, token, (row, col), end, line in g:
if token in ("/", "/="):
if listnames:
print(filename)
break
if row != lastrow:
lastrow = row
print("%s:%d:%s" % (filename, row, line), end=' ')
fp.close()
def processdir(dir, listnames):
try:
names = os.listdir(dir)
except OSError as msg:
sys.stderr.write("Can't list directory: %s\n" % dir)
return 1
files = []
for name in names:
fn = os.path.join(dir, name)
if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
files.append(fn)
files.sort(key=os.path.normcase)
exit = None
for fn in files:
x = process(fn, listnames)
exit = exit or x
return exit
if __name__ == "__main__":
sys.exit(main())
| 2,497 | 90 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/byext.py | #! /usr/bin/env python3
"""Show file statistics by extension."""
import os
import sys
class Stats:
def __init__(self):
self.stats = {}
def statargs(self, args):
for arg in args:
if os.path.isdir(arg):
self.statdir(arg)
elif os.path.isfile(arg):
self.statfile(arg)
else:
sys.stderr.write("Can't find %s\n" % arg)
self.addstats("<???>", "unknown", 1)
def statdir(self, dir):
self.addstats("<dir>", "dirs", 1)
try:
names = os.listdir(dir)
except OSError as err:
sys.stderr.write("Can't list %s: %s\n" % (dir, err))
self.addstats("<dir>", "unlistable", 1)
return
for name in sorted(names):
if name.startswith(".#"):
continue # Skip CVS temp files
if name.endswith("~"):
continue # Skip Emacs backup files
full = os.path.join(dir, name)
if os.path.islink(full):
self.addstats("<lnk>", "links", 1)
elif os.path.isdir(full):
self.statdir(full)
else:
self.statfile(full)
def statfile(self, filename):
head, ext = os.path.splitext(filename)
head, base = os.path.split(filename)
if ext == base:
ext = "" # E.g. .cvsignore is deemed not to have an extension
ext = os.path.normcase(ext)
if not ext:
ext = "<none>"
self.addstats(ext, "files", 1)
try:
with open(filename, "rb") as f:
data = f.read()
except IOError as err:
sys.stderr.write("Can't open %s: %s\n" % (filename, err))
self.addstats(ext, "unopenable", 1)
return
self.addstats(ext, "bytes", len(data))
if b'\0' in data:
self.addstats(ext, "binary", 1)
return
if not data:
self.addstats(ext, "empty", 1)
# self.addstats(ext, "chars", len(data))
lines = str(data, "latin-1").splitlines()
self.addstats(ext, "lines", len(lines))
del lines
words = data.split()
self.addstats(ext, "words", len(words))
def addstats(self, ext, key, n):
d = self.stats.setdefault(ext, {})
d[key] = d.get(key, 0) + n
def report(self):
exts = sorted(self.stats)
# Get the column keys
columns = {}
for ext in exts:
columns.update(self.stats[ext])
cols = sorted(columns)
colwidth = {}
colwidth["ext"] = max([len(ext) for ext in exts])
minwidth = 6
self.stats["TOTAL"] = {}
for col in cols:
total = 0
cw = max(minwidth, len(col))
for ext in exts:
value = self.stats[ext].get(col)
if value is None:
w = 0
else:
w = len("%d" % value)
total += value
cw = max(cw, w)
cw = max(cw, len(str(total)))
colwidth[col] = cw
self.stats["TOTAL"][col] = total
exts.append("TOTAL")
for ext in exts:
self.stats[ext]["ext"] = ext
cols.insert(0, "ext")
def printheader():
for col in cols:
print("%*s" % (colwidth[col], col), end=' ')
print()
printheader()
for ext in exts:
for col in cols:
value = self.stats[ext].get(col, "")
print("%*s" % (colwidth[col], value), end=' ')
print()
printheader() # Another header at the bottom
def main():
args = sys.argv[1:]
if not args:
args = [os.curdir]
s = Stats()
s.statargs(args)
s.report()
if __name__ == "__main__":
main()
| 3,916 | 133 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/update_file.py | """
A script that replaces an old file with a new one, only if the contents
actually changed. If not, the new file is simply deleted.
This avoids wholesale rebuilds when a code (re)generation phase does not
actually change the in-tree generated code.
"""
import os
import sys
def main(old_path, new_path):
with open(old_path, 'rb') as f:
old_contents = f.read()
with open(new_path, 'rb') as f:
new_contents = f.read()
if old_contents != new_contents:
os.replace(new_path, old_path)
else:
os.unlink(new_path)
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Usage: %s <path to be updated> <path with new contents>" % (sys.argv[0],))
sys.exit(1)
main(sys.argv[1], sys.argv[2])
| 762 | 29 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/fixps.py | #!/usr/bin/env python3
# Fix Python script(s) to reference the interpreter via /usr/bin/env python.
# Warning: this overwrites the file without making a backup.
import sys
import re
def main():
for filename in sys.argv[1:]:
try:
f = open(filename, 'r')
except IOError as msg:
print(filename, ': can\'t open :', msg)
continue
line = f.readline()
if not re.match('^#! */usr/local/bin/python', line):
print(filename, ': not a /usr/local/bin/python script')
f.close()
continue
rest = f.read()
f.close()
line = re.sub('/usr/local/bin/python',
'/usr/bin/env python', line)
print(filename, ':', repr(line))
f = open(filename, "w")
f.write(line)
f.write(rest)
f.close()
if __name__ == '__main__':
main()
| 899 | 34 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/find_recursionlimit.py | #! /usr/bin/env python3
"""Find the maximum recursion limit that prevents interpreter termination.
This script finds the maximum safe recursion limit on a particular
platform. If you need to change the recursion limit on your system,
this script will tell you a safe upper bound. To use the new limit,
call sys.setrecursionlimit().
This module implements several ways to create infinite recursion in
Python. Different implementations end up pushing different numbers of
C stack frames, depending on how many calls through Python's abstract
C API occur.
After each round of tests, it prints a message:
"Limit of NNNN is fine".
The highest printed value of "NNNN" is therefore the highest potentially
safe limit for your system (which depends on the OS, architecture, but also
the compilation flags). Please note that it is practically impossible to
test all possible recursion paths in the interpreter, so the results of
this test should not be trusted blindly -- although they give a good hint
of which values are reasonable.
NOTE: When the C stack space allocated by your system is exceeded due
to excessive recursion, exact behaviour depends on the platform, although
the interpreter will always fail in a likely brutal way: either a
segmentation fault, a MemoryError, or just a silent abort.
NB: A program that does not use __methods__ can set a higher limit.
"""
import sys
import itertools
class RecursiveBlowup1:
def __init__(self):
self.__init__()
def test_init():
return RecursiveBlowup1()
class RecursiveBlowup2:
def __repr__(self):
return repr(self)
def test_repr():
return repr(RecursiveBlowup2())
class RecursiveBlowup4:
def __add__(self, x):
return x + self
def test_add():
return RecursiveBlowup4() + RecursiveBlowup4()
class RecursiveBlowup5:
def __getattr__(self, attr):
return getattr(self, attr)
def test_getattr():
return RecursiveBlowup5().attr
class RecursiveBlowup6:
def __getitem__(self, item):
return self[item - 2] + self[item - 1]
def test_getitem():
return RecursiveBlowup6()[5]
def test_recurse():
return test_recurse()
def test_cpickle(_cache={}):
import io
try:
import _pickle
except ImportError:
print("cannot import _pickle, skipped!")
return
k, l = None, None
for n in itertools.count():
try:
l = _cache[n]
continue # Already tried and it works, let's save some time
except KeyError:
for i in range(100):
l = [k, l]
k = {i: l}
_pickle.Pickler(io.BytesIO(), protocol=-1).dump(l)
_cache[n] = l
def test_compiler_recursion():
# The compiler uses a scaling factor to support additional levels
# of recursion. This is a sanity check of that scaling to ensure
# it still raises RecursionError even at higher recursion limits
compile("()" * (10 * sys.getrecursionlimit()), "<single>", "single")
def check_limit(n, test_func_name):
sys.setrecursionlimit(n)
if test_func_name.startswith("test_"):
print(test_func_name[5:])
else:
print(test_func_name)
test_func = globals()[test_func_name]
try:
test_func()
# AttributeError can be raised because of the way e.g. PyDict_GetItem()
# silences all exceptions and returns NULL, which is usually interpreted
# as "missing attribute".
except (RecursionError, AttributeError):
pass
else:
print("Yikes!")
if __name__ == '__main__':
limit = 1000
while 1:
check_limit(limit, "test_recurse")
check_limit(limit, "test_add")
check_limit(limit, "test_repr")
check_limit(limit, "test_init")
check_limit(limit, "test_getattr")
check_limit(limit, "test_getitem")
check_limit(limit, "test_cpickle")
check_limit(limit, "test_compiler_recursion")
print("Limit of %d is fine" % limit)
limit = limit + 100
| 3,995 | 129 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/findnocoding.py | #!/usr/bin/env python3
"""List all those Python files that require a coding directive
Usage: findnocoding.py dir1 [dir2...]
"""
__author__ = "Oleg Broytmann, Georg Brandl"
import sys, os, re, getopt
# our pysource module finds Python source files
try:
import pysource
except ImportError:
# emulate the module with a simple os.walk
class pysource:
has_python_ext = looks_like_python = can_be_compiled = None
def walk_python_files(self, paths, *args, **kwargs):
for path in paths:
                if os.path.isfile(path):
                    if path.endswith(".py"):
                        yield path
elif os.path.isdir(path):
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".py"):
yield os.path.join(root, filename)
pysource = pysource()
print("The pysource module is not available; "
"no sophisticated Python source file search will be done.", file=sys.stderr)
decl_re = re.compile(rb'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
blank_re = re.compile(rb'^[ \t\f]*(?:[#\r\n]|$)')
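# PEP 263: an encoding declaration must appear on the first or second line of
# the file; blank_re lets a blank or comment-only first line be skipped before
# the cookie is looked for on line two.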
def get_declaration(line):
match = decl_re.match(line)
if match:
return match.group(1)
return b''
def has_correct_encoding(text, codec):
try:
str(text, codec)
except UnicodeDecodeError:
return False
else:
return True
def needs_declaration(fullpath):
try:
infile = open(fullpath, 'rb')
except IOError: # Oops, the file was removed - ignore it
return None
with infile:
line1 = infile.readline()
line2 = infile.readline()
if (get_declaration(line1) or
blank_re.match(line1) and get_declaration(line2)):
# the file does have an encoding declaration, so trust it
return False
# check the whole file for non utf-8 characters
rest = infile.read()
if has_correct_encoding(line1+line2+rest, "utf-8"):
return False
return True
usage = """Usage: %s [-cd] paths...
-c: recognize Python source files by trying to compile them
-d: debug output""" % sys.argv[0]
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], 'cd')
except getopt.error as msg:
print(msg, file=sys.stderr)
print(usage, file=sys.stderr)
sys.exit(1)
is_python = pysource.looks_like_python
debug = False
for o, a in opts:
if o == '-c':
is_python = pysource.can_be_compiled
elif o == '-d':
debug = True
if not args:
print(usage, file=sys.stderr)
sys.exit(1)
for fullpath in pysource.walk_python_files(args, is_python):
if debug:
print("Testing for coding: %s" % fullpath)
result = needs_declaration(fullpath)
if result:
print(fullpath)
| 2,952 | 108 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/checkpip.py | #!/usr/bin/env python3
"""
Checks that the version of the projects bundled in ensurepip are the latest
versions available.
"""
import ensurepip
import json
import urllib.request
import sys
def main():
outofdate = False
for project, version in ensurepip._PROJECTS:
data = json.loads(urllib.request.urlopen(
"https://pypi.org/pypi/{}/json".format(project),
cadefault=True,
).read().decode("utf8"))
upstream_version = data["info"]["version"]
if version != upstream_version:
outofdate = True
print("The latest version of {} on PyPI is {}, but ensurepip "
"has {}".format(project, upstream_version, version))
if outofdate:
sys.exit(1)
if __name__ == "__main__":
main()
| 793 | 33 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/parseentities.py | #!/usr/bin/env python3
""" Utility for parsing HTML entity definitions available from:
http://www.w3.org/ as e.g.
http://www.w3.org/TR/REC-html40/HTMLlat1.ent
Input is read from stdin, output is written to stdout in form of a
Python snippet defining a dictionary "entitydefs" mapping literal
entity name to character or numeric entity.
Marc-Andre Lemburg, [email protected], 1999.
Use as you like. NO WARRANTIES.
"""
import re,sys
entityRE = re.compile(r'<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
def parse(text,pos=0,endpos=None):
if endpos is None:
endpos = len(text)
d = {}
while 1:
m = entityRE.search(text,pos,endpos)
if not m:
break
name,charcode,comment = m.groups()
d[name] = charcode,comment
pos = m.end()
return d
def writefile(f,defs):
f.write("entitydefs = {\n")
items = sorted(defs.items())
for name, (charcode,comment) in items:
if charcode[:2] == '&#':
code = int(charcode[2:-1])
if code < 256:
charcode = r"'\%o'" % code
else:
charcode = repr(charcode)
else:
charcode = repr(charcode)
comment = ' '.join(comment.split())
f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
f.write('\n}\n')
if __name__ == '__main__':
if len(sys.argv) > 1:
infile = open(sys.argv[1])
else:
infile = sys.stdin
if len(sys.argv) > 2:
outfile = open(sys.argv[2],'w')
else:
outfile = sys.stdout
text = infile.read()
defs = parse(text)
writefile(outfile,defs)
| 1,695 | 63 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/fixheader.py | #! /usr/bin/env python3
# Add some standard cpp magic to a header file
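# Example (hypothetical file name): running this on spam.h wraps the existing
# contents in '#ifndef Py_SPAM_H' / '#define Py_SPAM_H' guards plus an
# extern "C" block; non-alphanumeric characters in the path become underscores.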
import sys
def main():
args = sys.argv[1:]
for filename in args:
process(filename)
def process(filename):
try:
f = open(filename, 'r')
except IOError as msg:
sys.stderr.write('%s: can\'t open: %s\n' % (filename, str(msg)))
return
data = f.read()
f.close()
if data[:2] != '/*':
sys.stderr.write('%s does not begin with C comment\n' % filename)
return
try:
f = open(filename, 'w')
except IOError as msg:
sys.stderr.write('%s: can\'t write: %s\n' % (filename, str(msg)))
return
sys.stderr.write('Processing %s ...\n' % filename)
magic = 'Py_'
for c in filename:
if ord(c)<=0x80 and c.isalnum():
magic = magic + c.upper()
else: magic = magic + '_'
sys.stdout = f
print('#ifndef', magic)
print('#define', magic)
print('#ifdef __cplusplus')
print('extern "C" {')
print('#endif')
print()
f.write(data)
print()
print('#ifdef __cplusplus')
print('}')
print('#endif')
print('#endif /*', '!'+magic, '*/')
if __name__ == '__main__':
main()
| 1,208 | 50 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/fixnotice.py | #! /usr/bin/env python3
"""(Ostensibly) fix copyright notices in files.
Actually, this script will simply replace a block of text in a file from one
string to another. It will only do this once though, i.e. not globally
throughout the file. It writes a backup file and then does an os.rename()
dance for atomicity.
Usage: fixnotice.py [options] [filenames]
Options:
-h / --help
Print this message and exit
--oldnotice=file
Use the notice in the file as the old (to be replaced) string, instead
of the hard coded value in the script.
--newnotice=file
Use the notice in the file as the new (replacement) string, instead of
the hard coded value in the script.
--dry-run
Don't actually make the changes, but print out the list of files that
would change. When used with -v, a status will be printed for every
file.
-v / --verbose
Print a message for every file looked at, indicating whether the file
is changed or not.
"""
OLD_NOTICE = """/***********************************************************
Copyright (c) 2000, BeOpen.com.
Copyright (c) 1995-2000, Corporation for National Research Initiatives.
Copyright (c) 1990-1995, Stichting Mathematisch Centrum.
All rights reserved.
See the file "Misc/COPYRIGHT" for information on usage and
redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
******************************************************************/
"""
import os
import sys
import getopt
NEW_NOTICE = ""
DRYRUN = 0
VERBOSE = 0
def usage(code, msg=''):
print(__doc__ % globals())
if msg:
print(msg)
sys.exit(code)
def main():
global DRYRUN, OLD_NOTICE, NEW_NOTICE, VERBOSE
try:
opts, args = getopt.getopt(sys.argv[1:], 'hv',
['help', 'oldnotice=', 'newnotice=',
'dry-run', 'verbose'])
except getopt.error as msg:
usage(1, msg)
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-v', '--verbose'):
VERBOSE = 1
elif opt == '--dry-run':
DRYRUN = 1
elif opt == '--oldnotice':
fp = open(arg)
OLD_NOTICE = fp.read()
fp.close()
elif opt == '--newnotice':
fp = open(arg)
NEW_NOTICE = fp.read()
fp.close()
for arg in args:
process(arg)
def process(file):
f = open(file)
data = f.read()
f.close()
i = data.find(OLD_NOTICE)
if i < 0:
if VERBOSE:
print('no change:', file)
return
elif DRYRUN or VERBOSE:
print(' change:', file)
if DRYRUN:
# Don't actually change the file
return
data = data[:i] + NEW_NOTICE + data[i+len(OLD_NOTICE):]
new = file + ".new"
backup = file + ".bak"
f = open(new, "w")
f.write(data)
f.close()
os.rename(file, backup)
os.rename(new, file)
if __name__ == '__main__':
main()
| 3,059 | 114 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/analyze_dxp.py | """
Some helper functions to analyze the output of sys.getdxp() (which is
only available if Python was built with -DDYNAMIC_EXECUTION_PROFILE).
These will tell you which opcodes have been executed most frequently
in the current process, and, if Python was also built with -DDXPAIRS,
will tell you which instruction _pairs_ were executed most frequently,
which may help in choosing new instructions.
If Python was built without -DDYNAMIC_EXECUTION_PROFILE, importing
this module will raise a RuntimeError.
If you're running a script you want to profile, a simple way to get
the common pairs is:
$ PYTHONPATH=$PYTHONPATH:<python_srcdir>/Tools/scripts \
./python -i -O the_script.py --args
...
> from analyze_dxp import *
> s = render_common_pairs()
> open('/tmp/some_file', 'w').write(s)
"""
import copy
import opcode
import operator
import sys
import threading
if not hasattr(sys, "getdxp"):
raise RuntimeError("Can't import analyze_dxp: Python built without"
" -DDYNAMIC_EXECUTION_PROFILE.")
_profile_lock = threading.RLock()
_cumulative_profile = sys.getdxp()
# If Python was built with -DDXPAIRS, sys.getdxp() returns a list of
# lists of ints. Otherwise it returns just a list of ints.
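# The helpers below assume that, with -DDXPAIRS, profile[op1][op2] counts how
# often opcode op2 ran immediately after op1, and that the final row
# profile[-1] holds the plain per-opcode counts.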
def has_pairs(profile):
"""Returns True if the Python that produced the argument profile
was built with -DDXPAIRS."""
return len(profile) > 0 and isinstance(profile[0], list)
def reset_profile():
"""Forgets any execution profile that has been gathered so far."""
with _profile_lock:
sys.getdxp() # Resets the internal profile
global _cumulative_profile
_cumulative_profile = sys.getdxp() # 0s out our copy.
def merge_profile():
"""Reads sys.getdxp() and merges it into this module's cached copy.
We need this because sys.getdxp() 0s itself every time it's called."""
with _profile_lock:
new_profile = sys.getdxp()
if has_pairs(new_profile):
for first_inst in range(len(_cumulative_profile)):
for second_inst in range(len(_cumulative_profile[first_inst])):
_cumulative_profile[first_inst][second_inst] += (
new_profile[first_inst][second_inst])
else:
for inst in range(len(_cumulative_profile)):
_cumulative_profile[inst] += new_profile[inst]
def snapshot_profile():
"""Returns the cumulative execution profile until this call."""
with _profile_lock:
merge_profile()
return copy.deepcopy(_cumulative_profile)
def common_instructions(profile):
"""Returns the most common opcodes in order of descending frequency.
The result is a list of tuples of the form
(opcode, opname, # of occurrences)
"""
if has_pairs(profile) and profile:
inst_list = profile[-1]
else:
inst_list = profile
result = [(op, opcode.opname[op], count)
for op, count in enumerate(inst_list)
if count > 0]
result.sort(key=operator.itemgetter(2), reverse=True)
return result
def common_pairs(profile):
"""Returns the most common opcode pairs in order of descending frequency.
The result is a list of tuples of the form
((1st opcode, 2nd opcode),
(1st opname, 2nd opname),
# of occurrences of the pair)
"""
if not has_pairs(profile):
return []
result = [((op1, op2), (opcode.opname[op1], opcode.opname[op2]), count)
# Drop the row of single-op profiles with [:-1]
for op1, op1profile in enumerate(profile[:-1])
for op2, count in enumerate(op1profile)
if count > 0]
result.sort(key=operator.itemgetter(2), reverse=True)
return result
def render_common_pairs(profile=None):
"""Renders the most common opcode pairs to a string in order of
descending frequency.
The result is a series of lines of the form:
# of occurrences: ('1st opname', '2nd opname')
"""
if profile is None:
profile = snapshot_profile()
def seq():
for _, ops, count in common_pairs(profile):
yield "%s: %s\n" % (count, ops)
return ''.join(seq())
| 4,183 | 130 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/scripts/copytime.py | #! /usr/bin/env python3
# Copy one file's atime and mtime to another
import sys
import os
from stat import ST_ATIME, ST_MTIME # Really constants 7 and 8
def main():
if len(sys.argv) != 3:
sys.stderr.write('usage: copytime source destination\n')
sys.exit(2)
file1, file2 = sys.argv[1], sys.argv[2]
try:
stat1 = os.stat(file1)
except OSError:
sys.stderr.write(file1 + ': cannot stat\n')
sys.exit(1)
try:
os.utime(file2, (stat1[ST_ATIME], stat1[ST_MTIME]))
except OSError:
sys.stderr.write(file2 + ': cannot change time\n')
sys.exit(2)
if __name__ == '__main__':
main()
| 663 | 27 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/iobench/iobench.py | # -*- coding: utf-8 -*-
# This file should be kept compatible with both Python 2.6 and Python >= 3.0.
import itertools
import os
import platform
import re
import sys
import time
from optparse import OptionParser
out = sys.stdout
TEXT_ENCODING = 'utf8'
NEWLINES = 'lf'
# Compatibility
try:
xrange
except NameError:
xrange = range
def text_open(fn, mode, encoding=None):
try:
return open(fn, mode, encoding=encoding or TEXT_ENCODING)
except TypeError:
if 'r' in mode:
mode += 'U' # 'U' mode is needed only in Python 2.x
return open(fn, mode)
def get_file_sizes():
for s in ['20 KB', '400 KB', '10 MB']:
size, unit = s.split()
size = int(size) * {'KB': 1024, 'MB': 1024 ** 2}[unit]
yield s.replace(' ', ''), size
def get_binary_files():
return ((name + ".bin", size) for name, size in get_file_sizes())
def get_text_files():
return (("%s-%s-%s.txt" % (name, TEXT_ENCODING, NEWLINES), size)
for name, size in get_file_sizes())
def with_open_mode(mode):
def decorate(f):
f.file_open_mode = mode
return f
return decorate
def with_sizes(*sizes):
def decorate(f):
f.file_sizes = sizes
return f
return decorate
# Here begin the tests
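# Each test function is tagged (via the decorators above) with the open()
# mode it needs and the file-size classes it runs against; run_test_family()
# filters tests on the mode letters and picks the prepared file for each size.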
@with_open_mode("r")
@with_sizes("medium")
def read_bytewise(f):
""" read one unit at a time """
f.seek(0)
while f.read(1):
pass
@with_open_mode("r")
@with_sizes("medium")
def read_small_chunks(f):
""" read 20 units at a time """
f.seek(0)
while f.read(20):
pass
@with_open_mode("r")
@with_sizes("medium")
def read_big_chunks(f):
""" read 4096 units at a time """
f.seek(0)
while f.read(4096):
pass
@with_open_mode("r")
@with_sizes("small", "medium", "large")
def read_whole_file(f):
""" read whole contents at once """
f.seek(0)
while f.read():
pass
@with_open_mode("rt")
@with_sizes("medium")
def read_lines(f):
""" read one line at a time """
f.seek(0)
for line in f:
pass
@with_open_mode("r")
@with_sizes("medium")
def seek_forward_bytewise(f):
""" seek forward one unit at a time """
f.seek(0, 2)
size = f.tell()
f.seek(0, 0)
for i in xrange(0, size - 1):
f.seek(i, 0)
@with_open_mode("r")
@with_sizes("medium")
def seek_forward_blockwise(f):
""" seek forward 1000 units at a time """
f.seek(0, 2)
size = f.tell()
f.seek(0, 0)
for i in xrange(0, size - 1, 1000):
f.seek(i, 0)
@with_open_mode("rb")
@with_sizes("medium")
def read_seek_bytewise(f):
""" alternate read & seek one unit """
f.seek(0)
while f.read(1):
f.seek(1, 1)
@with_open_mode("rb")
@with_sizes("medium")
def read_seek_blockwise(f):
""" alternate read & seek 1000 units """
f.seek(0)
while f.read(1000):
f.seek(1000, 1)
@with_open_mode("w")
@with_sizes("small")
def write_bytewise(f, source):
""" write one unit at a time """
for i in xrange(0, len(source)):
f.write(source[i:i+1])
@with_open_mode("w")
@with_sizes("medium")
def write_small_chunks(f, source):
""" write 20 units at a time """
for i in xrange(0, len(source), 20):
f.write(source[i:i+20])
@with_open_mode("w")
@with_sizes("medium")
def write_medium_chunks(f, source):
""" write 4096 units at a time """
for i in xrange(0, len(source), 4096):
f.write(source[i:i+4096])
@with_open_mode("w")
@with_sizes("large")
def write_large_chunks(f, source):
""" write 1e6 units at a time """
for i in xrange(0, len(source), 1000000):
f.write(source[i:i+1000000])
@with_open_mode("w+")
@with_sizes("small")
def modify_bytewise(f, source):
""" modify one unit at a time """
f.seek(0)
for i in xrange(0, len(source)):
f.write(source[i:i+1])
@with_open_mode("w+")
@with_sizes("medium")
def modify_small_chunks(f, source):
""" modify 20 units at a time """
f.seek(0)
for i in xrange(0, len(source), 20):
f.write(source[i:i+20])
@with_open_mode("w+")
@with_sizes("medium")
def modify_medium_chunks(f, source):
""" modify 4096 units at a time """
f.seek(0)
for i in xrange(0, len(source), 4096):
f.write(source[i:i+4096])
@with_open_mode("wb+")
@with_sizes("medium")
def modify_seek_forward_bytewise(f, source):
""" alternate write & seek one unit """
f.seek(0)
for i in xrange(0, len(source), 2):
f.write(source[i:i+1])
f.seek(i+2)
@with_open_mode("wb+")
@with_sizes("medium")
def modify_seek_forward_blockwise(f, source):
""" alternate write & seek 1000 units """
f.seek(0)
for i in xrange(0, len(source), 2000):
f.write(source[i:i+1000])
f.seek(i+2000)
# XXX the 2 following tests don't work with py3k's text IO
@with_open_mode("wb+")
@with_sizes("medium")
def read_modify_bytewise(f, source):
""" alternate read & write one unit """
f.seek(0)
for i in xrange(0, len(source), 2):
f.read(1)
f.write(source[i+1:i+2])
@with_open_mode("wb+")
@with_sizes("medium")
def read_modify_blockwise(f, source):
""" alternate read & write 1000 units """
f.seek(0)
for i in xrange(0, len(source), 2000):
f.read(1000)
f.write(source[i+1000:i+2000])
read_tests = [
read_bytewise, read_small_chunks, read_lines, read_big_chunks,
None, read_whole_file, None,
seek_forward_bytewise, seek_forward_blockwise,
read_seek_bytewise, read_seek_blockwise,
]
write_tests = [
write_bytewise, write_small_chunks, write_medium_chunks, write_large_chunks,
]
modify_tests = [
modify_bytewise, modify_small_chunks, modify_medium_chunks,
None,
modify_seek_forward_bytewise, modify_seek_forward_blockwise,
read_modify_bytewise, read_modify_blockwise,
]
def run_during(duration, func):
_t = time.time
n = 0
start = os.times()
start_timestamp = _t()
real_start = start[4] or start_timestamp
while True:
func()
n += 1
if _t() - start_timestamp > duration:
break
end = os.times()
real = (end[4] if start[4] else time.time()) - real_start
return n, real, sum(end[0:2]) - sum(start[0:2])
def warm_cache(filename):
with open(filename, "rb") as f:
f.read()
def run_all_tests(options):
def print_label(filename, func):
name = re.split(r'[-.]', filename)[0]
out.write(
("[%s] %s... "
% (name.center(7), func.__doc__.strip())
).ljust(52))
out.flush()
def print_results(size, n, real, cpu):
bw = n * float(size) / 1024 ** 2 / real
bw = ("%4d MB/s" if bw > 100 else "%.3g MB/s") % bw
out.write(bw.rjust(12) + "\n")
if cpu < 0.90 * real:
out.write(" warning: test above used only %d%% CPU, "
"result may be flawed!\n" % (100.0 * cpu / real))
def run_one_test(name, size, open_func, test_func, *args):
mode = test_func.file_open_mode
print_label(name, test_func)
if "w" not in mode or "+" in mode:
warm_cache(name)
with open_func(name) as f:
n, real, cpu = run_during(1.5, lambda: test_func(f, *args))
print_results(size, n, real, cpu)
def run_test_family(tests, mode_filter, files, open_func, *make_args):
for test_func in tests:
if test_func is None:
out.write("\n")
continue
if mode_filter in test_func.file_open_mode:
continue
for s in test_func.file_sizes:
name, size = files[size_names[s]]
#name += file_ext
args = tuple(f(name, size) for f in make_args)
run_one_test(name, size,
open_func, test_func, *args)
size_names = {
"small": 0,
"medium": 1,
"large": 2,
}
print("Python %s" % sys.version)
if sys.version_info < (3, 3):
if sys.maxunicode > 0xffff:
text = "UCS-4 (wide build)"
else:
text = "UTF-16 (narrow build)"
else:
text = "PEP 393"
print("Unicode: %s" % text)
print(platform.platform())
binary_files = list(get_binary_files())
text_files = list(get_text_files())
if "b" in options:
print("Binary unit = one byte")
if "t" in options:
print("Text unit = one character (%s-decoded)" % TEXT_ENCODING)
# Binary reads
if "b" in options and "r" in options:
print("\n** Binary input **\n")
run_test_family(read_tests, "t", binary_files, lambda fn: open(fn, "rb"))
# Text reads
if "t" in options and "r" in options:
print("\n** Text input **\n")
run_test_family(read_tests, "b", text_files, lambda fn: text_open(fn, "r"))
# Binary writes
if "b" in options and "w" in options:
print("\n** Binary append **\n")
def make_test_source(name, size):
with open(name, "rb") as f:
return f.read()
run_test_family(write_tests, "t", binary_files,
lambda fn: open(os.devnull, "wb"), make_test_source)
# Text writes
if "t" in options and "w" in options:
print("\n** Text append **\n")
def make_test_source(name, size):
with text_open(name, "r") as f:
return f.read()
run_test_family(write_tests, "b", text_files,
lambda fn: text_open(os.devnull, "w"), make_test_source)
# Binary overwrites
if "b" in options and "w" in options:
print("\n** Binary overwrite **\n")
def make_test_source(name, size):
with open(name, "rb") as f:
return f.read()
run_test_family(modify_tests, "t", binary_files,
lambda fn: open(fn, "r+b"), make_test_source)
# Text overwrites
if "t" in options and "w" in options:
print("\n** Text overwrite **\n")
def make_test_source(name, size):
with text_open(name, "r") as f:
return f.read()
run_test_family(modify_tests, "b", text_files,
lambda fn: text_open(fn, "r+"), make_test_source)
def prepare_files():
print("Preparing files...")
# Binary files
for name, size in get_binary_files():
if os.path.isfile(name) and os.path.getsize(name) == size:
continue
with open(name, "wb") as f:
f.write(os.urandom(size))
# Text files
chunk = []
with text_open(__file__, "r", encoding='utf8') as f:
for line in f:
if line.startswith("# <iobench text chunk marker>"):
break
else:
raise RuntimeError(
"Couldn't find chunk marker in %s !" % __file__)
if NEWLINES == "all":
it = itertools.cycle(["\n", "\r", "\r\n"])
else:
it = itertools.repeat(
{"cr": "\r", "lf": "\n", "crlf": "\r\n"}[NEWLINES])
chunk = "".join(line.replace("\n", next(it)) for line in f)
if isinstance(chunk, bytes):
chunk = chunk.decode('utf8')
chunk = chunk.encode(TEXT_ENCODING)
for name, size in get_text_files():
if os.path.isfile(name) and os.path.getsize(name) == size:
continue
head = chunk * (size // len(chunk))
tail = chunk[:size % len(chunk)]
# Adjust tail to end on a character boundary
while True:
try:
tail.decode(TEXT_ENCODING)
break
except UnicodeDecodeError:
tail = tail[:-1]
with open(name, "wb") as f:
f.write(head)
f.write(tail)
def main():
global TEXT_ENCODING, NEWLINES
usage = "usage: %prog [-h|--help] [options]"
parser = OptionParser(usage=usage)
parser.add_option("-b", "--binary",
action="store_true", dest="binary", default=False,
help="run binary I/O tests")
parser.add_option("-t", "--text",
action="store_true", dest="text", default=False,
help="run text I/O tests")
parser.add_option("-r", "--read",
action="store_true", dest="read", default=False,
help="run read tests")
parser.add_option("-w", "--write",
action="store_true", dest="write", default=False,
help="run write & modify tests")
parser.add_option("-E", "--encoding",
action="store", dest="encoding", default=None,
help="encoding for text tests (default: %s)" % TEXT_ENCODING)
parser.add_option("-N", "--newlines",
action="store", dest="newlines", default='lf',
help="line endings for text tests "
"(one of: {lf (default), cr, crlf, all})")
parser.add_option("-m", "--io-module",
action="store", dest="io_module", default=None,
help="io module to test (default: builtin open())")
options, args = parser.parse_args()
if args:
parser.error("unexpected arguments")
NEWLINES = options.newlines.lower()
if NEWLINES not in ('lf', 'cr', 'crlf', 'all'):
parser.error("invalid 'newlines' option: %r" % NEWLINES)
test_options = ""
if options.read:
test_options += "r"
if options.write:
test_options += "w"
elif not options.read:
test_options += "rw"
if options.text:
test_options += "t"
if options.binary:
test_options += "b"
elif not options.text:
test_options += "tb"
if options.encoding:
TEXT_ENCODING = options.encoding
if options.io_module:
globals()['open'] = __import__(options.io_module, {}, {}, ['open']).open
prepare_files()
run_all_tests(test_options)
if __name__ == "__main__":
main()
# -- This part to exercise text reading. Don't change anything! --
# <iobench text chunk marker>
"""
1.
Gáttir allar,
áðr gangi fram,
um skoðask skyli,
um skyggnast skyli,
því at óvíst er at vita,
hvar óvinir
sitja á fleti fyrir.
2.
Gefendr heilir!
Gestr er inn kominn,
hvar skal sitja sjá?
Mjök er bráðr,
sá er á bröndum skal
síns of freista frama.
3.
Elds er þörf,
þeims inn er kominn
ok á kné kalinn;
matar ok váða
er manni þörf,
þeim er hefr um fjall farit.
4.
Vatns er þörf,
þeim er til verðar kemr,
þerru ok þjóðlaðar,
góðs of æðis,
ef sér geta mætti,
orðs ok endrþögu.
5.
Vits er þörf,
þeim er víða ratar;
dælt er heima hvat;
at augabragði verðr,
sá er ekki kann
ok með snotrum sitr.
6.
At hyggjandi sinni
skyli-t maðr hræsinn vera,
heldr gætinn at geði;
þá er horskr ok þögull
kemr heimisgarða til,
sjaldan verðr víti vörum,
því at óbrigðra vin
fær maðr aldregi
en mannvit mikit.
7.
Inn vari gestr,
er til verðar kemr,
þunnu hljóði þegir,
eyrum hlýðir,
en augum skoðar;
svá nýsisk fróðra hverr fyrir.
8.
Hinn er sæll,
er sér of getr
lof ok líknstafi;
ódælla er við þat,
er maðr eiga skal
annars brjóstum í.
"""
"""
C'est revenir tard, je le sens, sur un sujet trop rebattu et déjà presque oublié. Mon état, qui ne me permet plus aucun travail suivi, mon aversion pour le genre polémique, ont causé ma lenteur à écrire et ma répugnance à publier. J'aurais même tout à fait supprimé ces Lettres, ou plutôt je ne les aurais point écrites, s'il n'eût été question que de moi : Mais ma patrie ne m'est pas tellement devenue étrangère que je puisse voir tranquillement opprimer ses citoyens, surtout lorsqu'ils n'ont compromis leurs droits qu'en défendant ma cause. Je serais le dernier des hommes si dans une telle occasion j'écoutais un sentiment qui n'est plus ni douceur ni patience, mais faiblesse et lâcheté, dans celui qu'il empêche de remplir son devoir.
Rien de moins important pour le public, j'en conviens, que la matière de ces lettres. La constitution d'une petite République, le sort d'un petit particulier, l'exposé de quelques injustices, la réfutation de quelques sophismes ; tout cela n'a rien en soi d'assez considérable pour mériter beaucoup de lecteurs : mais si mes sujets sont petits mes objets sont grands, et dignes de l'attention de tout honnête homme. Laissons Genève à sa place, et Rousseau dans sa dépression ; mais la religion, mais la liberté, la justice ! voilà, qui que vous soyez, ce qui n'est pas au-dessous de vous.
Qu'on ne cherche pas même ici dans le style le dédommagement de l'aridité de la matière. Ceux que quelques traits heureux de ma plume ont si fort irrités trouveront de quoi s'apaiser dans ces lettres, L'honneur de défendre un opprimé eût enflammé mon coeur si j'avais parlé pour un autre. Réduit au triste emploi de me défendre moi-même, j'ai dû me borner à raisonner ; m'échauffer eût été m'avilir. J'aurai donc trouvé grâce en ce point devant ceux qui s'imaginent qu'il est essentiel à la vérité d'être dite froidement ; opinion que pourtant j'ai peine à comprendre. Lorsqu'une vive persuasion nous anime, le moyen d'employer un langage glacé ? Quand Archimède tout transporté courait nu dans les rues de Syracuse, en avait-il moins trouvé la vérité parce qu'il se passionnait pour elle ? Tout au contraire, celui qui la sent ne peut s'abstenir de l'adorer ; celui qui demeure froid ne l'a pas vue.
Quoi qu'il en soit, je prie les lecteurs de vouloir bien mettre à part mon beau style, et d'examiner seulement si je raisonne bien ou mal ; car enfin, de cela seul qu'un auteur s'exprime en bons termes, je ne vois pas comment il peut s'ensuivre que cet auteur ne sait ce qu'il dit.
"""
| 17,772 | 557 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/unittestgui/README.txt | unittestgui.py is a GUI framework and application for use with the Python
unit testing framework. It executes tests written using the framework
provided by the 'unittest' module.
Based on the original by Steve Purcell, from:
http://pyunit.sourceforge.net/
Updated for unittest test discovery by Mark Roddy and Python 3
support by Brian Curtin.
For details on how to make your tests work with test discovery,
and for explanations of the configuration options, see the unittest
documentation:
http://docs.python.org/library/unittest.html#test-discovery
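A minimal sketch of a module that the default "test*.py" pattern will
discover (the file and class names here are only illustrative):

    # test_example.py
    import unittest

    class ExampleTests(unittest.TestCase):
        def test_addition(self):
            self.assertEqual(1 + 1, 2)

    if __name__ == '__main__':
        unittest.main()

Use the "Discover Tests" button to pick the directory containing such
files; the top level directory and the file name pattern can be changed
in the Settings dialog.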
| 556 | 17 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/unittestgui/unittestgui.py | #!/usr/bin/env python3
"""
GUI framework and application for use with Python unit testing framework.
Execute tests written using the framework provided by the 'unittest' module.
Updated for unittest test discovery by Mark Roddy and Python 3
support by Brian Curtin.
Based on the original by Steve Purcell, from:
http://pyunit.sourceforge.net/
Copyright (c) 1999, 2000, 2001 Steve Purcell
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__author__ = "Steve Purcell ([email protected])"
import sys
import traceback
import unittest
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
from tkinter import simpledialog
##############################################################################
# GUI framework classes
##############################################################################
class BaseGUITestRunner(object):
"""Subclass this class to create a GUI TestRunner that uses a specific
windowing toolkit. The class takes care of running tests in the correct
manner, and making callbacks to the derived class to obtain information
or signal that events have occurred.
"""
def __init__(self, *args, **kwargs):
self.currentResult = None
self.running = 0
self.__rollbackImporter = None
self.__rollbackImporter = RollbackImporter()
self.test_suite = None
#test discovery variables
self.directory_to_read = ''
self.top_level_dir = ''
self.test_file_glob_pattern = 'test*.py'
self.initGUI(*args, **kwargs)
def errorDialog(self, title, message):
"Override to display an error arising from GUI usage"
pass
def getDirectoryToDiscover(self):
"Override to prompt user for directory to perform test discovery"
pass
def runClicked(self):
"To be called in response to user choosing to run a test"
if self.running: return
if not self.test_suite:
self.errorDialog("Test Discovery", "You discover some tests first!")
return
self.currentResult = GUITestResult(self)
self.totalTests = self.test_suite.countTestCases()
self.running = 1
self.notifyRunning()
self.test_suite.run(self.currentResult)
self.running = 0
self.notifyStopped()
def stopClicked(self):
"To be called in response to user stopping the running of a test"
if self.currentResult:
self.currentResult.stop()
def discoverClicked(self):
self.__rollbackImporter.rollbackImports()
directory = self.getDirectoryToDiscover()
if not directory:
return
self.directory_to_read = directory
try:
# Explicitly use 'None' value if no top level directory is
# specified (indicated by empty string) as discover() explicitly
# checks for a 'None' to determine if no tld has been specified
top_level_dir = self.top_level_dir or None
tests = unittest.defaultTestLoader.discover(directory, self.test_file_glob_pattern, top_level_dir)
self.test_suite = tests
except:
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(*sys.exc_info())
self.errorDialog("Unable to run test '%s'" % directory,
"Error loading specified test: %s, %s" % (exc_type, exc_value))
return
self.notifyTestsDiscovered(self.test_suite)
# Required callbacks
def notifyTestsDiscovered(self, test_suite):
"Override to display information about the suite of discovered tests"
pass
def notifyRunning(self):
"Override to set GUI in 'running' mode, enabling 'stop' button etc."
pass
def notifyStopped(self):
"Override to set GUI in 'stopped' mode, enabling 'run' button etc."
pass
def notifyTestFailed(self, test, err):
"Override to indicate that a test has just failed"
pass
def notifyTestErrored(self, test, err):
"Override to indicate that a test has just errored"
pass
def notifyTestSkipped(self, test, reason):
"Override to indicate that test was skipped"
pass
def notifyTestFailedExpectedly(self, test, err):
"Override to indicate that test has just failed expectedly"
pass
def notifyTestStarted(self, test):
"Override to indicate that a test is about to run"
pass
def notifyTestFinished(self, test):
"""Override to indicate that a test has finished (it may already have
failed or errored)"""
pass
class GUITestResult(unittest.TestResult):
"""A TestResult that makes callbacks to its associated GUI TestRunner.
Used by BaseGUITestRunner. Need not be created directly.
"""
def __init__(self, callback):
unittest.TestResult.__init__(self)
self.callback = callback
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.callback.notifyTestErrored(test, err)
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.callback.notifyTestFailed(test, err)
def addSkip(self, test, reason):
super(GUITestResult,self).addSkip(test, reason)
self.callback.notifyTestSkipped(test, reason)
def addExpectedFailure(self, test, err):
super(GUITestResult,self).addExpectedFailure(test, err)
self.callback.notifyTestFailedExpectedly(test, err)
def stopTest(self, test):
unittest.TestResult.stopTest(self, test)
self.callback.notifyTestFinished(test)
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.callback.notifyTestStarted(test)
class RollbackImporter:
"""This tricky little class is used to make sure that modules under test
will be reloaded the next time they are imported.
"""
def __init__(self):
self.previousModules = sys.modules.copy()
def rollbackImports(self):
for modname in sys.modules.copy().keys():
if not modname in self.previousModules:
# Force reload when modname next imported
del(sys.modules[modname])
##############################################################################
# Tkinter GUI
##############################################################################
class DiscoverSettingsDialog(simpledialog.Dialog):
"""
Dialog box for prompting test discovery settings
"""
def __init__(self, master, top_level_dir, test_file_glob_pattern, *args, **kwargs):
self.top_level_dir = top_level_dir
self.dirVar = tk.StringVar()
self.dirVar.set(top_level_dir)
self.test_file_glob_pattern = test_file_glob_pattern
self.testPatternVar = tk.StringVar()
self.testPatternVar.set(test_file_glob_pattern)
simpledialog.Dialog.__init__(self, master, title="Discover Settings",
*args, **kwargs)
def body(self, master):
tk.Label(master, text="Top Level Directory").grid(row=0)
self.e1 = tk.Entry(master, textvariable=self.dirVar)
self.e1.grid(row = 0, column=1)
tk.Button(master, text="...",
command=lambda: self.selectDirClicked(master)).grid(row=0,column=3)
tk.Label(master, text="Test File Pattern").grid(row=1)
self.e2 = tk.Entry(master, textvariable = self.testPatternVar)
self.e2.grid(row = 1, column=1)
return None
def selectDirClicked(self, master):
dir_path = filedialog.askdirectory(parent=master)
if dir_path:
self.dirVar.set(dir_path)
def apply(self):
self.top_level_dir = self.dirVar.get()
self.test_file_glob_pattern = self.testPatternVar.get()
class TkTestRunner(BaseGUITestRunner):
"""An implementation of BaseGUITestRunner using Tkinter.
"""
def initGUI(self, root, initialTestName):
"""Set up the GUI inside the given root window. The test name entry
field will be pre-filled with the given initialTestName.
"""
self.root = root
self.statusVar = tk.StringVar()
self.statusVar.set("Idle")
#tk vars for tracking counts of test result types
self.runCountVar = tk.IntVar()
self.failCountVar = tk.IntVar()
self.errorCountVar = tk.IntVar()
self.skipCountVar = tk.IntVar()
self.expectFailCountVar = tk.IntVar()
self.remainingCountVar = tk.IntVar()
self.top = tk.Frame()
self.top.pack(fill=tk.BOTH, expand=1)
self.createWidgets()
def getDirectoryToDiscover(self):
return filedialog.askdirectory()
def settingsClicked(self):
d = DiscoverSettingsDialog(self.top, self.top_level_dir, self.test_file_glob_pattern)
self.top_level_dir = d.top_level_dir
self.test_file_glob_pattern = d.test_file_glob_pattern
def notifyTestsDiscovered(self, test_suite):
discovered = test_suite.countTestCases()
self.runCountVar.set(0)
self.failCountVar.set(0)
self.errorCountVar.set(0)
self.remainingCountVar.set(discovered)
self.progressBar.setProgressFraction(0.0)
self.errorListbox.delete(0, tk.END)
self.statusVar.set("Discovering tests from %s. Found: %s" %
(self.directory_to_read, discovered))
self.stopGoButton['state'] = tk.NORMAL
def createWidgets(self):
"""Creates and packs the various widgets.
Why is it that GUI code always ends up looking a mess, despite all the
best intentions to keep it tidy? Answers on a postcard, please.
"""
# Status bar
statusFrame = tk.Frame(self.top, relief=tk.SUNKEN, borderwidth=2)
statusFrame.pack(anchor=tk.SW, fill=tk.X, side=tk.BOTTOM)
tk.Label(statusFrame, width=1, textvariable=self.statusVar).pack(side=tk.TOP, fill=tk.X)
# Area to enter name of test to run
leftFrame = tk.Frame(self.top, borderwidth=3)
leftFrame.pack(fill=tk.BOTH, side=tk.LEFT, anchor=tk.NW, expand=1)
suiteNameFrame = tk.Frame(leftFrame, borderwidth=3)
suiteNameFrame.pack(fill=tk.X)
# Progress bar
progressFrame = tk.Frame(leftFrame, relief=tk.GROOVE, borderwidth=2)
progressFrame.pack(fill=tk.X, expand=0, anchor=tk.NW)
tk.Label(progressFrame, text="Progress:").pack(anchor=tk.W)
self.progressBar = ProgressBar(progressFrame, relief=tk.SUNKEN,
borderwidth=2)
self.progressBar.pack(fill=tk.X, expand=1)
# Area with buttons to start/stop tests and quit
buttonFrame = tk.Frame(self.top, borderwidth=3)
buttonFrame.pack(side=tk.LEFT, anchor=tk.NW, fill=tk.Y)
tk.Button(buttonFrame, text="Discover Tests",
command=self.discoverClicked).pack(fill=tk.X)
self.stopGoButton = tk.Button(buttonFrame, text="Start",
command=self.runClicked, state=tk.DISABLED)
self.stopGoButton.pack(fill=tk.X)
tk.Button(buttonFrame, text="Close",
command=self.top.quit).pack(side=tk.BOTTOM, fill=tk.X)
tk.Button(buttonFrame, text="Settings",
command=self.settingsClicked).pack(side=tk.BOTTOM, fill=tk.X)
# Area with labels reporting results
for label, var in (('Run:', self.runCountVar),
('Failures:', self.failCountVar),
('Errors:', self.errorCountVar),
('Skipped:', self.skipCountVar),
('Expected Failures:', self.expectFailCountVar),
('Remaining:', self.remainingCountVar),
):
tk.Label(progressFrame, text=label).pack(side=tk.LEFT)
tk.Label(progressFrame, textvariable=var,
foreground="blue").pack(side=tk.LEFT, fill=tk.X,
expand=1, anchor=tk.W)
# List box showing errors and failures
tk.Label(leftFrame, text="Failures and errors:").pack(anchor=tk.W)
listFrame = tk.Frame(leftFrame, relief=tk.SUNKEN, borderwidth=2)
listFrame.pack(fill=tk.BOTH, anchor=tk.NW, expand=1)
self.errorListbox = tk.Listbox(listFrame, foreground='red',
selectmode=tk.SINGLE,
selectborderwidth=0)
self.errorListbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1,
anchor=tk.NW)
listScroll = tk.Scrollbar(listFrame, command=self.errorListbox.yview)
listScroll.pack(side=tk.LEFT, fill=tk.Y, anchor=tk.N)
self.errorListbox.bind("<Double-1>",
lambda e, self=self: self.showSelectedError())
self.errorListbox.configure(yscrollcommand=listScroll.set)
def errorDialog(self, title, message):
messagebox.showerror(parent=self.root, title=title,
message=message)
def notifyRunning(self):
self.runCountVar.set(0)
self.failCountVar.set(0)
self.errorCountVar.set(0)
self.remainingCountVar.set(self.totalTests)
self.errorInfo = []
while self.errorListbox.size():
self.errorListbox.delete(0)
#Stopping seems not to work, so simply disable the start button
#self.stopGoButton.config(command=self.stopClicked, text="Stop")
self.stopGoButton.config(state=tk.DISABLED)
self.progressBar.setProgressFraction(0.0)
self.top.update_idletasks()
def notifyStopped(self):
self.stopGoButton.config(state=tk.DISABLED)
#self.stopGoButton.config(command=self.runClicked, text="Start")
self.statusVar.set("Idle")
def notifyTestStarted(self, test):
self.statusVar.set(str(test))
self.top.update_idletasks()
def notifyTestFailed(self, test, err):
self.failCountVar.set(1 + self.failCountVar.get())
self.errorListbox.insert(tk.END, "Failure: %s" % test)
self.errorInfo.append((test,err))
def notifyTestErrored(self, test, err):
self.errorCountVar.set(1 + self.errorCountVar.get())
self.errorListbox.insert(tk.END, "Error: %s" % test)
self.errorInfo.append((test,err))
def notifyTestSkipped(self, test, reason):
super(TkTestRunner, self).notifyTestSkipped(test, reason)
self.skipCountVar.set(1 + self.skipCountVar.get())
def notifyTestFailedExpectedly(self, test, err):
super(TkTestRunner, self).notifyTestFailedExpectedly(test, err)
self.expectFailCountVar.set(1 + self.expectFailCountVar.get())
def notifyTestFinished(self, test):
self.remainingCountVar.set(self.remainingCountVar.get() - 1)
self.runCountVar.set(1 + self.runCountVar.get())
fractionDone = float(self.runCountVar.get())/float(self.totalTests)
fillColor = len(self.errorInfo) and "red" or "green"
self.progressBar.setProgressFraction(fractionDone, fillColor)
def showSelectedError(self):
selection = self.errorListbox.curselection()
if not selection: return
selected = int(selection[0])
txt = self.errorListbox.get(selected)
window = tk.Toplevel(self.root)
window.title(txt)
window.protocol('WM_DELETE_WINDOW', window.quit)
test, error = self.errorInfo[selected]
tk.Label(window, text=str(test),
foreground="red", justify=tk.LEFT).pack(anchor=tk.W)
tracebackLines = traceback.format_exception(*error)
tracebackText = "".join(tracebackLines)
tk.Label(window, text=tracebackText, justify=tk.LEFT).pack()
tk.Button(window, text="Close",
command=window.quit).pack(side=tk.BOTTOM)
window.bind('<Key-Return>', lambda e, w=window: w.quit())
window.mainloop()
window.destroy()
class ProgressBar(tk.Frame):
"""A simple progress bar that shows a percentage progress in
the given colour."""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.canvas = tk.Canvas(self, height='20', width='60',
background='white', borderwidth=3)
self.canvas.pack(fill=tk.X, expand=1)
self.rect = self.text = None
self.canvas.bind('<Configure>', self.paint)
self.setProgressFraction(0.0)
def setProgressFraction(self, fraction, color='blue'):
self.fraction = fraction
self.color = color
self.paint()
self.canvas.update_idletasks()
def paint(self, *args):
totalWidth = self.canvas.winfo_width()
width = int(self.fraction * float(totalWidth))
height = self.canvas.winfo_height()
if self.rect is not None: self.canvas.delete(self.rect)
if self.text is not None: self.canvas.delete(self.text)
self.rect = self.canvas.create_rectangle(0, 0, width, height,
fill=self.color)
percentString = "%3.0f%%" % (100.0 * self.fraction)
self.text = self.canvas.create_text(totalWidth/2, height/2,
anchor=tk.CENTER,
text=percentString)
def main(initialTestName=""):
root = tk.Tk()
root.title("PyUnit")
runner = TkTestRunner(root, initialTestName)
root.protocol('WM_DELETE_WINDOW', root.quit)
root.mainloop()
if __name__ == '__main__':
if len(sys.argv) == 2:
main(sys.argv[1])
else:
main()
| 18,560 | 479 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/parser/unparse.py | "Usage: unparse.py <path to source file>"
import sys
import ast
import tokenize
import io
import os
# Large float and imaginary literals get turned into infinities in the AST.
# We unparse those infinities to INFSTR.
INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
def interleave(inter, f, seq):
"""Call f on each item in seq, calling inter() in between.
"""
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x)
class Unparser:
"""Methods in this class recursively traverse an AST and
output source code for the abstract syntax; original formatting
is disregarded. """
def __init__(self, tree, file = sys.stdout):
"""Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file."""
self.f = file
self._indent = 0
self.dispatch(tree)
print("", file=self.f)
self.f.flush()
def fill(self, text = ""):
"Indent a piece of text, according to the current indentation level"
self.f.write("\n"+" "*self._indent + text)
def write(self, text):
"Append a piece of text to the current line."
self.f.write(text)
def enter(self):
"Print ':', and increase the indentation."
self.write(":")
self._indent += 1
def leave(self):
"Decrease the indentation level."
self._indent -= 1
def dispatch(self, tree):
"Dispatcher function, dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, "_"+tree.__class__.__name__)
meth(tree)
############### Unparsing methods ######################
# There should be one method per concrete grammar type #
# Constructors should be grouped by sum type. Ideally, #
# this would follow the order in the grammar, but #
# currently doesn't. #
########################################################
def _Module(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
# stmt
def _Expr(self, tree):
self.fill()
self.dispatch(tree.value)
def _Import(self, t):
self.fill("import ")
interleave(lambda: self.write(", "), self.dispatch, t.names)
def _ImportFrom(self, t):
self.fill("from ")
self.write("." * t.level)
if t.module:
self.write(t.module)
self.write(" import ")
interleave(lambda: self.write(", "), self.dispatch, t.names)
def _Assign(self, t):
self.fill()
for target in t.targets:
self.dispatch(target)
self.write(" = ")
self.dispatch(t.value)
def _AugAssign(self, t):
self.fill()
self.dispatch(t.target)
self.write(" "+self.binop[t.op.__class__.__name__]+"= ")
self.dispatch(t.value)
def _AnnAssign(self, t):
self.fill()
if not t.simple and isinstance(t.target, ast.Name):
self.write('(')
self.dispatch(t.target)
if not t.simple and isinstance(t.target, ast.Name):
self.write(')')
self.write(": ")
self.dispatch(t.annotation)
if t.value:
self.write(" = ")
self.dispatch(t.value)
def _Return(self, t):
self.fill("return")
if t.value:
self.write(" ")
self.dispatch(t.value)
def _Pass(self, t):
self.fill("pass")
def _Break(self, t):
self.fill("break")
def _Continue(self, t):
self.fill("continue")
def _Delete(self, t):
self.fill("del ")
interleave(lambda: self.write(", "), self.dispatch, t.targets)
def _Assert(self, t):
self.fill("assert ")
self.dispatch(t.test)
if t.msg:
self.write(", ")
self.dispatch(t.msg)
def _Global(self, t):
self.fill("global ")
interleave(lambda: self.write(", "), self.write, t.names)
def _Nonlocal(self, t):
self.fill("nonlocal ")
interleave(lambda: self.write(", "), self.write, t.names)
def _Await(self, t):
self.write("(")
self.write("await")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _Yield(self, t):
self.write("(")
self.write("yield")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _YieldFrom(self, t):
self.write("(")
self.write("yield from")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _Raise(self, t):
self.fill("raise")
if not t.exc:
assert not t.cause
return
self.write(" ")
self.dispatch(t.exc)
if t.cause:
self.write(" from ")
self.dispatch(t.cause)
def _Try(self, t):
self.fill("try")
self.enter()
self.dispatch(t.body)
self.leave()
for ex in t.handlers:
self.dispatch(ex)
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
if t.finalbody:
self.fill("finally")
self.enter()
self.dispatch(t.finalbody)
self.leave()
def _ExceptHandler(self, t):
self.fill("except")
if t.type:
self.write(" ")
self.dispatch(t.type)
if t.name:
self.write(" as ")
self.write(t.name)
self.enter()
self.dispatch(t.body)
self.leave()
def _ClassDef(self, t):
self.write("\n")
for deco in t.decorator_list:
self.fill("@")
self.dispatch(deco)
self.fill("class "+t.name)
self.write("(")
comma = False
for e in t.bases:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
for e in t.keywords:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
self.write(")")
self.enter()
self.dispatch(t.body)
self.leave()
def _FunctionDef(self, t):
self.__FunctionDef_helper(t, "def")
def _AsyncFunctionDef(self, t):
self.__FunctionDef_helper(t, "async def")
def __FunctionDef_helper(self, t, fill_suffix):
self.write("\n")
for deco in t.decorator_list:
self.fill("@")
self.dispatch(deco)
def_str = fill_suffix+" "+t.name + "("
self.fill(def_str)
self.dispatch(t.args)
self.write(")")
if t.returns:
self.write(" -> ")
self.dispatch(t.returns)
self.enter()
self.dispatch(t.body)
self.leave()
def _For(self, t):
self.__For_helper("for ", t)
def _AsyncFor(self, t):
self.__For_helper("async for ", t)
def __For_helper(self, fill, t):
self.fill(fill)
self.dispatch(t.target)
self.write(" in ")
self.dispatch(t.iter)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _If(self, t):
self.fill("if ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
# collapse nested ifs into equivalent elifs.
while (t.orelse and len(t.orelse) == 1 and
isinstance(t.orelse[0], ast.If)):
t = t.orelse[0]
self.fill("elif ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
# final else
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _While(self, t):
self.fill("while ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _With(self, t):
self.fill("with ")
interleave(lambda: self.write(", "), self.dispatch, t.items)
self.enter()
self.dispatch(t.body)
self.leave()
def _AsyncWith(self, t):
self.fill("async with ")
interleave(lambda: self.write(", "), self.dispatch, t.items)
self.enter()
self.dispatch(t.body)
self.leave()
# expr
def _Bytes(self, t):
self.write(repr(t.s))
def _Str(self, tree):
self.write(repr(tree.s))
def _JoinedStr(self, t):
self.write("f")
string = io.StringIO()
self._fstring_JoinedStr(t, string.write)
self.write(repr(string.getvalue()))
def _FormattedValue(self, t):
self.write("f")
string = io.StringIO()
self._fstring_FormattedValue(t, string.write)
self.write(repr(string.getvalue()))
def _fstring_JoinedStr(self, t, write):
for value in t.values:
meth = getattr(self, "_fstring_" + type(value).__name__)
meth(value, write)
def _fstring_Str(self, t, write):
value = t.s.replace("{", "{{").replace("}", "}}")
write(value)
def _fstring_Constant(self, t, write):
assert isinstance(t.value, str)
value = t.value.replace("{", "{{").replace("}", "}}")
write(value)
def _fstring_FormattedValue(self, t, write):
write("{")
expr = io.StringIO()
Unparser(t.value, expr)
expr = expr.getvalue().rstrip("\n")
if expr.startswith("{"):
write(" ") # Separate pair of opening brackets as "{ {"
write(expr)
if t.conversion != -1:
conversion = chr(t.conversion)
assert conversion in "sra"
write(f"!{conversion}")
if t.format_spec:
write(":")
meth = getattr(self, "_fstring_" + type(t.format_spec).__name__)
meth(t.format_spec, write)
write("}")
def _Name(self, t):
self.write(t.id)
def _write_constant(self, value):
if isinstance(value, (float, complex)):
self.write(repr(value).replace("inf", INFSTR))
else:
self.write(repr(value))
def _Constant(self, t):
value = t.value
if isinstance(value, tuple):
self.write("(")
if len(value) == 1:
self._write_constant(value[0])
self.write(",")
else:
interleave(lambda: self.write(", "), self._write_constant, value)
self.write(")")
else:
self._write_constant(t.value)
def _NameConstant(self, t):
self.write(repr(t.value))
def _Num(self, t):
# Substitute overflowing decimal literal for AST infinities.
self.write(repr(t.n).replace("inf", INFSTR))
def _List(self, t):
self.write("[")
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write("]")
def _ListComp(self, t):
self.write("[")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write("]")
def _GeneratorExp(self, t):
self.write("(")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write(")")
def _SetComp(self, t):
self.write("{")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write("}")
def _DictComp(self, t):
self.write("{")
self.dispatch(t.key)
self.write(": ")
self.dispatch(t.value)
for gen in t.generators:
self.dispatch(gen)
self.write("}")
def _comprehension(self, t):
if t.is_async:
self.write(" async for ")
else:
self.write(" for ")
self.dispatch(t.target)
self.write(" in ")
self.dispatch(t.iter)
for if_clause in t.ifs:
self.write(" if ")
self.dispatch(if_clause)
def _IfExp(self, t):
self.write("(")
self.dispatch(t.body)
self.write(" if ")
self.dispatch(t.test)
self.write(" else ")
self.dispatch(t.orelse)
self.write(")")
def _Set(self, t):
assert(t.elts) # should be at least one element
self.write("{")
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write("}")
def _Dict(self, t):
self.write("{")
def write_key_value_pair(k, v):
self.dispatch(k)
self.write(": ")
self.dispatch(v)
def write_item(item):
k, v = item
if k is None:
# for dictionary unpacking operator in dicts {**{'y': 2}}
# see PEP 448 for details
self.write("**")
self.dispatch(v)
else:
write_key_value_pair(k, v)
interleave(lambda: self.write(", "), write_item, zip(t.keys, t.values))
self.write("}")
def _Tuple(self, t):
self.write("(")
if len(t.elts) == 1:
elt = t.elts[0]
self.dispatch(elt)
self.write(",")
else:
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(")")
unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
def _UnaryOp(self, t):
self.write("(")
self.write(self.unop[t.op.__class__.__name__])
self.write(" ")
self.dispatch(t.operand)
self.write(")")
binop = { "Add":"+", "Sub":"-", "Mult":"*", "MatMult":"@", "Div":"/", "Mod":"%",
"LShift":"<<", "RShift":">>", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
"FloorDiv":"//", "Pow": "**"}
def _BinOp(self, t):
self.write("(")
self.dispatch(t.left)
self.write(" " + self.binop[t.op.__class__.__name__] + " ")
self.dispatch(t.right)
self.write(")")
cmpops = {"Eq":"==", "NotEq":"!=", "Lt":"<", "LtE":"<=", "Gt":">", "GtE":">=",
"Is":"is", "IsNot":"is not", "In":"in", "NotIn":"not in"}
def _Compare(self, t):
self.write("(")
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
self.write(" " + self.cmpops[o.__class__.__name__] + " ")
self.dispatch(e)
self.write(")")
boolops = {ast.And: 'and', ast.Or: 'or'}
def _BoolOp(self, t):
self.write("(")
s = " %s " % self.boolops[t.op.__class__]
interleave(lambda: self.write(s), self.dispatch, t.values)
self.write(")")
def _Attribute(self,t):
self.dispatch(t.value)
# Special case: 3.__abs__() is a syntax error, so if t.value
# is an integer literal then we need to either parenthesize
# it or add an extra space to get 3 .__abs__().
if ((isinstance(t.value, ast.Num) and isinstance(t.value.n, int))
or (isinstance(t.value, ast.Constant) and isinstance(t.value.value, int))):
self.write(" ")
self.write(".")
self.write(t.attr)
def _Call(self, t):
self.dispatch(t.func)
self.write("(")
comma = False
for e in t.args:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
for e in t.keywords:
if comma: self.write(", ")
else: comma = True
self.dispatch(e)
self.write(")")
def _Subscript(self, t):
self.dispatch(t.value)
self.write("[")
self.dispatch(t.slice)
self.write("]")
def _Starred(self, t):
self.write("*")
self.dispatch(t.value)
# slice
def _Ellipsis(self, t):
self.write("...")
def _Index(self, t):
self.dispatch(t.value)
def _Slice(self, t):
if t.lower:
self.dispatch(t.lower)
self.write(":")
if t.upper:
self.dispatch(t.upper)
if t.step:
self.write(":")
self.dispatch(t.step)
def _ExtSlice(self, t):
interleave(lambda: self.write(', '), self.dispatch, t.dims)
# argument
def _arg(self, t):
self.write(t.arg)
if t.annotation:
self.write(": ")
self.dispatch(t.annotation)
# others
def _arguments(self, t):
first = True
# normal arguments
defaults = [None] * (len(t.args) - len(t.defaults)) + t.defaults
for a, d in zip(t.args, defaults):
if first:first = False
else: self.write(", ")
self.dispatch(a)
if d:
self.write("=")
self.dispatch(d)
# varargs, or bare '*' if no varargs but keyword-only arguments present
if t.vararg or t.kwonlyargs:
if first:first = False
else: self.write(", ")
self.write("*")
if t.vararg:
self.write(t.vararg.arg)
if t.vararg.annotation:
self.write(": ")
self.dispatch(t.vararg.annotation)
# keyword-only arguments
if t.kwonlyargs:
for a, d in zip(t.kwonlyargs, t.kw_defaults):
if first:first = False
else: self.write(", ")
self.dispatch(a),
if d:
self.write("=")
self.dispatch(d)
# kwargs
if t.kwarg:
if first:first = False
else: self.write(", ")
self.write("**"+t.kwarg.arg)
if t.kwarg.annotation:
self.write(": ")
self.dispatch(t.kwarg.annotation)
def _keyword(self, t):
if t.arg is None:
self.write("**")
else:
self.write(t.arg)
self.write("=")
self.dispatch(t.value)
def _Lambda(self, t):
self.write("(")
self.write("lambda ")
self.dispatch(t.args)
self.write(": ")
self.dispatch(t.body)
self.write(")")
def _alias(self, t):
self.write(t.name)
if t.asname:
self.write(" as "+t.asname)
def _withitem(self, t):
self.dispatch(t.context_expr)
if t.optional_vars:
self.write(" as ")
self.dispatch(t.optional_vars)
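# A minimal usage sketch (illustrative, not part of the command-line
# interface below): unparse an AST built directly from a source string.
#
#     import ast, io
#     tree = ast.parse("x = 1\nprint(x + 2)")
#     buf = io.StringIO()
#     Unparser(tree, buf)
#     print(buf.getvalue())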
def roundtrip(filename, output=sys.stdout):
with open(filename, "rb") as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, "r", encoding=encoding) as pyfile:
source = pyfile.read()
tree = compile(source, filename, "exec", ast.PyCF_ONLY_AST)
Unparser(tree, output)
def testdir(a):
try:
names = [n for n in os.listdir(a) if n.endswith('.py')]
except OSError:
print("Directory not readable: %s" % a, file=sys.stderr)
else:
for n in names:
fullname = os.path.join(a, n)
if os.path.isfile(fullname):
output = io.StringIO()
print('Testing %s' % fullname)
try:
roundtrip(fullname, output)
except Exception as e:
print(' Failed to compile, exception is %s' % repr(e))
elif os.path.isdir(fullname):
testdir(fullname)
def main(args):
if args[0] == '--testdir':
for a in args[1:]:
testdir(a)
else:
for a in args:
roundtrip(a)
if __name__=='__main__':
main(sys.argv[1:])
| 20,138 | 707 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/markov.py | #!/usr/bin/env python3
"""
Markov chain simulation of words or characters.
"""
class Markov:
def __init__(self, histsize, choice):
self.histsize = histsize
self.choice = choice
self.trans = {}
def add(self, state, next):
self.trans.setdefault(state, []).append(next)
def put(self, seq):
n = self.histsize
add = self.add
add(None, seq[:0])
for i in range(len(seq)):
add(seq[max(0, i-n):i], seq[i:i+1])
add(seq[len(seq)-n:], None)
def get(self):
choice = self.choice
trans = self.trans
n = self.histsize
seq = choice(trans[None])
while True:
subseq = seq[max(0, len(seq)-n):]
options = trans[subseq]
next = choice(options)
if not next:
break
seq += next
return seq
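# A small illustrative sketch (hypothetical input; the real driver in test()
# below reads files or stdin): word-level chains with a history of 2.
#
#     import random
#     m = Markov(2, random.choice)
#     m.put(tuple("the quick brown fox".split()))
#     m.put(tuple("the quick red fox".split()))
#     print(" ".join(m.get()))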
def test():
import sys, random, getopt
args = sys.argv[1:]
try:
opts, args = getopt.getopt(args, '0123456789cdwq')
except getopt.error:
print('Usage: %s [-#] [-cddqw] [file] ...' % sys.argv[0])
print('Options:')
print('-#: 1-digit history size (default 2)')
print('-c: characters (default)')
print('-w: words')
print('-d: more debugging output')
print('-q: no debugging output')
        print('Input files (default stdin) are split into paragraphs')
        print('separated by blank lines, and each paragraph is split')
        print('into words by whitespace, then reconcatenated with')
print('exactly one space separating words.')
print('Output consists of paragraphs separated by blank')
print('lines, where lines are no longer than 72 characters.')
sys.exit(2)
histsize = 2
do_words = False
debug = 1
for o, a in opts:
if '-0' <= o <= '-9': histsize = int(o[1:])
if o == '-c': do_words = False
if o == '-d': debug += 1
if o == '-q': debug = 0
if o == '-w': do_words = True
if not args:
args = ['-']
m = Markov(histsize, random.choice)
try:
for filename in args:
if filename == '-':
f = sys.stdin
if f.isatty():
print('Sorry, need stdin from file')
continue
else:
f = open(filename, 'r')
if debug: print('processing', filename, '...')
text = f.read()
f.close()
paralist = text.split('\n\n')
for para in paralist:
if debug > 1: print('feeding ...')
words = para.split()
if words:
if do_words:
data = tuple(words)
else:
data = ' '.join(words)
m.put(data)
except KeyboardInterrupt:
print('Interrupted -- continue with data read so far')
if not m.trans:
print('No valid input files')
return
if debug: print('done.')
if debug > 1:
for key in m.trans.keys():
if key is None or len(key) < histsize:
print(repr(key), m.trans[key])
if histsize == 0: print(repr(''), m.trans[''])
print()
while True:
data = m.get()
if do_words:
words = data
else:
words = data.split()
n = 0
limit = 72
for w in words:
if n + len(w) > limit:
print()
n = 0
print(w, end=' ')
n += len(w) + 1
print()
print()
if __name__ == "__main__":
test()
| 3,685 | 126 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/beer.py | #!/usr/bin/env python3
"""
A Python version of the classic "bottles of beer on the wall" programming
example.
By Guido van Rossum, demystified after a version by Fredrik Lundh.
"""
import sys
n = 100
if sys.argv[1:]:
n = int(sys.argv[1])
def bottle(n):
if n == 0: return "no more bottles of beer"
if n == 1: return "one bottle of beer"
return str(n) + " bottles of beer"
for i in range(n, 0, -1):
print(bottle(i), "on the wall,")
print(bottle(i) + ".")
print("Take one down, pass it around,")
print(bottle(i-1), "on the wall.")
| 566 | 26 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/mcast.py | #!/usr/bin/env python3
"""
Send/receive UDP multicast packets.
Requires that your OS kernel supports IP multicast.
Usage:
mcast -s (sender, IPv4)
mcast -s -6 (sender, IPv6)
mcast (receivers, IPv4)
mcast -6 (receivers, IPv6)
"""
MYPORT = 8123
MYGROUP_4 = '225.0.0.250'
MYGROUP_6 = 'ff15:7079:7468:6f6e:6465:6d6f:6d63:6173'
MYTTL = 1 # Increase to reach other networks
import time
import struct
import socket
import sys
def main():
group = MYGROUP_6 if "-6" in sys.argv[1:] else MYGROUP_4
if "-s" in sys.argv[1:]:
sender(group)
else:
receiver(group)
def sender(group):
addrinfo = socket.getaddrinfo(group, None)[0]
s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
# Set Time-to-live (optional)
ttl_bin = struct.pack('@i', MYTTL)
if addrinfo[0] == socket.AF_INET: # IPv4
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl_bin)
else:
s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, ttl_bin)
while True:
data = repr(time.time()).encode('utf-8') + b'\0'
s.sendto(data, (addrinfo[4][0], MYPORT))
time.sleep(1)
def receiver(group):
# Look up multicast group address in name server and find out IP version
addrinfo = socket.getaddrinfo(group, None)[0]
# Create a socket
s = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
# Allow multiple copies of this program on one machine
# (not strictly needed)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind it to the port
s.bind(('', MYPORT))
group_bin = socket.inet_pton(addrinfo[0], addrinfo[4][0])
# Join group
if addrinfo[0] == socket.AF_INET: # IPv4
mreq = group_bin + struct.pack('=I', socket.INADDR_ANY)
s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
else:
mreq = group_bin + struct.pack('@I', 0)
s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
# Loop, printing any data we receive
while True:
data, sender = s.recvfrom(1500)
        while data[-1:] == b'\0': data = data[:-1] # Strip trailing \0's
print(str(sender) + ' ' + repr(data))
if __name__ == '__main__':
main()
| 2,223 | 83 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/rpythond.py | #!/usr/bin/env python3
"""
Remote python server.
Execute Python commands remotely and send output back.
WARNING: This version has a gaping security hole -- it accepts requests
from any host on the Internet!
"""
import sys
from socket import socket, AF_INET, SOCK_STREAM
import io
import traceback
PORT = 4127
BUFSIZE = 1024
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = PORT
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', port))
s.listen(1)
while True:
conn, (remotehost, remoteport) = s.accept()
print('connection from', remotehost, remoteport)
request = b''
while 1:
data = conn.recv(BUFSIZE)
if not data:
break
request += data
reply = execute(request.decode())
conn.send(reply.encode())
conn.close()
def execute(request):
stdout = sys.stdout
stderr = sys.stderr
sys.stdout = sys.stderr = fakefile = io.StringIO()
try:
try:
exec(request, {}, {})
except:
print()
traceback.print_exc(100)
finally:
sys.stderr = stderr
sys.stdout = stdout
return fakefile.getvalue()
try:
main()
except KeyboardInterrupt:
pass
| 1,286 | 59 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/README | This directory contains a collection of demonstration scripts for
various aspects of Python programming.
beer.py Well-known programming example: Bottles of beer.
eiffel.py Python advanced magic: A metaclass for Eiffel post/preconditions.
hanoi.py Well-known programming example: Towers of Hanoi.
life.py Curses programming: Simple game-of-life.
markov.py Algorithms: Markov chain simulation.
mcast.py Network programming: Send and receive UDP multicast packets.
queens.py Well-known programming example: N-Queens problem.
redemo.py Regular Expressions: GUI script to test regexes.
rpython.py Network programming: Small client for remote code execution.
rpythond.py Network programming: Small server for remote code execution.
sortvisu.py GUI programming: Visualization of different sort algorithms.
ss1.py GUI/Application programming: A simple spreadsheet application.
vector.py Python basics: A vector class demonstrating special methods.
cosmopolitan/third_party/python/Tools/demo/queens.py | #!/usr/bin/env python3
"""
N queens problem.
The (well-known) problem is due to Niklaus Wirth.
This solution is inspired by Dijkstra (Structured Programming). It is
a classic recursive backtracking approach.
"""
N = 8 # Default; command line overrides
class Queens:
def __init__(self, n=N):
self.n = n
self.reset()
def reset(self):
n = self.n
self.y = [None] * n # Where is the queen in column x
self.row = [0] * n # Is row[y] safe?
self.up = [0] * (2*n-1) # Is upward diagonal[x-y] safe?
self.down = [0] * (2*n-1) # Is downward diagonal[x+y] safe?
self.nfound = 0 # Instrumentation
def solve(self, x=0): # Recursive solver
for y in range(self.n):
if self.safe(x, y):
self.place(x, y)
if x+1 == self.n:
self.display()
else:
self.solve(x+1)
self.remove(x, y)
def safe(self, x, y):
return not self.row[y] and not self.up[x-y] and not self.down[x+y]
def place(self, x, y):
self.y[x] = y
self.row[y] = 1
self.up[x-y] = 1
self.down[x+y] = 1
def remove(self, x, y):
self.y[x] = None
self.row[y] = 0
self.up[x-y] = 0
self.down[x+y] = 0
silent = 0 # If true, count solutions only
def display(self):
self.nfound = self.nfound + 1
if self.silent:
return
print('+-' + '--'*self.n + '+')
for y in range(self.n-1, -1, -1):
print('|', end=' ')
for x in range(self.n):
if self.y[x] == y:
print("Q", end=' ')
else:
print(".", end=' ')
print('|')
print('+-' + '--'*self.n + '+')
def main():
import sys
silent = 0
n = N
if sys.argv[1:2] == ['-n']:
silent = 1
del sys.argv[1]
if sys.argv[1:]:
n = int(sys.argv[1])
q = Queens(n)
q.silent = silent
q.solve()
print("Found", q.nfound, "solutions.")
if __name__ == "__main__":
main()
| 2,270 | 86 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/rpython.py | #!/usr/bin/env python3
"""
Remote python client.
Execute Python commands remotely and send output back.
"""
import sys
from socket import socket, AF_INET, SOCK_STREAM, SHUT_WR
PORT = 4127
BUFSIZE = 1024
def main():
if len(sys.argv) < 3:
print("usage: rpython host command")
sys.exit(2)
host = sys.argv[1]
port = PORT
i = host.find(':')
if i >= 0:
        port = int(host[i+1:])
host = host[:i]
command = ' '.join(sys.argv[2:])
s = socket(AF_INET, SOCK_STREAM)
s.connect((host, port))
s.send(command.encode())
s.shutdown(SHUT_WR)
reply = b''
while True:
data = s.recv(BUFSIZE)
if not data:
break
reply += data
print(reply.decode(), end=' ')
s.close()
main()
| 778 | 39 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/vector.py | #!/usr/bin/env python3
"""
A demonstration of classes and their special methods in Python.
"""
class Vec:
"""A simple vector class.
Instances of the Vec class can be constructed from numbers
>>> a = Vec(1, 2, 3)
>>> b = Vec(3, 2, 1)
added
>>> a + b
Vec(4, 4, 4)
subtracted
>>> a - b
Vec(-2, 0, 2)
and multiplied by a scalar on the left
>>> 3.0 * a
Vec(3.0, 6.0, 9.0)
or on the right
>>> a * 3.0
Vec(3.0, 6.0, 9.0)
"""
def __init__(self, *v):
self.v = list(v)
@classmethod
def fromlist(cls, v):
if not isinstance(v, list):
raise TypeError
inst = cls()
inst.v = v
return inst
def __repr__(self):
args = ', '.join(repr(x) for x in self.v)
return 'Vec({})'.format(args)
def __len__(self):
return len(self.v)
def __getitem__(self, i):
return self.v[i]
def __add__(self, other):
# Element-wise addition
v = [x + y for x, y in zip(self.v, other.v)]
return Vec.fromlist(v)
def __sub__(self, other):
# Element-wise subtraction
v = [x - y for x, y in zip(self.v, other.v)]
return Vec.fromlist(v)
def __mul__(self, scalar):
# Multiply by scalar
v = [x * scalar for x in self.v]
return Vec.fromlist(v)
__rmul__ = __mul__
def test():
import doctest
doctest.testmod()
test()
| 1,452 | 75 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/demo/eiffel.py | #!/usr/bin/env python3
"""
Support Eiffel-style preconditions and postconditions for functions.
An example for Python metaclasses.
"""
import unittest
from types import FunctionType as function
class EiffelBaseMetaClass(type):
def __new__(meta, name, bases, dict):
meta.convert_methods(dict)
return super(EiffelBaseMetaClass, meta).__new__(
meta, name, bases, dict)
@classmethod
def convert_methods(cls, dict):
"""Replace functions in dict with EiffelMethod wrappers.
The dict is modified in place.
        Functions whose names end in _pre or _post are left in the dict and
        used as pre-/postcondition hooks for the correspondingly named
        method, if one exists.
"""
# find methods with pre or post conditions
methods = []
for k, v in dict.items():
if k.endswith('_pre') or k.endswith('_post'):
assert isinstance(v, function)
elif isinstance(v, function):
methods.append(k)
for m in methods:
pre = dict.get("%s_pre" % m)
post = dict.get("%s_post" % m)
if pre or post:
dict[m] = cls.make_eiffel_method(dict[m], pre, post)
class EiffelMetaClass1(EiffelBaseMetaClass):
# an implementation of the "eiffel" meta class that uses nested functions
@staticmethod
def make_eiffel_method(func, pre, post):
def method(self, *args, **kwargs):
if pre:
pre(self, *args, **kwargs)
rv = func(self, *args, **kwargs)
if post:
post(self, rv, *args, **kwargs)
return rv
if func.__doc__:
method.__doc__ = func.__doc__
return method
class EiffelMethodWrapper:
def __init__(self, inst, descr):
self._inst = inst
self._descr = descr
def __call__(self, *args, **kwargs):
return self._descr.callmethod(self._inst, args, kwargs)
class EiffelDescriptor:
def __init__(self, func, pre, post):
self._func = func
self._pre = pre
self._post = post
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __get__(self, obj, cls):
return EiffelMethodWrapper(obj, self)
def callmethod(self, inst, args, kwargs):
if self._pre:
self._pre(inst, *args, **kwargs)
x = self._func(inst, *args, **kwargs)
if self._post:
self._post(inst, x, *args, **kwargs)
return x
class EiffelMetaClass2(EiffelBaseMetaClass):
# an implementation of the "eiffel" meta class that uses descriptors
make_eiffel_method = EiffelDescriptor
class Tests(unittest.TestCase):
def testEiffelMetaClass1(self):
self._test(EiffelMetaClass1)
def testEiffelMetaClass2(self):
self._test(EiffelMetaClass2)
def _test(self, metaclass):
class Eiffel(metaclass=metaclass):
pass
class Test(Eiffel):
def m(self, arg):
"""Make it a little larger"""
return arg + 1
def m2(self, arg):
"""Make it a little larger"""
return arg + 1
def m2_pre(self, arg):
assert arg > 0
def m2_post(self, result, arg):
assert result > arg
class Sub(Test):
def m2(self, arg):
return arg**2
def m2_post(self, Result, arg):
super(Sub, self).m2_post(Result, arg)
assert Result < 100
t = Test()
self.assertEqual(t.m(1), 2)
self.assertEqual(t.m2(1), 2)
self.assertRaises(AssertionError, t.m2, 0)
s = Sub()
self.assertRaises(AssertionError, s.m2, 1)
self.assertRaises(AssertionError, s.m2, 10)
self.assertEqual(s.m2(5), 25)
if __name__ == "__main__":
unittest.main()
| 3,906 | 147 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/gdb/libpython.py | #!/usr/bin/python
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyBytesObject* instances, we can
generate a proxy value within the gdb process that is a list of bytes
instances:
[b"foo", b"bar", b"baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
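# Illustrative interactive use (the local name "op" is made up and assumed to
# be a PyObject* in the selected frame; with Tools/gdb on sys.path this file
# can be imported by name instead of relying on auto-loading):
#
#     (gdb) python import libpython
#     (gdb) python v = gdb.selected_frame().read_var('op')
#     (gdb) python print(libpython.PyObjectPtr.from_pyobject_ptr(v).get_truncated_repr(64))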
# NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax
# compatible (2.6+ and 3.0+). See #19308.
from __future__ import print_function
import gdb
import os
import locale
import sys
if sys.version_info[0] >= 3:
unichr = chr
xrange = range
long = int
# Look up the gdb.Type for some standard types:
# Those need to be refreshed as types (pointer sizes) may change when
# gdb loads different executables
def _type_char_ptr():
return gdb.lookup_type('char').pointer() # char*
def _type_unsigned_char_ptr():
return gdb.lookup_type('unsigned char').pointer() # unsigned char*
def _type_unsigned_short_ptr():
return gdb.lookup_type('unsigned short').pointer()
def _type_unsigned_int_ptr():
return gdb.lookup_type('unsigned int').pointer()
def _sizeof_void_p():
return gdb.lookup_type('void').pointer().sizeof
# value computed later, see PyUnicodeObjectPtr.proxy()
_is_pep393 = None
Py_TPFLAGS_HEAPTYPE = (1 << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1 << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1 << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1 << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1 << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31)
MAX_OUTPUT_LEN=1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
EVALFRAME = '_PyEval_EvalFrameDefault'
class NullPyObjectPtr(RuntimeError):
pass
def safety_limit(val):
# Given an integer value from the process being debugged, limit it to some
# safety threshold so that arbitrary breakage within said process doesn't
# break the gdb process too much (e.g. sizes of iterations, sizes of lists)
return min(val, 1000)
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
# threshold in case the data was corrupted
return xrange(safety_limit(int(val)))
if sys.version_info[0] >= 3:
def write_unicode(file, text):
file.write(text)
else:
def write_unicode(file, text):
# Write a byte or unicode string to file. Unicode strings are encoded to
# ENCODING encoding with 'backslashreplace' error handler to avoid
# UnicodeEncodeError.
if isinstance(text, unicode):
text = text.encode(ENCODING, 'backslashreplace')
file.write(text)
try:
os_fsencode = os.fsencode
except AttributeError:
def os_fsencode(filename):
if not isinstance(filename, unicode):
return filename
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
# mbcs doesn't support surrogateescape
return filename.encode(encoding)
encoded = []
for char in filename:
# surrogateescape error handler
if 0xDC80 <= ord(char) <= 0xDCFF:
byte = chr(ord(char) - 0xDC00)
else:
byte = char.encode(encoding)
encoded.append(byte)
return ''.join(encoded)
class StringTruncated(RuntimeError):
pass
class TruncatedStringIO(object):
'''Similar to io.StringIO, but can truncate the output by raising a
StringTruncated exception'''
def __init__(self, maxlen=None):
self._val = ''
self.maxlen = maxlen
def write(self, data):
if self.maxlen:
if len(data) + len(self._val) > self.maxlen:
# Truncation:
self._val += data[0:self.maxlen - len(self._val)]
raise StringTruncated()
self._val += data
def getvalue(self):
return self._val
class PyObjectPtr(object):
"""
Class wrapping a gdb.Value that's either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyBytesObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
        In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
ob_type = self.type()
tp_name = ob_type.field('tp_name')
return tp_name.string()
# NullPyObjectPtr: NULL tp_name?
# RuntimeError: Can't even read the object at all?
# UnicodeDecodeError: Failed to decode tp_name bytestring
except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
'''
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
'''
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
# RuntimeError: NULL pointers
# UnicodeDecodeError: string() fails to decode the bytestring
except (RuntimeError, UnicodeDecodeError):
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
return cls
#print('tp_flags = 0x%08x' % tp_flags)
#print('tp_name = %r' % tp_name)
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
'method-wrapper': wrapperobject,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
return PyBytesObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
#if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
# return PyTypeObjectPtr
# Use the base class:
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
_typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
'''
Placeholder proxy to use when protecting against infinite recursion due to
loops in the object graph.
Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
'''
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
'''Shared code for use by all classes:
write a representation to file-like object "out"'''
out.write('<')
out.write(name)
# Write dictionary of instance attributes:
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
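    # Python-side counterpart of the C macro _PyObject_VAR_SIZE():
    # tp_basicsize + nitems * tp_itemsize, rounded up to a multiple of
    # sizeof(void *).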
if _PyObject_VAR_SIZE._type_size_t is None:
_PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t')
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(_sizeof_void_p() - 1)
) & ~(_sizeof_void_p() - 1)
).cast(_PyObject_VAR_SIZE._type_size_t)
_PyObject_VAR_SIZE._type_size_t = None
class HeapTypeObjectPtr(PyObjectPtr):
_typename = 'PyObject'
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
(or None if there's a problem)
'''
try:
typeobj = self.type()
dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
if dictoffset != 0:
if dictoffset < 0:
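                    # A negative tp_dictoffset means the instance __dict__ sits at a
                    # fixed offset from the *end* of this variable-sized object, so
                    # convert it into a positive offset from the start.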
type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
if tsize < 0:
tsize = -tsize
size = _PyObject_VAR_SIZE(typeobj, tsize)
dictoffset += size
assert dictoffset > 0
assert dictoffset % _sizeof_void_p() == 0
dictptr = self._gdbval.cast(_type_char_ptr()) + dictoffset
PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
dictptr = dictptr.cast(PyObjectPtrPtr)
return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
except RuntimeError:
# Corrupt data somewhere; fail safe
pass
# Not found, or some kind of error:
return None
def proxyval(self, visited):
'''
Support for classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# Class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
pyop_attrdict = self.get_attr_dict()
_write_instance_repr(out, visited,
self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
def __init__(self, tp_name, args):
self.tp_name = tp_name
self.args = args
def __repr__(self):
return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
arg_proxy = self.pyop_field('args').proxyval(visited)
return ProxyException(self.safe_tp_name(),
arg_proxy)
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write(self.safe_tp_name())
self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
instance within the process being debugged.
"""
_typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>'
% (self.ml_name,
self.pyop_m_self.safe_tp_name(),
self.pyop_m_self.as_address())
)
class PyCFunctionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCFunctionObject*
(see Include/methodobject.h and Objects/methodobject.c)
"""
_typename = 'PyCFunctionObject'
def proxyval(self, visited):
m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
try:
ml_name = m_ml['ml_name'].string()
except UnicodeDecodeError:
ml_name = '<ml_name:UnicodeDecodeError>'
pyop_m_self = self.pyop_field('m_self')
if pyop_m_self.is_null():
return BuiltInFunctionProxy(ml_name)
else:
return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
within the process being debugged.
"""
_typename = 'PyCodeObject'
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
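        # co_lnotab is a byte string of (bytecode increment, line increment)
        # pairs; e.g. the pair b'\x06\x01' means the source line number goes
        # up by 1 after the next 6 bytes of bytecode.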
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
class PyDictObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
within the process being debugged.
"""
_typename = 'PyDictObject'
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
analogous to dict.iteritems()
'''
keys = self.field('ma_keys')
values = self.field('ma_values')
entries, nentries = self._get_entries(keys)
for i in safe_range(nentries):
ep = entries[i]
if long(values):
pyop_value = PyObjectPtr.from_pyobject_ptr(values[i])
else:
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('{...}')
visited.add(self.as_address())
result = {}
for pyop_key, pyop_value in self.iteritems():
proxy_key = pyop_key.proxyval(visited)
proxy_value = pyop_value.proxyval(visited)
result[proxy_key] = proxy_value
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('{...}')
return
visited.add(self.as_address())
out.write('{')
first = True
for pyop_key, pyop_value in self.iteritems():
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write(': ')
pyop_value.write_repr(out, visited)
out.write('}')
def _get_entries(self, keys):
dk_nentries = int(keys['dk_nentries'])
dk_size = int(keys['dk_size'])
try:
# <= Python 3.5
return keys['dk_entries'], dk_size
except RuntimeError:
# >= Python 3.6
pass
if dk_size <= 0xFF:
offset = dk_size
elif dk_size <= 0xFFFF:
offset = 2 * dk_size
elif dk_size <= 0xFFFFFFFF:
offset = 4 * dk_size
else:
offset = 8 * dk_size
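        # Python 3.6+: the dense PyDictKeyEntry array starts right after the
        # dk_indices table, whose slots are 1, 2, 4 or 8 bytes wide depending
        # on dk_size; "offset" is that table's size in bytes.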
ent_addr = keys['dk_indices'].address
ent_addr = ent_addr.cast(_type_unsigned_char_ptr()) + offset
ent_ptr_t = gdb.lookup_type('PyDictKeyEntry').pointer()
ent_addr = ent_addr.cast(ent_ptr_t)
return ent_addr, dk_nentries
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('[...]')
visited.add(self.as_address())
result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))]
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('[...]')
return
visited.add(self.as_address())
out.write('[')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
out.write(']')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
'''
        Python's Include/longintrepr.h has this declaration:
struct _longobject {
PyObject_VAR_HEAD
digit ob_digit[1];
};
with this description:
The absolute value of a number is equal to
SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
Negative numbers are represented with ob_size < 0;
zero is represented by ob_size == 0.
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
'''
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0
ob_digit = self.field('ob_digit')
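        # Each digit i contributes ob_digit[i] * 2**(SHIFT*i); e.g. with
        # SHIFT == 30, ob_digit == [5, 3] encodes 5 + 3*2**30 == 3221225477.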
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15
else:
SHIFT = 30
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 int literal, i.e. without the "L" suffix
proxy = self.proxyval(visited)
out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
def proxyval(self, visited):
if PyLongObjectPtr.proxyval(self, visited):
return True
else:
return False
class PyNoneStructPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyObject* pointing to the
singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
"""
_typename = 'PyObject'
def proxyval(self, visited):
return None
class PyFrameObjectPtr(PyObjectPtr):
_typename = 'PyFrameObject'
def __init__(self, gdbval, cast_to=None):
PyObjectPtr.__init__(self, gdbval, cast_to)
if not self.is_optimized_out():
self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
self.co_name = self.co.pyop_field('co_name')
self.co_filename = self.co.pyop_field('co_filename')
self.f_lineno = int_from_int(self.field('f_lineno'))
self.f_lasti = int_from_int(self.field('f_lasti'))
self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
def iter_locals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the local variables of this frame
'''
if self.is_optimized_out():
return
f_localsplus = self.field('f_localsplus')
for i in safe_range(self.co_nlocals):
pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
if not pyop_value.is_null():
pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
yield (pyop_name, pyop_value)
def iter_globals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the global variables of this frame
'''
if self.is_optimized_out():
return ()
pyop_globals = self.pyop_field('f_globals')
return pyop_globals.iteritems()
def iter_builtins(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the builtin variables
'''
if self.is_optimized_out():
return ()
pyop_builtins = self.pyop_field('f_builtins')
return pyop_builtins.iteritems()
def get_var_by_name(self, name):
'''
Look for the named local variable, returning a (PyObjectPtr, scope) pair
where scope is a string 'local', 'global', 'builtin'
If not found, return (None, None)
'''
for pyop_name, pyop_value in self.iter_locals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'local'
for pyop_name, pyop_value in self.iter_globals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'global'
for pyop_name, pyop_value in self.iter_builtins():
if name == pyop_name.proxyval(set()):
return pyop_value, 'builtin'
return None, None
def filename(self):
'''Get the path of the current Python source file, as a string'''
if self.is_optimized_out():
return '(frame information optimized out)'
return self.co_filename.proxyval(set())
def current_line_num(self):
'''Get current line number as an integer (1-based)
Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
See Objects/lnotab_notes.txt
'''
if self.is_optimized_out():
return None
f_trace = self.field('f_trace')
if long(f_trace) != 0:
# we have a non-NULL f_trace:
return self.f_lineno
try:
return self.co.addr2line(self.f_lasti)
except Exception:
# bpo-34989: addr2line() is a complex function, it can fail in many
# ways. For example, it fails with a TypeError on "FakeRepr" if
# gdb fails to load debug symbols. Use a catch-all "except
# Exception" to make the whole function safe. The caller has to
# handle None anyway for optimized Python.
return None
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
lineno = self.current_line_num()
if lineno is None:
return '(failed to get frame line number)'
filename = self.filename()
try:
with open(os_fsencode(filename), 'r') as fp:
lines = fp.readlines()
except IOError:
return None
try:
# Convert from 1-based current_line_num to 0-based list offset
return lines[lineno - 1]
except IndexError:
return None
def write_repr(self, out, visited):
if self.is_optimized_out():
out.write('(frame information optimized out)')
return
lineno = self.current_line_num()
lineno = str(lineno) if lineno is not None else "?"
out.write('Frame 0x%x, for file %s, line %s, in %s ('
% (self.as_address(),
self.co_filename.proxyval(visited),
lineno,
self.co_name.proxyval(visited)))
first = True
for pyop_name, pyop_value in self.iter_locals():
if not first:
out.write(', ')
first = False
out.write(pyop_name.proxyval(visited))
out.write('=')
pyop_value.write_repr(out, visited)
out.write(')')
def print_traceback(self):
if self.is_optimized_out():
sys.stdout.write(' (frame information optimized out)\n')
return
visited = set()
lineno = self.current_line_num()
lineno = str(lineno) if lineno is not None else "?"
sys.stdout.write(' File "%s", line %s, in %s\n'
% (self.co_filename.proxyval(visited),
lineno,
self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
_typename = 'PySetObject'
@classmethod
def _dummy_key(self):
return gdb.lookup_global_symbol('_PySet_Dummy').value()
def __iter__(self):
dummy_ptr = self._dummy_key()
table = self.field('table')
for i in safe_range(self.field('mask') + 1):
setentry = table[i]
key = setentry['key']
if key != 0 and key != dummy_ptr:
yield PyObjectPtr.from_pyobject_ptr(key)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
visited.add(self.as_address())
members = (key.proxyval(visited) for key in self)
if self.safe_tp_name() == 'frozenset':
return frozenset(members)
else:
return set(members)
def write_repr(self, out, visited):
# Emulate Python 3's set_repr
tp_name = self.safe_tp_name()
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
# Python 3's set_repr special-cases the empty set:
if not self.field('used'):
out.write(tp_name)
out.write('()')
return
# Python 3 uses {} for set literals:
if tp_name != 'set':
out.write(tp_name)
out.write('(')
out.write('{')
first = True
for key in self:
if not first:
out.write(', ')
first = False
key.write_repr(out, visited)
out.write('}')
if tp_name != 'set':
out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr())
return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
def proxyval(self, visited):
return str(self)
def write_repr(self, out, visited):
# Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
# Get a PyStringObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
# to Python 2 code:
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
out.write('b')
out.write(quote)
for byte in proxy:
if byte == quote or byte == '\\':
out.write('\\')
out.write(byte)
elif byte == '\t':
out.write('\\t')
elif byte == '\n':
out.write('\\n')
elif byte == '\r':
out.write('\\r')
elif byte < ' ' or ord(byte) >= 0x7f:
out.write('\\x')
out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
out.write(hexdigits[ord(byte) & 0xf])
else:
out.write(byte)
out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
result = tuple([PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))])
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write('(')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
if self.field('ob_size') == 1:
out.write(',)')
else:
out.write(')')
class PyTypeObjectPtr(PyObjectPtr):
_typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
_unichr = unichr
else:
# Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
def _unichr(x):
if x < 0x10000:
return unichr(x)
x -= 0x10000
ch1 = 0xD800 | (x >> 10)
ch2 = 0xDC00 | (x & 0x3FF)
return unichr(ch1) + unichr(ch2)
class PyUnicodeObjectPtr(PyObjectPtr):
_typename = 'PyUnicodeObject'
def char_width(self):
_type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
return _type_Py_UNICODE.sizeof
def proxyval(self, visited):
global _is_pep393
if _is_pep393 is None:
fields = gdb.lookup_type('PyUnicodeObject').target().fields()
_is_pep393 = 'data' in [f.name for f in fields]
if _is_pep393:
# Python 3.3 and newer
may_have_surrogates = False
compact = self.field('_base')
ascii = compact['_base']
state = ascii['state']
is_compact_ascii = (int(state['ascii']) and int(state['compact']))
if not int(state['ready']):
# string is not ready
field_length = long(compact['wstr_length'])
may_have_surrogates = True
field_str = ascii['wstr']
else:
field_length = long(ascii['length'])
if is_compact_ascii:
field_str = ascii.address + 1
elif int(state['compact']):
field_str = compact.address + 1
else:
field_str = self.field('data')['any']
repr_kind = int(state['kind'])
if repr_kind == 1:
field_str = field_str.cast(_type_unsigned_char_ptr())
elif repr_kind == 2:
field_str = field_str.cast(_type_unsigned_short_ptr())
elif repr_kind == 4:
field_str = field_str.cast(_type_unsigned_int_ptr())
else:
# Python 3.2 and earlier
field_length = long(self.field('length'))
field_str = self.field('str')
may_have_surrogates = self.char_width() == 2
# Gather a list of ints from the Py_UNICODE array; these are either
# UCS-1, UCS-2 or UCS-4 code points:
if not may_have_surrogates:
Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
else:
# A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
# inferior process: we must join surrogate pairs.
Py_UNICODEs = []
i = 0
limit = safety_limit(field_length)
while i < limit:
ucs = int(field_str[i])
i += 1
if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
Py_UNICODEs.append(ucs)
continue
# This could be a surrogate pair.
ucs2 = int(field_str[i])
if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
continue
code = (ucs & 0x03FF) << 10
code |= ucs2 & 0x03FF
code += 0x00010000
Py_UNICODEs.append(code)
i += 1
# Convert the int code points to unicode characters, and generate a
# local unicode instance.
# This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
result = u''.join([
(_unichr(ucs) if ucs <= 0x10ffff else '\ufffd')
for ucs in Py_UNICODEs])
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 str literal, i.e. without a "u" prefix
# Get a PyUnicodeObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
# to Python 2:
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
quote = "'"
out.write(quote)
i = 0
while i < len(proxy):
ch = proxy[i]
i += 1
# Escape quotes and backslashes
if ch == quote or ch == '\\':
out.write('\\')
out.write(ch)
            # Map special whitespace to '\t', '\n', '\r'
elif ch == '\t':
out.write('\\t')
elif ch == '\n':
out.write('\\n')
elif ch == '\r':
out.write('\\r')
            # Map non-printable US ASCII to '\xhh'
elif ch < ' ' or ch == 0x7F:
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])
# Copy ASCII characters as-is
elif ord(ch) < 0x7F:
out.write(ch)
# Non-ASCII characters
else:
ucs = ch
ch2 = None
if sys.maxunicode < 0x10000:
# If sizeof(Py_UNICODE) is 2 here (in gdb), join
# surrogate pairs before calling _unichr_is_printable.
if (i < len(proxy)
and 0xD800 <= ord(ch) < 0xDC00 \
and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
ch2 = proxy[i]
ucs = ch + ch2
i += 1
                # Unfortunately, Python 2's unicode type doesn't seem
# to expose the "isprintable" method
printable = _unichr_is_printable(ucs)
if printable:
try:
ucs.encode(ENCODING)
except UnicodeEncodeError:
printable = False
# Map Unicode whitespace and control characters
# (categories Z* and C* except ASCII space)
if not printable:
if ch2 is not None:
# Match Python 3's representation of non-printable
# wide characters.
code = (ord(ch) & 0x03FF) << 10
code |= ord(ch2) & 0x03FF
code += 0x00010000
else:
code = ord(ucs)
# Map 8-bit characters to '\\xhh'
if code <= 0xff:
out.write('\\x')
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
# Map 21-bit characters to '\U00xxxxxx'
elif code >= 0x10000:
out.write('\\U')
out.write(hexdigits[(code >> 28) & 0x0000000F])
out.write(hexdigits[(code >> 24) & 0x0000000F])
out.write(hexdigits[(code >> 20) & 0x0000000F])
out.write(hexdigits[(code >> 16) & 0x0000000F])
out.write(hexdigits[(code >> 12) & 0x0000000F])
out.write(hexdigits[(code >> 8) & 0x0000000F])
out.write(hexdigits[(code >> 4) & 0x0000000F])
out.write(hexdigits[code & 0x0000000F])
# Map 16-bit characters to '\uxxxx'
else:
out.write('\\u')
out.write(hexdigits[(code >> 12) & 0x000F])
out.write(hexdigits[(code >> 8) & 0x000F])
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
else:
# Copy characters as-is
out.write(ch)
if ch2 is not None:
out.write(ch2)
out.write(quote)
class wrapperobject(PyObjectPtr):
_typename = 'wrapperobject'
def safe_name(self):
try:
name = self.field('descr')['d_base']['name'].string()
return repr(name)
except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
return '<unknown name>'
def safe_tp_name(self):
try:
return self.field('self')['ob_type']['tp_name'].string()
except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
return '<unknown tp_name>'
def safe_self_addresss(self):
try:
address = long(self.field('self'))
return '%#x' % address
except (NullPyObjectPtr, RuntimeError):
return '<failed to get self address>'
def proxyval(self, visited):
name = self.safe_name()
tp_name = self.safe_tp_name()
self_address = self.safe_self_addresss()
return ("<method-wrapper %s of %s object at %s>"
% (name, tp_name, self_address))
def write_repr(self, out, visited):
proxy = self.proxyval(visited)
out.write(proxy)
def int_from_int(gdbval):
return int(str(gdbval))
def stringify(val):
# TODO: repr() puts everything on one line; pformat can be nicer, but
# can lead to v.long results; this function isolates the choice
if True:
return repr(val)
else:
from pprint import pformat
return pformat(val)
class PyObjectPtrPrinter:
"Prints a (PyObject*)"
def __init__ (self, gdbval):
self.gdbval = gdbval
def to_string (self):
pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
if True:
return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
else:
# Generate full proxy value then stringify it.
# Doing so could be expensive
proxyval = pyop.proxyval(set())
return stringify(proxyval)
def pretty_printer_lookup(gdbval):
type = gdbval.type.unqualified()
if type.code != gdb.TYPE_CODE_PTR:
return None
type = type.target().unqualified()
t = str(type)
if t in ("PyObject", "PyFrameObject", "PyUnicodeObject", "wrapperobject"):
return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register (obj):
if obj is None:
obj = gdb
# Wire up the pretty-printer
obj.pretty_printers.append(pretty_printer_lookup)
register (gdb.current_objfile ())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
'''
Wrapper for gdb.Frame, adding various methods
'''
def __init__(self, gdbframe):
self._gdbframe = gdbframe
def older(self):
older = self._gdbframe.older()
if older:
return Frame(older)
else:
return None
def newer(self):
newer = self._gdbframe.newer()
if newer:
return Frame(newer)
else:
return None
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
# We divide frames into:
# - "python frames":
# - "bytecode frames" i.e. PyEval_EvalFrameEx
# - "other python frames": things that are of interest from a python
# POV, but aren't bytecode (e.g. GC, GIL)
# - everything else
def is_python_frame(self):
'''Is this a _PyEval_EvalFrameDefault frame, or some other important
frame? (see is_other_python_frame for what "important" means in this
context)'''
if self.is_evalframe():
return True
if self.is_other_python_frame():
return True
return False
def is_evalframe(self):
'''Is this a _PyEval_EvalFrameDefault frame?'''
if self._gdbframe.name() == EVALFRAME:
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
an inline depth of 0 as actually being this function
So we reject those with type gdb.INLINE_FRAME
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a _PyEval_EvalFrameDefault frame:
return True
return False
def is_other_python_frame(self):
'''Is this frame worth displaying in python backtraces?
Examples:
- waiting on the GIL
- garbage-collecting
- within a CFunction
If it is, return a descriptive string
For other frames, return False
'''
if self.is_waiting_for_gil():
return 'Waiting for the GIL'
if self.is_gc_collect():
return 'Garbage-collecting'
# Detect invocations of PyCFunction instances:
frame = self._gdbframe
caller = frame.name()
if not caller:
return False
if caller == 'PyCFunction_Call':
arg_name = 'func'
# Within that frame:
# "func" is the local containing the PyObject* of the
# PyCFunctionObject instance
# "f" is the same value, but cast to (PyCFunctionObject*)
# "self" is the (PyObject*) of the 'self'
try:
# Use the prettyprinter for the func:
func = frame.read_var(arg_name)
return str(func)
except ValueError:
return ('PyCFunction invocation (unable to read %s: '
'missing debuginfos?)' % arg_name)
except RuntimeError:
return 'PyCFunction invocation (unable to read %s)' % arg_name
elif caller == '_PyCFunction_FastCallDict':
arg_name = 'func_obj'
try:
func = frame.read_var(arg_name)
return str(func)
except ValueError:
return ('PyCFunction invocation (unable to read %s: '
'missing debuginfos?)' % arg_name)
except RuntimeError:
return 'PyCFunction invocation (unable to read %s)' % arg_name
if caller == 'wrapper_call':
arg_name = 'wp'
try:
func = frame.read_var(arg_name)
return str(func)
except ValueError:
return ('<wrapper_call invocation (unable to read %s: '
'missing debuginfos?)>' % arg_name)
except RuntimeError:
return '<wrapper_call invocation (unable to read %s)>' % arg_name
# This frame isn't worth reporting:
return False
def is_waiting_for_gil(self):
'''Is this frame waiting on the GIL?'''
# This assumes the _POSIX_THREADS version of Python/ceval_gil.h:
name = self._gdbframe.name()
if name:
return 'pthread_cond_timedwait' in name
def is_gc_collect(self):
'''Is this frame "collect" within the garbage-collector?'''
return self._gdbframe.name() == 'collect'
def get_pyop(self):
try:
f = self._gdbframe.read_var('f')
frame = PyFrameObjectPtr.from_pyobject_ptr(f)
if not frame.is_optimized_out():
return frame
# gdb is unable to get the "f" argument of PyEval_EvalFrameEx()
# because it was "optimized out". Try to get "f" from the frame
# of the caller, PyEval_EvalCodeEx().
orig_frame = frame
caller = self._gdbframe.older()
if caller:
f = caller.read_var('f')
frame = PyFrameObjectPtr.from_pyobject_ptr(f)
if not frame.is_optimized_out():
return frame
return orig_frame
except ValueError:
return None
@classmethod
def get_selected_frame(cls):
_gdbframe = gdb.selected_frame()
if _gdbframe:
return Frame(_gdbframe)
return None
@classmethod
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python-related code in the selected
frame, or None'''
try:
frame = cls.get_selected_frame()
except gdb.error:
# No frame: Python didn't start yet
return None
while frame:
if frame.is_python_frame():
return frame
frame = frame.older()
# Not found:
return None
@classmethod
def get_selected_bytecode_frame(cls):
'''Try to obtain the Frame for the python bytecode interpreter in the
selected GDB frame, or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframe():
return frame
frame = frame.older()
# Not found:
return None
def print_summary(self):
if self.is_evalframe():
pyop = self.get_pyop()
if pyop:
line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
if not pyop.is_optimized_out():
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
else:
info = self.is_other_python_frame()
if info:
sys.stdout.write('#%i %s\n' % (self.get_index(), info))
else:
sys.stdout.write('#%i\n' % self.get_index())
def print_traceback(self):
if self.is_evalframe():
pyop = self.get_pyop()
if pyop:
pyop.print_traceback()
if not pyop.is_optimized_out():
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write(' (unable to read python frame information)\n')
else:
info = self.is_other_python_frame()
if info:
sys.stdout.write(' %s\n' % info)
else:
sys.stdout.write(' (not a python frame)\n')
class PyList(gdb.Command):
'''List the current Python source code, if any
Use
py-list START
to list at a different line number within the python source.
Use
py-list START, END
to list a specific range of lines within the python source.
'''
def __init__(self):
gdb.Command.__init__ (self,
"py-list",
gdb.COMMAND_FILES,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
import re
start = None
end = None
m = re.match(r'\s*(\d+)\s*', args)
if m:
start = int(m.group(0))
end = start + 10
m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
if m:
start, end = map(int, m.groups())
# py-list requires an actual PyEval_EvalFrameEx frame:
frame = Frame.get_selected_bytecode_frame()
if not frame:
print('Unable to locate gdb frame for python bytecode interpreter')
return
pyop = frame.get_pyop()
if not pyop or pyop.is_optimized_out():
print('Unable to read information on python frame')
return
filename = pyop.filename()
lineno = pyop.current_line_num()
if lineno is None:
print('Unable to read python frame line number')
return
if start is None:
start = lineno - 5
end = lineno + 5
if start<1:
start = 1
try:
f = open(os_fsencode(filename), 'r')
except IOError as err:
sys.stdout.write('Unable to open %s: %s\n'
% (filename, err))
return
with f:
all_lines = f.readlines()
# start and end are 1-based, all_lines is 0-based;
# so [start-1:end] as a python slice gives us [start, end] as a
# closed interval
for i, line in enumerate(all_lines[start-1:end]):
linestr = str(i+start)
# Highlight current line:
if i + start == lineno:
linestr = '>' + linestr
sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_python_frame():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print('Unable to find an older python frame')
else:
print('Unable to find a newer python frame')
class PyUp(gdb.Command):
'Select and print the python stack frame that called this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-up",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=True)
class PyDown(gdb.Command):
'Select and print the python stack frame called by this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-down",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
PyUp()
PyDown()
class PyBacktraceFull(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt-full",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
while frame:
if frame.is_python_frame():
frame.print_summary()
frame = frame.older()
PyBacktraceFull()
class PyBacktrace(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
sys.stdout.write('Traceback (most recent call first):\n')
while frame:
if frame.is_python_frame():
frame.print_traceback()
frame = frame.older()
PyBacktrace()
class PyPrint(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-print",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print('Unable to read information on python frame')
return
pyop_var, scope = pyop_frame.get_var_by_name(name)
if pyop_var:
print('%s %r = %s'
% (scope,
name,
pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
else:
print('%r not found' % name)
PyPrint()
class PyLocals(gdb.Command):
    'Print the local variables of the currently selected python frame (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-locals",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print('Unable to read information on python frame')
return
for pyop_name, pyop_value in pyop_frame.iter_locals():
print('%s = %s'
% (pyop_name.proxyval(set()),
pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))
PyLocals()
| 65,766 | 1,960 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/bkfile.py | from builtins import open as _orig_open
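# Helper used by freeze: a drop-in open() for writing that first renames any
# existing file to "file~"; on close, if the newly written contents are
# identical to the backup, the backup is restored (keeping its old timestamp).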
def open(file, mode='r', bufsize=-1):
if 'w' not in mode:
return _orig_open(file, mode, bufsize)
import os
backup = file + '~'
try:
os.unlink(backup)
except OSError:
pass
try:
os.rename(file, backup)
except OSError:
return _orig_open(file, mode, bufsize)
f = _orig_open(file, mode, bufsize)
_orig_close = f.close
def close():
_orig_close()
import filecmp
if filecmp.cmp(backup, file, shallow=False):
import os
os.unlink(file)
os.rename(backup, file)
f.close = close
return f
| 664 | 27 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/makemakefile.py | # Write the actual Makefile.
import os
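# Entries in "files" that end in ".c" get a compile rule and are replaced in
# place by the matching ".o" name; the resulting object list is then linked
# into "target".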
def makemakefile(outfp, makevars, files, target):
outfp.write("# Makefile generated by freeze.py script\n\n")
keys = sorted(makevars.keys())
for key in keys:
outfp.write("%s=%s\n" % (key, makevars[key]))
outfp.write("\nall: %s\n\n" % target)
deps = []
for i in range(len(files)):
file = files[i]
if file[-2:] == '.c':
base = os.path.basename(file)
dest = base[:-2] + '.o'
outfp.write("%s: %s\n" % (dest, file))
outfp.write("\t$(CC) $(PY_CFLAGS) $(PY_CPPFLAGS) -c %s\n" % file)
files[i] = dest
deps.append(dest)
outfp.write("\n%s: %s\n" % (target, ' '.join(deps)))
outfp.write("\t$(LINKCC) $(PY_LDFLAGS) $(LINKFORSHARED) %s -o %s $(LDLAST)\n" %
(' '.join(files), target))
outfp.write("\nclean:\n\t-rm -f *.o %s\n" % target)
| 916 | 29 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/parsesetup.py | # Parse Makefiles and Python Setup(.in) files.
import re
# Extract variable definitions from a Makefile.
# Return a dictionary mapping names to values.
# May raise IOError.
makevardef = re.compile('^([a-zA-Z0-9_]+)[ \t]*=(.*)')
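# e.g. the line "CC=    gcc   # compiler" matches with name "CC"; the code
# below strips the trailing comment and whitespace, leaving the value "gcc".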
def getmakevars(filename):
variables = {}
fp = open(filename)
pendingline = ""
try:
while 1:
line = fp.readline()
if pendingline:
line = pendingline + line
pendingline = ""
if not line:
break
if line.endswith('\\\n'):
pendingline = line[:-2]
matchobj = makevardef.match(line)
if not matchobj:
continue
(name, value) = matchobj.group(1, 2)
# Strip trailing comment
i = value.find('#')
if i >= 0:
value = value[:i]
value = value.strip()
variables[name] = value
finally:
fp.close()
return variables
# Parse a Python Setup(.in) file.
# Return two dictionaries, the first mapping modules to their
# definitions, the second mapping variable names to their values.
# May raise IOError.
setupvardef = re.compile('^([a-zA-Z0-9_]+)=(.*)')
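# Variable lines such as "EXE=.exe" go into the variables dict; module lines
# such as "zlib zlibmodule.c -lz" map module "zlib" to ["zlibmodule.c", "-lz"].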
def getsetupinfo(filename):
modules = {}
variables = {}
fp = open(filename)
pendingline = ""
try:
while 1:
line = fp.readline()
if pendingline:
line = pendingline + line
pendingline = ""
if not line:
break
# Strip comments
i = line.find('#')
if i >= 0:
line = line[:i]
if line.endswith('\\\n'):
pendingline = line[:-2]
continue
matchobj = setupvardef.match(line)
if matchobj:
(name, value) = matchobj.group(1, 2)
variables[name] = value.strip()
else:
words = line.split()
if words:
modules[words[0]] = words[1:]
finally:
fp.close()
return modules, variables
# Test the above functions.
def test():
import sys
import os
if not sys.argv[1:]:
print('usage: python parsesetup.py Makefile*|Setup* ...')
sys.exit(2)
for arg in sys.argv[1:]:
base = os.path.basename(arg)
if base[:8] == 'Makefile':
print('Make style parsing:', arg)
v = getmakevars(arg)
prdict(v)
elif base[:5] == 'Setup':
print('Setup style parsing:', arg)
m, v = getsetupinfo(arg)
prdict(m)
prdict(v)
else:
print(arg, 'is neither a Makefile nor a Setup file')
print('(name must begin with "Makefile" or "Setup")')
def prdict(d):
keys = sorted(d.keys())
for key in keys:
value = d[key]
print("%-15s" % key, str(value))
if __name__ == '__main__':
test()
| 3,008 | 112 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/makefreeze.py | import marshal
import bkfile
# Write a file containing frozen code for the modules in the dictionary.
header = """
#include "third_party/python/Include/Python.h"
static struct _frozen _PyImport_FrozenModules[] = {
"""
trailer = """\
{0, 0, 0} /* sentinel */
};
"""
# if __debug__ == 0 (i.e. -O option given), set Py_OptimizeFlag in frozen app.
default_entry_point = """
int
main(int argc, char **argv)
{
extern int Py_FrozenMain(int, char **);
""" + ((not __debug__ and """
Py_OptimizeFlag++;
""") or "") + """
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(argc, argv);
}
"""
def makefreeze(base, dict, debug=0, entry_point=None, fail_import=()):
if entry_point is None: entry_point = default_entry_point
done = []
files = []
mods = sorted(dict.keys())
for mod in mods:
m = dict[mod]
mangled = "__".join(mod.split("."))
if m.__code__:
file = 'M_' + mangled + '.c'
with bkfile.open(base + file, 'w') as outfp:
files.append(file)
if debug:
print("freezing", mod, "...")
str = marshal.dumps(m.__code__)
size = len(str)
if m.__path__:
# Indicate package by negative size
size = -size
done.append((mod, mangled, size))
writecode(outfp, mangled, str)
if debug:
print("generating table of frozen modules")
with bkfile.open(base + 'frozen.c', 'w') as outfp:
for mod, mangled, size in done:
outfp.write('extern unsigned char M_%s[];\n' % mangled)
outfp.write(header)
for mod, mangled, size in done:
outfp.write('\t{"%s", M_%s, %d},\n' % (mod, mangled, size))
outfp.write('\n')
# The following modules have a NULL code pointer, indicating
# that the frozen program should not search for them on the host
# system. Importing them will *always* raise an ImportError.
# The zero value size is never used.
for mod in fail_import:
outfp.write('\t{"%s", NULL, 0},\n' % (mod,))
outfp.write(trailer)
outfp.write(entry_point)
return files
# Write a C initializer for a module containing the frozen python code.
# The array is called M_<mod>.
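# e.g. module "foo.bar" is written to M_foo__bar.c as, roughly (byte values
# illustrative):
#     unsigned char M_foo__bar[] = {
#         227,0,0,0,115,22,0,0,
#     };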
def writecode(outfp, mod, str):
outfp.write('unsigned char M_%s[] = {' % mod)
for i in range(0, len(str), 16):
outfp.write('\n\t')
for c in bytes(str[i:i+16]):
outfp.write('%d,' % c)
outfp.write('\n};\n')
## def writecode(outfp, mod, str):
## outfp.write('unsigned char M_%s[%d] = "%s";\n' % (mod, len(str),
## '\\"'.join(map(lambda s: repr(s)[1:-1], str.split('"')))))
| 2,799 | 88 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/flag.py | initialized = True
print("Hello world!")
| 41 | 3 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/README | THE FREEZE SCRIPT
=================
(Directions for Windows are at the end of this file.)
What is Freeze?
---------------
Freeze makes it possible to ship arbitrary Python programs to people
who don't have Python. The shipped file (called a "frozen" version of
your Python program) is an executable, so this only works if your
platform is compatible with that on the receiving end (this is usually
a matter of having the same major operating system revision and CPU
type).
The shipped file contains a Python interpreter and large portions of
the Python run-time. Some measures have been taken to avoid linking
unneeded modules, but the resulting binary is usually not small.
The Python source code of your program (and of the library modules
written in Python that it uses) is not included in the binary --
instead, the compiled byte-code (the instruction stream used
internally by the interpreter) is incorporated. This gives some
protection of your Python source code, though not much -- a
disassembler for Python byte-code is available in the standard Python
library. At least someone running "strings" on your binary won't see
the source.
How does Freeze know which modules to include?
----------------------------------------------
Previous versions of Freeze used a pretty simple-minded algorithm to
find the modules that your program uses, essentially searching for
lines starting with the word "import". It was pretty easy to trick it
into making mistakes, either missing valid import statements, or
mistaking string literals (e.g. doc strings) for import statements.
This has been remedied: Freeze now uses the regular Python parser to
parse the program (and all its modules) and scans the generated byte
code for IMPORT instructions. It may still be confused -- it will not
know about calls to the __import__ built-in function, or about import
statements constructed on the fly and executed using the 'exec'
statement, and it will consider import statements even when they are
unreachable (e.g. "if 0: import foobar").
This new version of Freeze also knows about Python's new package
import mechanism, and uses exactly the same rules to find imported
modules and packages. One exception: if you write 'from package
import *', Python will look into the __all__ variable of the package
to determine which modules are to be imported, while Freeze will do a
directory listing.
One tricky issue: Freeze assumes that the Python interpreter and
environment you're using to run Freeze are the same ones that would be
used to run your program, and the same ones whose sources and installed
files you will learn about in the next section. In
particular, your PYTHONPATH setting should be the same as for running
your program locally. (Tip: if the program doesn't run when you type
"python hello.py" there's little chance of getting the frozen version
to run.)
How do I use Freeze?
--------------------
Normally, you should be able to use it as follows:
python freeze.py hello.py
where hello.py is your program and freeze.py is the main file of
Freeze (in actuality, you'll probably specify an absolute pathname
such as /usr/joe/python/Tools/freeze/freeze.py).
What do I do next?
------------------
Freeze creates a number of files: frozen.c, config.c and Makefile,
plus one file for each Python module that gets included named
M_<module>.c. To produce the frozen version of your program, you can
simply type "make". This should produce a binary file. If the
filename argument to Freeze was "hello.py", the binary will be called
"hello".
Note: you can use the -o option to freeze to specify an alternative
directory where these files are created. This makes it easier to
clean up after you've shipped the frozen binary. You should invoke
"make" in the given directory.
Freezing Tkinter programs
-------------------------
Unfortunately, it is currently not possible to freeze programs that
use Tkinter without a Tcl/Tk installation. The best way to ship a
frozen Tkinter program is to decide in advance where you are going
to place the Tcl and Tk library files in the distributed setup, and
then declare these directories in your frozen Python program using
the TCL_LIBRARY, TK_LIBRARY and TIX_LIBRARY environment variables.
For example, assume you will ship your frozen program in the directory
<root>/bin/windows-x86 and will place your Tcl library files
in <root>/lib/tcl8.2 and your Tk library files in <root>/lib/tk8.2. Then
placing the following lines in your frozen Python script before importing
Tkinter or Tix would set the environment correctly for Tcl/Tk/Tix:
import os
import os.path
RootDir = os.path.dirname(os.path.dirname(os.getcwd()))
import sys
if sys.platform == "win32":
sys.path = ['', '..\\..\\lib\\python-2.0']
os.environ['TCL_LIBRARY'] = RootDir + '\\lib\\tcl8.2'
os.environ['TK_LIBRARY'] = RootDir + '\\lib\\tk8.2'
os.environ['TIX_LIBRARY'] = RootDir + '\\lib\\tix8.1'
elif sys.platform == "linux2":
sys.path = ['', '../../lib/python-2.0']
os.environ['TCL_LIBRARY'] = RootDir + '/lib/tcl8.2'
os.environ['TK_LIBRARY'] = RootDir + '/lib/tk8.2'
os.environ['TIX_LIBRARY'] = RootDir + '/lib/tix8.1'
elif sys.platform == "solaris":
sys.path = ['', '../../lib/python-2.0']
os.environ['TCL_LIBRARY'] = RootDir + '/lib/tcl8.2'
os.environ['TK_LIBRARY'] = RootDir + '/lib/tk8.2'
os.environ['TIX_LIBRARY'] = RootDir + '/lib/tix8.1'
This also adds <root>/lib/python-2.0 to your Python path
for any Python files such as _tkinter.pyd you may need.
Note that the dynamic libraries (such as tcl82.dll, tk82.dll, python20.dll
under Windows, or libtcl8.2.so and libtk8.2.so under Unix) are required
at program load time, and are searched by the operating system loader
before Python can be started. Under Windows, the environment
variable PATH is consulted, and under Unix, it may be the
environment variable LD_LIBRARY_PATH and/or the system
shared library cache (ld.so). An additional preferred directory for
finding the dynamic libraries is built into the .dll or .so files at
compile time - see the LIB_RUNTIME_DIR variable in the Tcl makefile.
The OS must find the dynamic libraries or your frozen program won't start.
Usually I make sure that the .so or .dll files are in the same directory
as the executable, but this may not be foolproof.
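For instance, using the same <root> layout as above, on a Unix system you
might start the frozen binary with the library directory on the loader path
(the exact variable and syntax depend on your platform and shell):

    LD_LIBRARY_PATH=<root>/lib ./hello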
One workaround that avoids installing the Tcl library files alongside your
frozen executable would be to incorporate the Tcl/Tk library files into a
frozen Python module as string literals and write them to a temporary
location when the program runs; this is currently left as an exercise for
the reader. An easier approach is to freeze the Tcl/Tk/Tix code into the
dynamic libraries using the Tcl ET code, or the Tix Stand-Alone-Module
code. Of course, you can also simply require that Tcl/Tk is installed on
the target system, but be careful that the versions correspond.
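The following is only a rough sketch of that string-literal workaround, not
a feature of freeze itself; it assumes a hypothetical frozen module (here
called tcl_resources) whose FILES dictionary maps relative file names such
as 'tcl8.2/init.tcl' to their contents:

    import os
    import tempfile
    import tcl_resources      # hypothetical: {relative path: file contents}

    def unpack_tcl_library():
        # Write the embedded Tcl/Tk files to a temporary directory and
        # return that directory, suitable as a base for TCL_LIBRARY etc.
        root = tempfile.mkdtemp(prefix='frozen-tcl-')
        for relpath, data in tcl_resources.FILES.items():
            dest = os.path.join(root, relpath)
            os.makedirs(os.path.dirname(dest), exist_ok=True)
            with open(dest, 'w') as f:
                f.write(data)
        return root

    _tcl_root = unpack_tcl_library()
    os.environ['TCL_LIBRARY'] = os.path.join(_tcl_root, 'tcl8.2')
    os.environ['TK_LIBRARY'] = os.path.join(_tcl_root, 'tk8.2')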
There are some caveats to using frozen Tkinter applications:
Under Windows if you use the -s windows option, writing
to stdout or stderr is an error.
The Tcl [info nameofexecutable] will be set to where the
program was frozen, not where it is run from.
The global variables argc and argv do not exist.
A warning about shared library modules
--------------------------------------
When your Python installation uses shared library modules such as
_tkinter.pyd, these will not be incorporated in the frozen program.
Again, the frozen program will work when you test it, but it won't
work when you ship it to a site without a Python installation.
When this is the case, Freeze prints a warning at the end of the
freezing process:
Warning: unknown modules remain: ...
When this occurs, the best thing to do is usually to rebuild Python
using static linking only. Or use the approach described in the previous
section to declare a library path using sys.path, and place the modules
such as _tkinter.pyd there.
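For instance (the directory layout is illustrative), the frozen script could
extend sys.path before the first import of such a module:

    import os
    import sys

    libdir = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'lib')
    if libdir not in sys.path:
        # 'lib' next to the frozen binary is assumed to hold _tkinter.pyd etc.
        sys.path.append(libdir)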
Troubleshooting
---------------
If you have trouble using Freeze for a large program, it's probably
best to start playing with a really simple program first (like the file
hello.py). If you can't get that to work there's something
fundamentally wrong -- perhaps you haven't installed Python. To do a
proper install, you should do "make install" in the Python root
directory.
Usage under Windows 95 or NT
----------------------------
Under Windows 95 or NT, you *must* use the -p option and point it to
the top of the Python source tree.
WARNING: the resulting executable is not self-contained; it requires
the Python DLL, currently PYTHON20.DLL (it does not require the
standard library of .py files though). It may also require one or
more extension modules loaded from .DLL or .PYD files; the module
names are printed in the warning message about remaining unknown
modules.
The driver script generates a Makefile that works with the Microsoft
command line C compiler (CL). To compile, run "nmake"; this will
build a target "hello.exe" if the source was "hello.py". Only the
files frozenmain.c and frozen.c are used; no config.c is generated or
used, since the standard DLL is used.
In order for this to work, you must have built Python using the VC++
(Developer Studio) 5.0 compiler. The provided project builds
python20.lib in the subdirectory pcbuild\Release of the Python source
tree, and this is where the generated Makefile expects it to be. If
this is not the case, you can edit the Makefile or (probably better)
winmakemakefile.py (e.g., if you are using the 4.2 compiler, the
python20.lib file is generated in the subdirectory vc40 of the Python
source tree).
It is possible to create frozen programs that don't have a console
window, by specifying the option '-s windows'. See the Usage below.
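For example (the location of the Python source tree is illustrative):

    python freeze.py -p C:\src\Python-source hello.py
    nmake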
Usage
-----
Here is a list of all of the options (taken from freeze.__doc__):
usage: freeze [options...] script [module]...
Options:
-p prefix: This is the prefix used when you ran ``make install''
in the Python build directory.
(If you never ran this, freeze won't work.)
The default is whatever sys.prefix evaluates to.
It can also be the top directory of the Python source
tree; then -P must point to the build tree.
-P exec_prefix: Like -p but this is the 'exec_prefix', used to
install objects etc. The default is whatever sys.exec_prefix
evaluates to, or the -p argument if given.
If -p points to the Python source tree, -P must point
to the build tree, if different.
-e extension: A directory containing additional .o files that
may be used to resolve modules. This directory
should also have a Setup file describing the .o files.
On Windows, the name of a .INI file describing one
or more extensions is passed.
More than one -e option may be given.
-o dir: Directory where the output files are created; default '.'.
-m: Additional arguments are module names instead of filenames.
-a package=dir: Additional directories to be added to the package's
__path__. Used to simulate directories added by the
package at runtime (eg, by OpenGL and win32com).
More than one -a option may be given for each package.
-l file: Pass the file to the linker (windows only)
-d: Debugging mode for the module finder.
-q: Make the module finder totally quiet.
-h: Print this help message.
-x module Exclude the specified module.
-i filename: Include a file with additional command line options. Used
to prevent command lines growing beyond the capabilities of
the shell/OS. All arguments specified in filename
are read and the -i option replaced with the parsed
params (note - quoting args in this file is NOT supported)
-s subsystem: Specify the subsystem (For Windows only.);
'console' (default), 'windows', 'service' or 'com_dll'
-w: Toggle Windows (NT or 95) behavior.
(For debugging only -- on a win32 platform, win32 behavior
is automatic.)
Arguments:
script: The Python script to be executed by the resulting binary.
module ...: Additional Python modules (referenced by pathname)
that will be included in the resulting binary. These
may be .py or .pyc files. If -m is specified, these are
              module names that are searched in the path instead.
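As a purely illustrative example (all names and paths are made up), several
of these options can be combined in one invocation:

    python freeze.py -o build -p /usr/local -x tkinter myscript.py -m mypkg.helpers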
--Guido van Rossum (home page: http://www.python.org/~guido/)
| 12,652 | 297 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/freeze.py | #! /usr/bin/env python3
"""Freeze a Python script into a binary.
usage: freeze [options...] script [module]...
Options:
-p prefix: This is the prefix used when you ran ``make install''
in the Python build directory.
(If you never ran this, freeze won't work.)
The default is whatever sys.prefix evaluates to.
It can also be the top directory of the Python source
tree; then -P must point to the build tree.
-P exec_prefix: Like -p but this is the 'exec_prefix', used to
install objects etc. The default is whatever sys.exec_prefix
evaluates to, or the -p argument if given.
If -p points to the Python source tree, -P must point
to the build tree, if different.
-e extension: A directory containing additional .o files that
may be used to resolve modules. This directory
should also have a Setup file describing the .o files.
On Windows, the name of a .INI file describing one
or more extensions is passed.
More than one -e option may be given.
-o dir: Directory where the output files are created; default '.'.
-m: Additional arguments are module names instead of filenames.
-a package=dir: Additional directories to be added to the package's
__path__. Used to simulate directories added by the
package at runtime (eg, by OpenGL and win32com).
More than one -a option may be given for each package.
-l file: Pass the file to the linker (windows only)
-d: Debugging mode for the module finder.
-q: Make the module finder totally quiet.
-h: Print this help message.
-x module Exclude the specified module. It will still be imported
by the frozen binary if it exists on the host system.
-X module Like -x, except the module can never be imported by
the frozen binary.
-E: Freeze will fail if any modules can't be found (that
were not excluded using -x or -X).
-i filename: Include a file with additional command line options. Used
to prevent command lines growing beyond the capabilities of
the shell/OS. All arguments specified in filename
are read and the -i option replaced with the parsed
params (note - quoting args in this file is NOT supported)
-s subsystem: Specify the subsystem (For Windows only.);
'console' (default), 'windows', 'service' or 'com_dll'
-w: Toggle Windows (NT or 95) behavior.
(For debugging only -- on a win32 platform, win32 behavior
is automatic.)
-r prefix=f: Replace path prefix.
Replace prefix with f in the source path references
contained in the resulting binary.
Arguments:
script: The Python script to be executed by the resulting binary.
module ...: Additional Python modules (referenced by pathname)
that will be included in the resulting binary. These
may be .py or .pyc files. If -m is specified, these are
              module names that are searched in the path instead.
NOTES:
In order to use freeze successfully, you must have built Python and
installed it ("make install").
The script should not use modules provided only as shared libraries;
if it does, the resulting binary is not self-contained.
"""
# Import standard modules
import modulefinder
import getopt
import os
import sys
# Import the freeze-private modules
import checkextensions
import makeconfig
import makefreeze
import makemakefile
import parsesetup
import bkfile
# Main program
def main():
# overridable context
prefix = None # settable with -p option
exec_prefix = None # settable with -P option
extensions = []
exclude = [] # settable with -x option
addn_link = [] # settable with -l, but only honored under Windows.
path = sys.path[:]
modargs = 0
debug = 1
odir = ''
win = sys.platform[:3] == 'win'
replace_paths = [] # settable with -r option
error_if_any_missing = 0
# default the exclude list for each platform
if win: exclude = exclude + [
'dos', 'dospath', 'mac', 'macpath', 'macfs', 'MACFS', 'posix', ]
fail_import = exclude[:]
# output files
frozen_c = 'frozen.c'
config_c = 'config.c'
target = 'a.out' # normally derived from script name
makefile = 'Makefile'
subsystem = 'console'
# parse command line by first replacing any "-i" options with the
# file contents.
pos = 1
while pos < len(sys.argv)-1:
# last option can not be "-i", so this ensures "pos+1" is in range!
if sys.argv[pos] == '-i':
try:
options = open(sys.argv[pos+1]).read().split()
except IOError as why:
usage("File name '%s' specified with the -i option "
"can not be read - %s" % (sys.argv[pos+1], why) )
# Replace the '-i' and the filename with the read params.
sys.argv[pos:pos+2] = options
pos = pos + len(options) - 1 # Skip the name and the included args.
pos = pos + 1
# Now parse the command line with the extras inserted.
try:
opts, args = getopt.getopt(sys.argv[1:], 'r:a:dEe:hmo:p:P:qs:wX:x:l:')
except getopt.error as msg:
usage('getopt error: ' + str(msg))
# process option arguments
for o, a in opts:
if o == '-h':
print(__doc__)
return
if o == '-d':
debug = debug + 1
if o == '-e':
extensions.append(a)
if o == '-m':
modargs = 1
if o == '-o':
odir = a
if o == '-p':
prefix = a
if o == '-P':
exec_prefix = a
if o == '-q':
debug = 0
if o == '-w':
win = not win
if o == '-s':
if not win:
usage("-s subsystem option only on Windows")
subsystem = a
if o == '-x':
exclude.append(a)
if o == '-X':
exclude.append(a)
fail_import.append(a)
if o == '-E':
error_if_any_missing = 1
if o == '-l':
addn_link.append(a)
if o == '-a':
modulefinder.AddPackagePath(*a.split("=", 2))
if o == '-r':
f,r = a.split("=", 2)
replace_paths.append( (f,r) )
# modules that are imported by the Python runtime
implicits = []
for module in ('site', 'warnings', 'encodings.utf_8', 'encodings.latin_1'):
if module not in exclude:
implicits.append(module)
# default prefix and exec_prefix
if not exec_prefix:
if prefix:
exec_prefix = prefix
else:
exec_prefix = sys.exec_prefix
if not prefix:
prefix = sys.prefix
# determine whether -p points to the Python source tree
ishome = os.path.exists(os.path.join(prefix, 'Python', 'ceval.c'))
# locations derived from options
version = '%d.%d' % sys.version_info[:2]
flagged_version = version + sys.abiflags
if win:
extensions_c = 'frozen_extensions.c'
if ishome:
print("(Using Python source directory)")
binlib = exec_prefix
incldir = os.path.join(prefix, 'Include')
config_h_dir = exec_prefix
config_c_in = os.path.join(prefix, 'Modules', 'config.c.in')
frozenmain_c = os.path.join(prefix, 'Python', 'frozenmain.c')
makefile_in = os.path.join(exec_prefix, 'Makefile')
if win:
frozendllmain_c = os.path.join(exec_prefix, 'Pc\\frozen_dllmain.c')
else:
binlib = os.path.join(exec_prefix,
'lib', 'python%s' % version,
'config-%s' % flagged_version)
incldir = os.path.join(prefix, 'include', 'python%s' % flagged_version)
config_h_dir = os.path.join(exec_prefix, 'include',
'python%s' % flagged_version)
config_c_in = os.path.join(binlib, 'config.c.in')
frozenmain_c = os.path.join(binlib, 'frozenmain.c')
makefile_in = os.path.join(binlib, 'Makefile')
frozendllmain_c = os.path.join(binlib, 'frozen_dllmain.c')
supp_sources = []
defines = []
includes = ['-I' + incldir, '-I' + config_h_dir]
# sanity check of directories and files
check_dirs = [prefix, exec_prefix, binlib, incldir]
if not win:
# These are not directories on Windows.
check_dirs = check_dirs + extensions
for dir in check_dirs:
if not os.path.exists(dir):
usage('needed directory %s not found' % dir)
if not os.path.isdir(dir):
usage('%s: not a directory' % dir)
if win:
files = supp_sources + extensions # extensions are files on Windows.
else:
files = [config_c_in, makefile_in] + supp_sources
for file in supp_sources:
if not os.path.exists(file):
usage('needed file %s not found' % file)
if not os.path.isfile(file):
usage('%s: not a plain file' % file)
if not win:
for dir in extensions:
setup = os.path.join(dir, 'Setup')
if not os.path.exists(setup):
usage('needed file %s not found' % setup)
if not os.path.isfile(setup):
usage('%s: not a plain file' % setup)
# check that enough arguments are passed
if not args:
usage('at least one filename argument required')
# check that file arguments exist
for arg in args:
if arg == '-m':
break
# if user specified -m on the command line before _any_
# file names, then nothing should be checked (as the
# very first file should be a module name)
if modargs:
break
if not os.path.exists(arg):
usage('argument %s not found' % arg)
if not os.path.isfile(arg):
usage('%s: not a plain file' % arg)
# process non-option arguments
scriptfile = args[0]
modules = args[1:]
# derive target name from script name
base = os.path.basename(scriptfile)
base, ext = os.path.splitext(base)
if base:
if base != scriptfile:
target = base
else:
target = base + '.bin'
# handle -o option
base_frozen_c = frozen_c
base_config_c = config_c
base_target = target
if odir and not os.path.isdir(odir):
try:
os.mkdir(odir)
print("Created output directory", odir)
except OSError as msg:
usage('%s: mkdir failed (%s)' % (odir, str(msg)))
base = ''
if odir:
base = os.path.join(odir, '')
frozen_c = os.path.join(odir, frozen_c)
config_c = os.path.join(odir, config_c)
target = os.path.join(odir, target)
makefile = os.path.join(odir, makefile)
if win: extensions_c = os.path.join(odir, extensions_c)
# Handle special entry point requirements
# (on Windows, some frozen programs do not use __main__, but
# import the module directly. Eg, DLLs, Services, etc
custom_entry_point = None # Currently only used on Windows
python_entry_is_main = 1 # Is the entry point called __main__?
# handle -s option on Windows
if win:
import winmakemakefile
try:
custom_entry_point, python_entry_is_main = \
winmakemakefile.get_custom_entry_point(subsystem)
except ValueError as why:
usage(why)
# Actual work starts here...
# collect all modules of the program
dir = os.path.dirname(scriptfile)
path[0] = dir
mf = modulefinder.ModuleFinder(path, debug, exclude, replace_paths)
if win and subsystem=='service':
# If a Windows service, then add the "built-in" module.
mod = mf.add_module("servicemanager")
mod.__file__="dummy.pyd" # really built-in to the resulting EXE
for mod in implicits:
mf.import_hook(mod)
for mod in modules:
if mod == '-m':
modargs = 1
continue
if modargs:
if mod[-2:] == '.*':
mf.import_hook(mod[:-2], None, ["*"])
else:
mf.import_hook(mod)
else:
mf.load_file(mod)
# Alias "importlib._bootstrap" to "_frozen_importlib" so that the
# import machinery can bootstrap. Do the same for
# importlib._bootstrap_external.
mf.modules["_frozen_importlib"] = mf.modules["importlib._bootstrap"]
mf.modules["_frozen_importlib_external"] = mf.modules["importlib._bootstrap_external"]
# Add the main script as either __main__, or the actual module name.
if python_entry_is_main:
mf.run_script(scriptfile)
else:
mf.load_file(scriptfile)
if debug > 0:
mf.report()
print()
dict = mf.modules
if error_if_any_missing:
missing = mf.any_missing()
if missing:
sys.exit("There are some missing modules: %r" % missing)
# generate output for frozen modules
files = makefreeze.makefreeze(base, dict, debug, custom_entry_point,
fail_import)
# look for unfrozen modules (builtin and of unknown origin)
builtins = []
unknown = []
mods = sorted(dict.keys())
for mod in mods:
if dict[mod].__code__:
continue
if not dict[mod].__file__:
builtins.append(mod)
else:
unknown.append(mod)
# search for unknown modules in extensions directories (not on Windows)
addfiles = []
frozen_extensions = [] # Windows list of modules.
if unknown or (not win and builtins):
if not win:
addfiles, addmods = \
checkextensions.checkextensions(unknown+builtins,
extensions)
for mod in addmods:
if mod in unknown:
unknown.remove(mod)
builtins.append(mod)
else:
# Do the windows thang...
import checkextensions_win32
# Get a list of CExtension instances, each describing a module
# (including its source files)
frozen_extensions = checkextensions_win32.checkextensions(
unknown, extensions, prefix)
for mod in frozen_extensions:
unknown.remove(mod.name)
# report unknown modules
if unknown:
sys.stderr.write('Warning: unknown modules remain: %s\n' %
' '.join(unknown))
# windows gets different treatment
if win:
# Taking a shortcut here...
import winmakemakefile, checkextensions_win32
checkextensions_win32.write_extension_table(extensions_c,
frozen_extensions)
# Create a module definition for the bootstrap C code.
xtras = [frozenmain_c, os.path.basename(frozen_c),
frozendllmain_c, os.path.basename(extensions_c)] + files
maindefn = checkextensions_win32.CExtension( '__main__', xtras )
frozen_extensions.append( maindefn )
with open(makefile, 'w') as outfp:
winmakemakefile.makemakefile(outfp,
locals(),
frozen_extensions,
os.path.basename(target))
return
# generate config.c and Makefile
builtins.sort()
with open(config_c_in) as infp, bkfile.open(config_c, 'w') as outfp:
makeconfig.makeconfig(infp, outfp, builtins)
cflags = ['$(OPT)']
cppflags = defines + includes
libs = [os.path.join(binlib, '$(LDLIBRARY)')]
somevars = {}
if os.path.exists(makefile_in):
makevars = parsesetup.getmakevars(makefile_in)
for key in makevars:
somevars[key] = makevars[key]
somevars['CFLAGS'] = ' '.join(cflags) # override
somevars['CPPFLAGS'] = ' '.join(cppflags) # override
files = [base_config_c, base_frozen_c] + \
files + supp_sources + addfiles + libs + \
['$(MODLIBS)', '$(LIBS)', '$(SYSLIBS)']
with bkfile.open(makefile, 'w') as outfp:
makemakefile.makemakefile(outfp, somevars, files, base_target)
# Done!
if odir:
print('Now run "make" in', odir, end=' ')
print('to build the target:', base_target)
else:
print('Now run "make" to build the target:', base_target)
# Print usage message and exit
def usage(msg):
sys.stdout = sys.stderr
print("Error:", msg)
print("Use ``%s -h'' for help" % sys.argv[0])
sys.exit(2)
main()
| 17,065 | 492 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/checkextensions.py | # Check for a module in a set of extension directories.
# An extension directory should contain a Setup file
# and one or more .o files or a lib.a file.
import os
import parsesetup
def checkextensions(unknown, extensions):
files = []
modules = []
edict = {}
for e in extensions:
setup = os.path.join(e, 'Setup')
liba = os.path.join(e, 'lib.a')
if not os.path.isfile(liba):
liba = None
edict[e] = parsesetup.getsetupinfo(setup), liba
for mod in unknown:
for e in extensions:
(mods, vars), liba = edict[e]
if mod not in mods:
continue
modules.append(mod)
if liba:
# If we find a lib.a, use it, ignore the
# .o files, and use *all* libraries for
# *all* modules in the Setup file
if liba in files:
break
files.append(liba)
for m in list(mods.keys()):
files = files + select(e, mods, vars,
m, 1)
break
files = files + select(e, mods, vars, mod, 0)
break
return files, modules
def select(e, mods, vars, mod, skipofiles):
files = []
for w in mods[mod]:
w = treatword(w)
if not w:
continue
w = expandvars(w, vars)
for w in w.split():
if skipofiles and w[-2:] == '.o':
continue
# Assume $var expands to absolute pathname
if w[0] not in ('-', '$') and w[-2:] in ('.o', '.a'):
w = os.path.join(e, w)
if w[:2] in ('-L', '-R') and w[2:3] != '$':
w = w[:2] + os.path.join(e, w[2:])
files.append(w)
return files
cc_flags = ['-I', '-D', '-U']
cc_exts = ['.c', '.C', '.cc', '.c++']
def treatword(w):
if w[:2] in cc_flags:
return None
if w[:1] == '-':
return w # Assume loader flag
head, tail = os.path.split(w)
base, ext = os.path.splitext(tail)
if ext in cc_exts:
tail = base + '.o'
w = os.path.join(head, tail)
return w
def expandvars(str, vars):
i = 0
while i < len(str):
i = k = str.find('$', i)
if i < 0:
break
i = i+1
var = str[i:i+1]
i = i+1
if var == '(':
j = str.find(')', i)
if j < 0:
break
var = str[i:j]
i = j+1
if var in vars:
str = str[:k] + vars[var] + str[i:]
i = k
return str
| 2,630 | 91 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/hello.py | print('Hello world...')
| 24 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/makeconfig.py | import re
import sys
# Write the config.c file
never = ['marshal', '_imp', '_ast', '__main__', 'builtins',
'sys', 'gc', '_warnings']
def makeconfig(infp, outfp, modules, with_ifdef=0):
m1 = re.compile('-- ADDMODULE MARKER 1 --')
m2 = re.compile('-- ADDMODULE MARKER 2 --')
for line in infp:
outfp.write(line)
if m1 and m1.search(line):
m1 = None
for mod in modules:
if mod in never:
continue
if with_ifdef:
outfp.write("#ifndef PyInit_%s\n"%mod)
outfp.write('extern PyObject* PyInit_%s(void);\n' % mod)
if with_ifdef:
outfp.write("#endif\n")
elif m2 and m2.search(line):
m2 = None
for mod in modules:
if mod in never:
continue
outfp.write('\t{"%s", PyInit_%s},\n' %
(mod, mod))
if m1:
sys.stderr.write('MARKER 1 never found\n')
elif m2:
sys.stderr.write('MARKER 2 never found\n')
# Test program.
def test():
if not sys.argv[3:]:
print('usage: python makeconfig.py config.c.in outputfile', end=' ')
print('modulename ...')
sys.exit(2)
if sys.argv[1] == '-':
infp = sys.stdin
else:
infp = open(sys.argv[1])
if sys.argv[2] == '-':
outfp = sys.stdout
else:
outfp = open(sys.argv[2], 'w')
makeconfig(infp, outfp, sys.argv[3:])
if outfp != sys.stdout:
outfp.close()
if infp != sys.stdin:
infp.close()
if __name__ == '__main__':
test()
| 1,665 | 60 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/winmakemakefile.py | import sys, os
# Template used when the program is a GUI program
WINMAINTEMPLATE = """
#include <windows.h>
int WINAPI WinMain(
HINSTANCE hInstance, // handle to current instance
HINSTANCE hPrevInstance, // handle to previous instance
LPSTR lpCmdLine, // pointer to command line
int nCmdShow // show state of window
)
{
extern int Py_FrozenMain(int, char **);
PyImport_FrozenModules = _PyImport_FrozenModules;
return Py_FrozenMain(__argc, __argv);
}
"""
SERVICETEMPLATE = """
extern int PythonService_main(int, char **);
int main( int argc, char **argv)
{
PyImport_FrozenModules = _PyImport_FrozenModules;
return PythonService_main(argc, argv);
}
"""
subsystem_details = {
# -s flag : (C entry point template), (is it __main__?), (is it a DLL?)
'console' : (None, 1, 0),
'windows' : (WINMAINTEMPLATE, 1, 0),
'service' : (SERVICETEMPLATE, 0, 0),
'com_dll' : ("", 0, 1),
}
def get_custom_entry_point(subsystem):
try:
return subsystem_details[subsystem][:2]
except KeyError:
raise ValueError("The subsystem %s is not known" % subsystem)
def makemakefile(outfp, vars, files, target):
save = sys.stdout
try:
sys.stdout = outfp
realwork(vars, files, target)
finally:
sys.stdout = save
def realwork(vars, moddefns, target):
version_suffix = "%r%r" % sys.version_info[:2]
print("# Makefile for Microsoft Visual C++ generated by freeze.py script")
print()
print('target = %s' % target)
print('pythonhome = %s' % vars['prefix'])
print()
print('DEBUG=0 # Set to 1 to use the _d versions of Python.')
print('!IF $(DEBUG)')
print('debug_suffix=_d')
print('c_debug=/Zi /Od /DDEBUG /D_DEBUG')
print('l_debug=/DEBUG')
print('temp_dir=Build\\Debug')
print('!ELSE')
print('debug_suffix=')
print('c_debug=/Ox')
print('l_debug=')
print('temp_dir=Build\\Release')
print('!ENDIF')
print()
print('# The following line assumes you have built Python using the standard instructions')
print('# Otherwise fix the following line to point to the library.')
print('pythonlib = "$(pythonhome)/pcbuild/python%s$(debug_suffix).lib"' % version_suffix)
print()
# We only ever write one "entry point" symbol - either
# "main" or "WinMain". Therefore, there is no need to
# pass a subsystem switch to the linker as it works it
# out all by itself. However, the subsystem _does_ determine
# the file extension and additional linker flags.
target_link_flags = ""
target_ext = ".exe"
if subsystem_details[vars['subsystem']][2]:
target_link_flags = "-dll"
target_ext = ".dll"
print("# As the target uses Python%s.dll, we must use this compiler option!" % version_suffix)
print("cdl = /MD")
print()
print("all: $(target)$(debug_suffix)%s" % (target_ext))
print()
print('$(temp_dir):')
print(r' if not exist $(temp_dir)\. mkdir $(temp_dir)')
print()
objects = []
libs = ["shell32.lib", "comdlg32.lib", "wsock32.lib", "user32.lib", "oleaut32.lib"]
for moddefn in moddefns:
print("# Module", moddefn.name)
for file in moddefn.sourceFiles:
base = os.path.basename(file)
base, ext = os.path.splitext(base)
objects.append(base + ".obj")
print(r'$(temp_dir)\%s.obj: "%s"' % (base, file))
print("\t@$(CC) -c -nologo /Fo$* $(cdl) $(c_debug) /D BUILD_FREEZE", end=' ')
print('"-I$(pythonhome)/Include" "-I$(pythonhome)/PC" \\')
print("\t\t$(cflags) $(cdebug) $(cinclude) \\")
extra = moddefn.GetCompilerOptions()
if extra:
print("\t\t%s \\" % (' '.join(extra),))
print('\t\t"%s"' % file)
print()
# Add .lib files this module needs
for modlib in moddefn.GetLinkerLibs():
if modlib not in libs:
libs.append(modlib)
print("ADDN_LINK_FILES=", end=' ')
for addn in vars['addn_link']: print('"%s"' % (addn), end=' ')
print() ; print()
print("OBJS=", end=' ')
for obj in objects: print(r'"$(temp_dir)\%s"' % (obj), end=' ')
print() ; print()
print("LIBS=", end=' ')
for lib in libs: print('"%s"' % (lib), end=' ')
print() ; print()
print("$(target)$(debug_suffix)%s: $(temp_dir) $(OBJS)" % (target_ext))
print("\tlink -out:$(target)$(debug_suffix)%s %s" %
(target_ext, target_link_flags), "@<<")
print("\t$(OBJS)")
print("\t$(LIBS)")
print("\t$(ADDN_LINK_FILES)")
print("\t$(pythonlib) $(lcustom) $(l_debug)")
print("\t$(resources)")
print("<<")
print()
print("clean:")
print("\t-del /f *.obj")
print("\t-del /f $(target).exe")
| 4,982 | 149 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/win32.html | <HTML>
<HEAD>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=windows-1252">
<META NAME="Generator" CONTENT="Microsoft Word 97">
<TITLE>win32</TITLE>
<META NAME="Version" CONTENT="8.0.3410">
<META NAME="Date" CONTENT="10/11/96">
<META NAME="Template" CONTENT="D:\Program Files\Microsoft Office\Office\HTML.DOT">
</HEAD>
<BODY TEXT="#000000" BGCOLOR="#ffffff">
<H1>Freeze for Win32</H1>
<P>This document describes how to use Freeze for the Win32 platform. </P>
<P>Freeze itself is a Python tool for creating stand-alone executables from Python source code. This document does not attempt to document freeze itself - only the win32 specific changes.</P>
<H2>Frozen programs under Win32</H2>
<P>Frozen programs under Win32 can (theoretically) freeze any type of program supported by Python on Win32 - At the moment, Console .EXE and NT Service .EXE programs are supported. GUI Python programs and COM .EXE programs are very nearly all ready to go.</P>
<H3>Program Dependencies</H3>
<P>The person freezing the program has control over what external DLLs are required by a frozen program. The following dependencies are supported:</P>
<H4>Minimal frozen programs</H4>
<P>These programs freeze only .py files in your program. All external DLLs are required at run-time. This includes all .pyd/.dll modules used by your program, Python20.dll, and msvcrt.dll. </P>
<P>A small Python program would typically create a .EXE around 300kb.</P>
<H4>Frozen Extension programs</H4>
<B><I><P>Note:</B></I> For Python 1.5.1, you must get a patch from Guido to import.c for this to work.</P>
<P>These programs also freeze in the sources from all .pyd and .dll files used at runtime. This means the resulting .EXE is only dependent on Python20.dll and msvcrt.dll.</P>
<P>A small Python program using win32api, win32con and one or 2 other win32 extensions would typically create a .EXE around 400kb.</P>
<H4>Completely frozen programs</H4>
<P>Completely stand-alone programs, as is the default on Unix systems. These are currently not supported, mainly as the size of a decent Python program gets very large. However, by tweaking the existing Unix support, this would not be difficult to do.</P>
<H2>Freezing Extension Modules</H2>
<P>By default, a file in the main "freeze" directory called "extensions_win32.ini" is used to obtain information about frozen extensions. A typical entry is:</P>
<CODE><P>[win32api]</P>
<P>dsp=%PYTHONEX%\win32\win32api.dsp</P>
<P>cl=/I %PYTHONEX%\win32\src</P>
<P>libs=kernel32.lib user32.lib shell32.lib advapi32.lib</P>
</CODE><P> </P>
<P>This entry indicates that the win32api extension module is defined in the MSVC project file "<CODE>%PYTHONEX%\win32\win32api.dsp</CODE>". Note the use of<CODE> </CODE>"<CODE>%PYTHONEX%" </CODE>- most strings are substituted with environment variables. In this case, it is assumed variable PYTHONEX points to the main "Python Extensions" source directory (which is assumed to be in the same structure as the release of the extension sources).</P>
<P>An entry in a .INI file can also control specific compiler options, and also the .lib files necessary to be linked with the application.</P>
<H3>Freezing Extension Module Considerations</H3>
<P>To prevent freezing extension modules, simply exclude that module using the freeze "-x" switch.</P>
<P>Occasionally, it will be necessary to explicitly include dependent modules. For example, many win32 modules are dependent on the "pywintypes" module - for example, the win32api module. In this case, the module may be explicitly included using the freeze "-m" option.</P>
<H3>Freezing win32com and PythonCOM</H3>
<P>PythonCOM.dll can be frozen as long as you are not implementing COM Servers. Ie, you can freeze programs which control other applications, but can not implement programs that are themselves controlled by other applications.</P>
<P>If you use any of the win32com .pyd extensions (ex, axscript, mapi, internet, axcontrol), then you will need to specify an additional "-a" option to point to the win32comext directory. There is an example below.</P>
<P>The use of the "win32com.client.gencache" module is not supported (although could be quite easily??)</P>
<H2>Examples</H2>
<P>Before we start, we must:</P>
<CODE><P>D:\temp\delme>set PYTHONEX=d:\src\pythonex</P>
</CODE><H3>Helloworld.py</H3>
<H4>Source Code</H4><DIR>
<DIR>
<CODE><P>import sys</P>
<P> </P>
<P>print " ".join( ["Hello", "world"] + sys.argv[1:] )</P></DIR>
</DIR>
</CODE><H4>Command Line used</H4><DIR>
<DIR>
<FONT FACE="Courier New" SIZE=2><P>\src\python-1.5.1\tools\freeze\freeze.py helloworld.py</P>
<P>nmake</P></DIR>
</DIR>
</FONT><P>Resulting helloworld.exe: 114,688 bytes.</P>
<H3>Helloworld2.py</H3>
<P>Uses win32api. Demonstrates requirement for pywintypes, and difference between freezing extensions and not.</P>
<H4>Source Code</H4><DIR>
<DIR>
<P>import win32api</P>
<P>print "Hello from", win32api.GetComputerName()</P></DIR>
</DIR>
<H4>Command Line used</H4>
<P>By default, win32api will be frozen in with the .EXE. If you do not provide the "pywintypes" inclusion, then the link step will fail looking for all the pywintypes modules.</P><DIR>
<DIR>
<FONT FACE="Courier New" SIZE=2><P>\src\python-1.5.1\tools\freeze\freeze.py helloworld2.py -m pywintypes</P>
<P>nmake</P></DIR>
</DIR>
</FONT><P>Resulting helloworld2.exe: 167,936 bytes</P>
<P>Simply adding win32con to the mix gives an EXE of size: 352,768 bytes.</P>
<H4>Command Line used</H4>
<P>Using this build, we are dependent at runtime on the win32api.pyd and pywintypes15.dll files.</P><DIR>
<DIR>
<P>\src\python-1.5.1\tools\freeze\freeze.py -x win32api helloworld.py</P></DIR>
</DIR>
<P>Resulting helloworld2.exe: 114,688</P>
<P>Adding win32con to this build results in a size of: 252,928</P>
<H3>Testmapi.py</H3>
<P>Uses MAPI, a PythonCOM extension, and win32api.</P>
<H4>Source Code</H4>
<P>from win32com.mapi import mapi</P>
<P>import win32api</P>
<P>mapi.MAPIInitialize( (mapi.MAPI_INIT_VERSION, 0) )</P>
<P>print "Hello from", win32api.GetComputerName()</P>
<P>mapi.MAPIUninitialize()</P>
<H4>Command Line used</H4>
<P>As it does not import pythoncom or pywintypes itself, they must be specified. As it uses the win32comext directory, -a must be used. If you have built the win32com extensions from sources, then the second -a is required.</P><DIR>
<DIR>
<CODE><P>\src\python-1.5.1\tools\freeze\freeze.py -a win32com=%PYTHONEX%\com\win32comext -a win32com.mapi=%PYTHONEX%\com\build\release testmapi.py -m pywintypes -m pythoncom</P></DIR>
</DIR>
</CODE><P>Resulting testmapi.exe: 352,768 bytes</P>
<H3>PipeTestService.py</H3>
<P>This is a standard Python demo in the Win32 extensions. It can be found in the "win32\demos\service" directory.</P>
<H4>Command Line used</H4>
<P>This will create a native NT Service EXE, dependent only on the main Python20.dll. All other modules are built-in to the final .EXE</P><DIR>
<DIR>
<CODE><P>\src\python-1.5.1\tools\freeze\freeze.py -s service %PYTHONEX%\win32\demos\service\pipeTestService.py</P></DIR>
</DIR>
<P>Resulting pipeTestService.exe: </CODE><FONT FACE="Courier New" SIZE=2>533,504 bytes.</P></FONT></BODY>
</HTML>
| 7,182 | 120 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/extensions_win32.ini | ; This is a list of modules generally build as .pyd files.
;
; Each section contains enough information about a module for
; freeze to include the module as a static, built-in module
; in a frozen .EXE/.DLL.
; This is all setup for all the win32 extension modules
; released by Mark Hammond.
; You must ensure that the environment variable PYTHONEX is set
; to point to the root win32 extensions directory
; PYTHONPREFIX must point to the Python build root directory
; (the *parent* of PCbuild); normally the freeze script takes
; care of this.
;--------------------------------------------------------------
;
; Standard Python extension modules
;
; Here are some of the standard Python extensions modules.
; If you need others, add them here
[_socket]
dsp=%PYTHONPREFIX%\PCBuild\_socket.dsp
[_sre]
dsp=%PYTHONPREFIX%\PCBuild\_sre.dsp
[unicodedata]
dsp=%PYTHONPREFIX%\PCBuild\unicodedata.dsp
[mmap]
dsp=%PYTHONPREFIX%\PCBuild\mmap.dsp
[winsound]
dsp=%PYTHONPREFIX%\PCBuild\winsound.dsp
libs=winmm.lib
[parser]
dsp=%PYTHONPREFIX%\PCBuild\parser.dsp
[select]
dsp=%PYTHONPREFIX%\PCBuild\select.dsp
[zlib]
dsp=%PYTHONPREFIX%\PCBuild\zlib.dsp
cl=/I %PYTHONPREFIX%\..\zlib-1.1.4 /D _WINDOWS /D WIN32
libs=%PYTHONPREFIX%\..\zlib-1.1.4\zlib.lib /nodefaultlib:libc
[winreg]
dsp=%PYTHONPREFIX%\PCBuild\winreg.dsp
libs=advapi32.lib
;--------------------------------------------------------------
;
; Win32 Projects.
;
[perfmon]
dsp=%PYTHONEX%\win32\perfmon.dsp
cl=/I %PYTHONEX%\win32\src
Unicode=1
[pywintypes]
dsp=%PYTHONEX%\win32\pywintypes.dsp
cl=/I %PYTHONEX%\win32\src
libs=ole32.lib oleaut32.lib
[win32api]
dsp=%PYTHONEX%\win32\win32api.dsp
cl=/I %PYTHONEX%\win32\src
libs=kernel32.lib user32.lib shell32.lib advapi32.lib
[win32service]
dsp=%PYTHONEX%\win32\win32service.dsp
cl=/I %PYTHONEX%\win32\src
Unicode=1
libs=advapi32.lib
[win32evtlog]
dsp=%PYTHONEX%\win32\win32evtlog.dsp
cl=/I %PYTHONEX%\win32\src
[win32process]
dsp=%PYTHONEX%\win32\win32process.dsp
cl=/I %PYTHONEX%\win32\src
[win32event]
dsp=%PYTHONEX%\win32\win32event.dsp
cl=/I %PYTHONEX%\win32\src
[win32file]
dsp=%PYTHONEX%\win32\win32file.dsp
cl=/I %PYTHONEX%\win32\src
[win32net]
dsp=%PYTHONEX%\win32\win32net.dsp
cl=/I %PYTHONEX%\win32\src
libs=netapi32.lib
[win32pdh]
dsp=%PYTHONEX%\win32\win32pdh.dsp
cl=/I %PYTHONEX%\win32\src
[win32pipe]
dsp=%PYTHONEX%\win32\win32pipe.dsp
cl=/I %PYTHONEX%\win32\src
[win32security]
dsp=%PYTHONEX%\win32\win32security.dsp
cl=/I %PYTHONEX%\win32\src
[win32service]
dsp=%PYTHONEX%\win32\win32service.dsp
cl=/I %PYTHONEX%\win32\src
[win32trace]
dsp=%PYTHONEX%\win32\win32trace.dsp
cl=/I %PYTHONEX%\win32\src
;--------------------------------------------------------------
;
; COM Projects.
;
[pythoncom]
dsp=%PYTHONEX%\com\win32com.dsp
cl=/I %PYTHONEX%\com\win32com\src\include /I %PYTHONEX%\win32\src
libs=uuid.lib
[win32com.axcontrol.axcontrol]
dsp=%PYTHONEX%\com\axcontrol.dsp
cl=/I %PYTHONEX%\win32\src /I %PYTHONEX%\com\win32com\src\include
[win32com.axscript.axscript]
dsp=%PYTHONEX%\com\Active Scripting.dsp
cl=/I %PYTHONEX%\win32\src /I %PYTHONEX%\com\win32com\src\include
[win32com.axdebug.axdebug]
dsp=%PYTHONEX%\com\Active Debugging.dsp
cl=/I %PYTHONEX%\win32\src /I %PYTHONEX%\com\win32com\src\include
[win32com.mapi.mapi]
dsp=%PYTHONEX%\com\mapi.dsp
cl=/I %PYTHONEX%\win32\src /I %PYTHONEX%\com\win32com\src\include
libs=MBLOGON.lib ADDRLKUP.LIB mapi32.lib version.lib
[win32com.mapi.exchange]
dsp=%PYTHONEX%\com\exchange.dsp
cl=/I %PYTHONEX%\win32\src /I %PYTHONEX%\com\win32com\src\include
libs=MBLOGON.lib ADDRLKUP.LIB exchinst.lib EDKCFG.LIB EDKUTILS.LIB EDKMAPI.LIB mapi32.lib version.lib
[win32com.mapi.exchdapi]
dsp=%PYTHONEX%\com\exchdapi.dsp
cl=/I %PYTHONEX%\win32\src /I %PYTHONEX%\com\win32com\src\include
libs=DAPI.LIB
[servicemanager]
dsp=%PYTHONEX%\win32\PythonService EXE.dsp
Unicode = 1
; Pythonwin
[win32ui]
dsp=%PYTHONEX%\Pythonwin\win32ui.dsp
cl=/D _AFXDLL /D FREEZE_WIN32UI /GX /I %PYTHONEX%\win32\src
libs=mfc42.lib
| 3,992 | 172 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/checkextensions_win32.py | """Extension management for Windows.
Under Windows it is unlikely the .obj files are of use, as special compiler options
are needed (primarily to toggle the behavior of "public" symbols).
I don't consider it worth parsing the MSVC makefiles for compiler options. Even if
we get it just right, a specific freeze application may have specific compiler
options anyway (eg, to enable or disable specific functionality)
So my basic strategy is:
* Have some Windows INI files which "describe" one or more extension modules.
(Freeze comes with a default one for all known modules - but you can specify
your own).
* This description can include:
- The MSVC .dsp file for the extension. The .c source file names
are extracted from there.
- Specific compiler/linker options
- Flag to indicate if Unicode compilation is expected.
At the moment the name and location of this INI file is hardcoded,
but an obvious enhancement would be to provide command line options.
"""
import os, sys
try:
import win32api
except ImportError:
win32api = None # User has already been warned
class CExtension:
"""An abstraction of an extension implemented in C/C++
"""
def __init__(self, name, sourceFiles):
self.name = name
# A list of strings defining additional compiler options.
self.sourceFiles = sourceFiles
# A list of special compiler options to be applied to
# all source modules in this extension.
self.compilerOptions = []
# A list of .lib files the final .EXE will need.
self.linkerLibs = []
def GetSourceFiles(self):
return self.sourceFiles
def AddCompilerOption(self, option):
self.compilerOptions.append(option)
def GetCompilerOptions(self):
return self.compilerOptions
def AddLinkerLib(self, lib):
self.linkerLibs.append(lib)
def GetLinkerLibs(self):
return self.linkerLibs
def checkextensions(unknown, extra_inis, prefix):
# Create a table of frozen extensions
defaultMapName = os.path.join( os.path.split(sys.argv[0])[0], "extensions_win32.ini")
if not os.path.isfile(defaultMapName):
sys.stderr.write("WARNING: %s can not be found - standard extensions may not be found\n" % defaultMapName)
else:
# must go on end, so other inis can override.
extra_inis.append(defaultMapName)
ret = []
for mod in unknown:
for ini in extra_inis:
# print "Looking for", mod, "in", win32api.GetFullPathName(ini),"...",
defn = get_extension_defn( mod, ini, prefix )
if defn is not None:
# print "Yay - found it!"
ret.append( defn )
break
# print "Nope!"
else: # For not broken!
sys.stderr.write("No definition of module %s in any specified map file.\n" % (mod))
return ret
def get_extension_defn(moduleName, mapFileName, prefix):
if win32api is None: return None
os.environ['PYTHONPREFIX'] = prefix
dsp = win32api.GetProfileVal(moduleName, "dsp", "", mapFileName)
if dsp=="":
return None
# We allow environment variables in the file name
dsp = win32api.ExpandEnvironmentStrings(dsp)
# If the path to the .DSP file is not absolute, assume it is relative
# to the description file.
if not os.path.isabs(dsp):
dsp = os.path.join( os.path.split(mapFileName)[0], dsp)
# Parse it to extract the source files.
sourceFiles = parse_dsp(dsp)
if sourceFiles is None:
return None
module = CExtension(moduleName, sourceFiles)
# Put the path to the DSP into the environment so entries can reference it.
os.environ['dsp_path'] = os.path.split(dsp)[0]
os.environ['ini_path'] = os.path.split(mapFileName)[0]
cl_options = win32api.GetProfileVal(moduleName, "cl", "", mapFileName)
if cl_options:
module.AddCompilerOption(win32api.ExpandEnvironmentStrings(cl_options))
exclude = win32api.GetProfileVal(moduleName, "exclude", "", mapFileName)
exclude = exclude.split()
if win32api.GetProfileVal(moduleName, "Unicode", 0, mapFileName):
module.AddCompilerOption('/D UNICODE /D _UNICODE')
libs = win32api.GetProfileVal(moduleName, "libs", "", mapFileName).split()
for lib in libs:
module.AddLinkerLib(win32api.ExpandEnvironmentStrings(lib))
for exc in exclude:
if exc in module.sourceFiles:
module.sourceFiles.remove(exc)
return module
# Given an MSVC DSP file, locate C source files it uses
# returns a list of source files.
def parse_dsp(dsp):
# print "Processing", dsp
# For now, only support
ret = []
dsp_path, dsp_name = os.path.split(dsp)
try:
lines = open(dsp, "r").readlines()
except IOError as msg:
sys.stderr.write("%s: %s\n" % (dsp, msg))
return None
for line in lines:
fields = line.strip().split("=", 2)
if fields[0]=="SOURCE":
if os.path.splitext(fields[1])[1].lower() in ['.cpp', '.c']:
ret.append( win32api.GetFullPathName(os.path.join(dsp_path, fields[1] ) ) )
return ret
def write_extension_table(fname, modules):
fp = open(fname, "w")
try:
fp.write (ext_src_header)
# Write fn protos
for module in modules:
# bit of a hack for .pyd's as part of packages.
name = module.name.split('.')[-1]
fp.write('extern void init%s(void);\n' % (name) )
# Write the table
fp.write (ext_tab_header)
for module in modules:
name = module.name.split('.')[-1]
fp.write('\t{"%s", init%s},\n' % (name, name) )
fp.write (ext_tab_footer)
fp.write(ext_src_footer)
finally:
fp.close()
ext_src_header = """\
#include "third_party/python/Include/Python.h"
"""
ext_tab_header = """\
static struct _inittab extensions[] = {
"""
ext_tab_footer = """\
/* Sentinel */
{0, 0}
};
"""
ext_src_footer = """\
extern DL_IMPORT(int) PyImport_ExtendInittab(struct _inittab *newtab);
int PyInitFrozenExtensions()
{
return PyImport_ExtendInittab(extensions);
}
"""
| 6,227 | 189 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/freeze/test/ok.py | import sys
sys.exit(0)
| 23 | 3 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/i18n/pygettext.py | #! /usr/bin/env python3
# -*- coding: iso-8859-1 -*-
# Originally written by Barry Warsaw <[email protected]>
#
# Minimally patched to make it even more xgettext compatible
# by Peter Funk <[email protected]>
#
# 2002-11-22 Jürgen Hermann <[email protected]>
# Added checks that _() only contains string literals, and
# command line args are resolved to module lists, i.e. you
# can now pass a filename, a module or package name, or a
# directory (including globbing chars, important for Win32).
# Made docstring fit in 80 chars wide displays using pydoc.
#
# for selftesting
try:
import fintl
_ = fintl.gettext
except ImportError:
_ = lambda s: s
__doc__ = _("""pygettext -- Python equivalent of xgettext(1)
Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
internationalization of C programs. Most of these tools are independent of
the programming language and can be used from within Python programs.
Martin von Loewis' work[1] helps considerably in this regard.
There's one problem though; xgettext is the program that scans source code
looking for message strings, but it groks only C (or C++). Python
introduces a few wrinkles, such as dual quoting characters, triple quoted
strings, and raw strings. xgettext understands none of this.
Enter pygettext, which uses Python's standard tokenize module to scan
Python source code, generating .pot files identical to what GNU xgettext[2]
generates for C and C++ code. From there, the standard GNU tools can be
used.
A word about marking Python strings as candidates for translation. GNU
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
and gettext_noop. But those can be a lot of text to include all over your
code. C and C++ have a trick: they use the C preprocessor. Most
internationalized C source includes a #define for gettext() to _() so that
what has to be written in the source is much less. Thus these are both
translatable strings:
gettext("Translatable String")
_("Translatable String")
Python of course has no preprocessor so this doesn't work so well. Thus,
pygettext searches only for _() by default, but see the -k/--keyword flag
below for how to augment this.
[1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
[2] http://www.gnu.org/software/gettext/gettext.html
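For example, a module marked up like this (the names are only illustrative)
will have both string literals below extracted as msgids:

    import gettext
    _ = gettext.gettext

    def greet():
        print(_("Hello, world"))
        print(_("Goodbye"))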
NOTE: pygettext attempts to be option and feature compatible with GNU
xgettext where ever possible. However some options are still missing or are
not fully implemented. Also, xgettext's use of command line switches with
option arguments is broken, and in these cases, pygettext just defines
additional switches.
Usage: pygettext [options] inputfile ...
Options:
-a
--extract-all
Extract all strings.
-d name
--default-domain=name
Rename the default output file from messages.pot to name.pot.
-E
--escape
Replace non-ASCII characters with octal escape sequences.
-D
--docstrings
Extract module, class, method, and function docstrings. These do
not need to be wrapped in _() markers, and in fact cannot be for
Python to consider them docstrings. (See also the -X option).
-h
--help
Print this help message and exit.
-k word
--keyword=word
Keywords to look for in addition to the default set, which are:
%(DEFAULTKEYWORDS)s
You can have multiple -k flags on the command line.
-K
--no-default-keywords
Disable the default set of keywords (see above). Any keywords
explicitly added with the -k/--keyword option are still recognized.
--no-location
Do not write filename/lineno location comments.
-n
--add-location
Write filename/lineno location comments indicating where each
extracted string is found in the source. These lines appear before
each msgid. The style of comments is controlled by the -S/--style
option. This is the default.
-o filename
--output=filename
Rename the default output file from messages.pot to filename. If
filename is `-' then the output is sent to standard out.
-p dir
--output-dir=dir
Output files will be placed in directory dir.
-S stylename
--style stylename
Specify which style to use for location comments. Two styles are
supported:
Solaris # File: filename, line: line-number
GNU #: filename:line
The style name is case insensitive. GNU style is the default.
-v
--verbose
Print the names of the files being processed.
-V
--version
Print the version of pygettext and exit.
-w columns
--width=columns
Set width of output to columns.
-x filename
--exclude-file=filename
        Specify a file that contains a list of strings that are not to be
extracted from the input files. Each string to be excluded must
appear on a line by itself in the file.
-X filename
--no-docstrings=filename
Specify a file that contains a list of files (one per line) that
should not have their docstrings extracted. This is only useful in
conjunction with the -D option above.
If `inputfile' is -, standard input is read.
""")
import os
import importlib.machinery
import importlib.util
import sys
import glob
import time
import getopt
import token
import tokenize
__version__ = '1.5'
default_keywords = ['_']
DEFAULTKEYWORDS = ', '.join(default_keywords)
EMPTYSTRING = ''
# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# there.
pot_header = _('''\
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\\n"
"POT-Creation-Date: %(time)s\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: LANGUAGE <[email protected]>\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=%(charset)s\\n"
"Content-Transfer-Encoding: %(encoding)s\\n"
"Generated-By: pygettext.py %(version)s\\n"
''')
def usage(code, msg=''):
print(__doc__ % globals(), file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(code)
def make_escapes(pass_nonascii):
global escapes, escape
if pass_nonascii:
# Allow non-ascii characters to pass through so that e.g. 'msgid
# "Höhe"' would result not result in 'msgid "H\366he"'. Otherwise we
# escape any character outside the 32..126 range.
mod = 128
escape = escape_ascii
else:
mod = 256
escape = escape_nonascii
escapes = [r"\%03o" % i for i in range(mod)]
for i in range(32, 127):
escapes[i] = chr(i)
escapes[ord('\\')] = r'\\'
escapes[ord('\t')] = r'\t'
escapes[ord('\r')] = r'\r'
escapes[ord('\n')] = r'\n'
escapes[ord('\"')] = r'\"'
def escape_ascii(s, encoding):
return ''.join(escapes[ord(c)] if ord(c) < 128 else c for c in s)
def escape_nonascii(s, encoding):
return ''.join(escapes[b] for b in s.encode(encoding))
def is_literal_string(s):
return s[0] in '\'"' or (s[0] in 'rRuU' and s[1] in '\'"')
def safe_eval(s):
# unwrap quotes, safely
return eval(s, {'__builtins__':{}}, {})
def normalize(s, encoding):
# This converts the various Python string types into a format that is
# appropriate for .po files, namely much closer to C style.
lines = s.split('\n')
if len(lines) == 1:
s = '"' + escape(s, encoding) + '"'
else:
if not lines[-1]:
del lines[-1]
lines[-1] = lines[-1] + '\n'
for i in range(len(lines)):
lines[i] = escape(lines[i], encoding)
lineterm = '\\n"\n"'
s = '""\n"' + lineterm.join(lines) + '"'
return s
def containsAny(str, set):
"""Check whether 'str' contains ANY of the chars in 'set'"""
return 1 in [c in str for c in set]
def getFilesForName(name):
"""Get a list of module files for a filename, a module or package name,
or a directory.
"""
if not os.path.exists(name):
# check for glob chars
if containsAny(name, "*?[]"):
files = glob.glob(name)
list = []
for file in files:
list.extend(getFilesForName(file))
return list
# try to find module or package
try:
spec = importlib.util.find_spec(name)
name = spec.origin
except ImportError:
name = None
if not name:
return []
if os.path.isdir(name):
# find all python files in directory
list = []
# get extension for python source files
_py_ext = importlib.machinery.SOURCE_SUFFIXES[0]
for root, dirs, files in os.walk(name):
# don't recurse into CVS directories
if 'CVS' in dirs:
dirs.remove('CVS')
# add all *.py files to list
list.extend(
[os.path.join(root, file) for file in files
if os.path.splitext(file)[1] == _py_ext]
)
return list
elif os.path.exists(name):
# a single file
return [name]
return []
class TokenEater:
def __init__(self, options):
self.__options = options
self.__messages = {}
self.__state = self.__waiting
self.__data = []
self.__lineno = -1
self.__freshmodule = 1
self.__curfile = None
self.__enclosurecount = 0
def __call__(self, ttype, tstring, stup, etup, line):
# dispatch
## import token
## print('ttype:', token.tok_name[ttype], 'tstring:', tstring,
## file=sys.stderr)
self.__state(ttype, tstring, stup[0])
def __waiting(self, ttype, tstring, lineno):
opts = self.__options
# Do docstring extractions, if enabled
if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
# module docstring?
if self.__freshmodule:
if ttype == tokenize.STRING and is_literal_string(tstring):
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
self.__freshmodule = 0
elif ttype not in (tokenize.COMMENT, tokenize.NL):
self.__freshmodule = 0
return
# class or func/method docstring?
if ttype == tokenize.NAME and tstring in ('class', 'def'):
self.__state = self.__suiteseen
return
if ttype == tokenize.NAME and tstring in opts.keywords:
self.__state = self.__keywordseen
def __suiteseen(self, ttype, tstring, lineno):
# skip over any enclosure pairs until we see the colon
if ttype == tokenize.OP:
if tstring == ':' and self.__enclosurecount == 0:
# we see a colon and we're not in an enclosure: end of def
self.__state = self.__suitedocstring
elif tstring in '([{':
self.__enclosurecount += 1
elif tstring in ')]}':
self.__enclosurecount -= 1
def __suitedocstring(self, ttype, tstring, lineno):
# ignore any intervening noise
if ttype == tokenize.STRING and is_literal_string(tstring):
self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
self.__state = self.__waiting
elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
tokenize.COMMENT):
# there was no class docstring
self.__state = self.__waiting
def __keywordseen(self, ttype, tstring, lineno):
if ttype == tokenize.OP and tstring == '(':
self.__data = []
self.__lineno = lineno
self.__state = self.__openseen
else:
self.__state = self.__waiting
def __openseen(self, ttype, tstring, lineno):
if ttype == tokenize.OP and tstring == ')':
# We've seen the last of the translatable strings. Record the
# line number of the first line of the strings and update the list
# of messages seen. Reset state for the next batch. If there
# were no strings inside _(), then just ignore this entry.
if self.__data:
self.__addentry(EMPTYSTRING.join(self.__data))
self.__state = self.__waiting
elif ttype == tokenize.STRING and is_literal_string(tstring):
self.__data.append(safe_eval(tstring))
elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
token.NEWLINE, tokenize.NL]:
# warn if we see anything else than STRING or whitespace
print(_(
'*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
) % {
'token': tstring,
'file': self.__curfile,
'lineno': self.__lineno
}, file=sys.stderr)
self.__state = self.__waiting
def __addentry(self, msg, lineno=None, isdocstring=0):
if lineno is None:
lineno = self.__lineno
if not msg in self.__options.toexclude:
entry = (self.__curfile, lineno)
self.__messages.setdefault(msg, {})[entry] = isdocstring
def set_filename(self, filename):
self.__curfile = filename
self.__freshmodule = 1
def write(self, fp):
options = self.__options
timestamp = time.strftime('%Y-%m-%d %H:%M%z')
encoding = fp.encoding if fp.encoding else 'UTF-8'
print(pot_header % {'time': timestamp, 'version': __version__,
'charset': encoding,
'encoding': '8bit'}, file=fp)
# Sort the entries. First sort each particular entry's keys, then
# sort all the entries by their first item.
reverse = {}
for k, v in self.__messages.items():
keys = sorted(v.keys())
reverse.setdefault(tuple(keys), []).append((k, v))
rkeys = sorted(reverse.keys())
for rkey in rkeys:
rentries = reverse[rkey]
rentries.sort()
for k, v in rentries:
# If the entry was gleaned out of a docstring, then add a
# comment stating so. This is to aid translators who may wish
# to skip translating some unimportant docstrings.
isdocstring = any(v.values())
# k is the message string, v is a dictionary-set of (filename,
# lineno) tuples. We want to sort the entries in v first by
# file name and then by line number.
v = sorted(v.keys())
if not options.writelocations:
pass
# location comments are different b/w Solaris and GNU:
elif options.locationstyle == options.SOLARIS:
for filename, lineno in v:
d = {'filename': filename, 'lineno': lineno}
print(_(
'# File: %(filename)s, line: %(lineno)d') % d, file=fp)
elif options.locationstyle == options.GNU:
# fit as many locations on one line, as long as the
# resulting line length doesn't exceed 'options.width'
locline = '#:'
for filename, lineno in v:
d = {'filename': filename, 'lineno': lineno}
s = _(' %(filename)s:%(lineno)d') % d
if len(locline) + len(s) <= options.width:
locline = locline + s
else:
print(locline, file=fp)
locline = "#:" + s
if len(locline) > 2:
print(locline, file=fp)
if isdocstring:
print('#, docstring', file=fp)
print('msgid', normalize(k, encoding), file=fp)
print('msgstr ""\n', file=fp)
def main():
global default_keywords
try:
opts, args = getopt.getopt(
sys.argv[1:],
'ad:DEhk:Kno:p:S:Vvw:x:X:',
['extract-all', 'default-domain=', 'escape', 'help',
'keyword=', 'no-default-keywords',
'add-location', 'no-location', 'output=', 'output-dir=',
'style=', 'verbose', 'version', 'width=', 'exclude-file=',
'docstrings', 'no-docstrings',
])
except getopt.error as msg:
usage(1, msg)
# for holding option values
class Options:
# constants
GNU = 1
SOLARIS = 2
# defaults
extractall = 0 # FIXME: currently this option has no effect at all.
escape = 0
keywords = []
outpath = ''
outfile = 'messages.pot'
writelocations = 1
locationstyle = GNU
verbose = 0
width = 78
excludefilename = ''
docstrings = 0
nodocstrings = {}
options = Options()
locations = {'gnu' : options.GNU,
'solaris' : options.SOLARIS,
}
# parse options
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-a', '--extract-all'):
options.extractall = 1
elif opt in ('-d', '--default-domain'):
options.outfile = arg + '.pot'
elif opt in ('-E', '--escape'):
options.escape = 1
elif opt in ('-D', '--docstrings'):
options.docstrings = 1
elif opt in ('-k', '--keyword'):
options.keywords.append(arg)
elif opt in ('-K', '--no-default-keywords'):
default_keywords = []
elif opt in ('-n', '--add-location'):
options.writelocations = 1
elif opt in ('--no-location',):
options.writelocations = 0
elif opt in ('-S', '--style'):
options.locationstyle = locations.get(arg.lower())
if options.locationstyle is None:
usage(1, _('Invalid value for --style: %s') % arg)
elif opt in ('-o', '--output'):
options.outfile = arg
elif opt in ('-p', '--output-dir'):
options.outpath = arg
elif opt in ('-v', '--verbose'):
options.verbose = 1
elif opt in ('-V', '--version'):
print(_('pygettext.py (xgettext for Python) %s') % __version__)
sys.exit(0)
elif opt in ('-w', '--width'):
try:
options.width = int(arg)
except ValueError:
usage(1, _('--width argument must be an integer: %s') % arg)
elif opt in ('-x', '--exclude-file'):
options.excludefilename = arg
elif opt in ('-X', '--no-docstrings'):
fp = open(arg)
try:
while 1:
line = fp.readline()
if not line:
break
options.nodocstrings[line[:-1]] = 1
finally:
fp.close()
# calculate escapes
make_escapes(not options.escape)
# calculate all keywords
options.keywords.extend(default_keywords)
# initialize list of strings to exclude
if options.excludefilename:
try:
fp = open(options.excludefilename)
options.toexclude = fp.readlines()
fp.close()
except IOError:
print(_(
"Can't read --exclude-file: %s") % options.excludefilename, file=sys.stderr)
sys.exit(1)
else:
options.toexclude = []
# resolve args to module lists
expanded = []
for arg in args:
if arg == '-':
expanded.append(arg)
else:
expanded.extend(getFilesForName(arg))
args = expanded
# slurp through all the files
eater = TokenEater(options)
for filename in args:
if filename == '-':
if options.verbose:
print(_('Reading standard input'))
fp = sys.stdin.buffer
closep = 0
else:
if options.verbose:
print(_('Working on %s') % filename)
fp = open(filename, 'rb')
closep = 1
try:
eater.set_filename(filename)
try:
tokens = tokenize.tokenize(fp.readline)
for _token in tokens:
eater(*_token)
except tokenize.TokenError as e:
print('%s: %s, line %d, column %d' % (
e.args[0], filename, e.args[1][0], e.args[1][1]),
file=sys.stderr)
finally:
if closep:
fp.close()
# write the output
if options.outfile == '-':
fp = sys.stdout
closep = 0
else:
if options.outpath:
options.outfile = os.path.join(options.outpath, options.outfile)
fp = open(options.outfile, 'w')
closep = 1
try:
eater.write(fp)
finally:
if closep:
fp.close()
if __name__ == '__main__':
main()
# some more test strings
# this one creates a warning
_('*** Seen unexpected token "%(token)s"') % {'token': 'test'}
_('more' 'than' 'one' 'string')
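# For reference, an entry emitted by TokenEater.write() with the default GNU
# location style looks like this (file name and line number are illustrative):
#   #: example.py:42
#   msgid "Some translatable string"
#   msgstr ""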
| 21,549 | 632 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/i18n/makelocalealias.py | #!/usr/bin/env python3
"""
Convert the X11 locale.alias file into a mapping dictionary suitable
for locale.py.
Written by Marc-Andre Lemburg <[email protected]>, 2004-12-10.
"""
import locale
import sys
_locale = locale
# Location of the X11 alias file.
LOCALE_ALIAS = '/usr/share/X11/locale/locale.alias'
# Location of the glibc SUPPORTED locales file.
SUPPORTED = '/usr/share/i18n/SUPPORTED'
def parse(filename):
with open(filename, encoding='latin1') as f:
lines = list(f)
data = {}
for line in lines:
line = line.strip()
if not line:
continue
if line[:1] == '#':
continue
locale, alias = line.split()
        # Fix non-standard locale names, e.g. ks_IN@devanagari.UTF-8
if '@' in alias:
alias_lang, _, alias_mod = alias.partition('@')
if '.' in alias_mod:
alias_mod, _, alias_enc = alias_mod.partition('.')
alias = alias_lang + '.' + alias_enc + '@' + alias_mod
# Strip ':'
if locale[-1] == ':':
locale = locale[:-1]
# Lower-case locale
locale = locale.lower()
# Ignore one letter locale mappings (except for 'c')
if len(locale) == 1 and locale != 'c':
continue
# Normalize encoding, if given
if '.' in locale:
lang, encoding = locale.split('.')[:2]
encoding = encoding.replace('-', '')
encoding = encoding.replace('_', '')
locale = lang + '.' + encoding
data[locale] = alias
return data
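# For example, an alias file line of the form
#     en_GB.ISO8859-1:        en_GB.ISO8859-1
# is turned by parse() into the mapping 'en_gb.iso88591' -> 'en_GB.ISO8859-1'
# (trailing colon stripped, key lower-cased, '-'/'_' removed from the
# encoding); the exact line is illustrative.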
def parse_glibc_supported(filename):
with open(filename, encoding='latin1') as f:
lines = list(f)
data = {}
for line in lines:
line = line.strip()
if not line:
continue
if line[:1] == '#':
continue
line = line.replace('/', ' ').strip()
line = line.rstrip('\\').rstrip()
words = line.split()
if len(words) != 2:
continue
alias, alias_encoding = words
# Lower-case locale
locale = alias.lower()
# Normalize encoding, if given
if '.' in locale:
lang, encoding = locale.split('.')[:2]
encoding = encoding.replace('-', '')
encoding = encoding.replace('_', '')
locale = lang + '.' + encoding
# Add an encoding to alias
alias, _, modifier = alias.partition('@')
alias = _locale._replace_encoding(alias, alias_encoding)
if modifier and not (modifier == 'euro' and alias_encoding == 'ISO-8859-15'):
alias += '@' + modifier
data[locale] = alias
return data
def pprint(data):
items = sorted(data.items())
for k, v in items:
print(' %-40s%a,' % ('%a:' % k, v))
def print_differences(data, olddata):
items = sorted(olddata.items())
for k, v in items:
if k not in data:
print('# removed %a' % k)
elif olddata[k] != data[k]:
print('# updated %a -> %a to %a' % \
(k, olddata[k], data[k]))
# Additions are not mentioned
def optimize(data):
locale_alias = locale.locale_alias
locale.locale_alias = data.copy()
for k, v in data.items():
del locale.locale_alias[k]
if locale.normalize(k) != v:
locale.locale_alias[k] = v
newdata = locale.locale_alias
errors = check(data)
locale.locale_alias = locale_alias
if errors:
sys.exit(1)
return newdata
def check(data):
# Check that all alias definitions from the X11 file
# are actually mapped to the correct alias locales.
errors = 0
for k, v in data.items():
if locale.normalize(k) != v:
print('ERROR: %a -> %a != %a' % (k, locale.normalize(k), v),
file=sys.stderr)
errors += 1
return errors
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--locale-alias', default=LOCALE_ALIAS,
help='location of the X11 alias file '
'(default: %a)' % LOCALE_ALIAS)
parser.add_argument('--glibc-supported', default=SUPPORTED,
help='location of the glibc SUPPORTED locales file '
'(default: %a)' % SUPPORTED)
args = parser.parse_args()
data = locale.locale_alias.copy()
data.update(parse_glibc_supported(args.glibc_supported))
data.update(parse(args.locale_alias))
while True:
# Repeat optimization while the size is decreased.
n = len(data)
data = optimize(data)
if len(data) == n:
break
print_differences(data, locale.locale_alias)
print()
print('locale_alias = {')
pprint(data)
print('}')
| 4,851 | 151 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/i18n/msgfmt.py | #! /usr/bin/env python3
# Written by Martin v. Löwis <[email protected]>
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation.
Usage: msgfmt.py [OPTIONS] filename.po
Options:
-o file
--output-file=file
Specify the output file to write to. If omitted, output will go to a
file named filename.mo (based off the input file name).
-h
--help
Print this message and exit.
-V
--version
Display version information and exit.
"""
import os
import sys
import ast
import getopt
import struct
import array
from email.parser import HeaderParser
__version__ = "1.1"
MESSAGES = {}
def usage(code, msg=''):
print(__doc__, file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(code)
def add(id, str, fuzzy):
"Add a non-fuzzy translation to the dictionary."
global MESSAGES
if not fuzzy and str:
MESSAGES[id] = str
def generate():
"Return the generated output."
global MESSAGES
# the keys are sorted in the .mo file
keys = sorted(MESSAGES.keys())
offsets = []
ids = strs = b''
for id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(id), len(strs), len(MESSAGES[id])))
ids += id + b'\0'
strs += MESSAGES[id] + b'\0'
    # The header is 7 32-bit unsigned integers.  We don't use hash tables, so
    # the keys start right after the two index tables (each index entry is a
    # 32-bit length followed by a 32-bit offset).
keystart = 7*4+16*len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1+keystart]
voffsets += [l2, o2+valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412de, # Magic
0, # Version
len(keys), # # of entries
7*4, # start of key index
7*4+len(keys)*8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tobytes()
output += ids
output += strs
return output
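# For reference, the image returned by generate() is laid out as follows
# (all integers are 32-bit):
#   header: magic, version, number of entries, offset of the key index,
#           offset of the value index, hash table size, hash table offset
#   key index, then value index: one (length, offset) pair per entry
#   then the NUL-terminated msgids, followed by the NUL-terminated msgstrs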
def make(filename, outfile):
ID = 1
STR = 2
# Compute .mo name from .po name and arguments
if filename.endswith('.po'):
infile = filename
else:
infile = filename + '.po'
if outfile is None:
outfile = os.path.splitext(infile)[0] + '.mo'
try:
with open(infile, 'rb') as f:
lines = f.readlines()
except IOError as msg:
print(msg, file=sys.stderr)
sys.exit(1)
section = None
fuzzy = 0
# Start off assuming Latin-1, so everything decodes without failure,
# until we know the exact encoding
encoding = 'latin-1'
# Parse the catalog
lno = 0
for l in lines:
l = l.decode(encoding)
lno += 1
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
add(msgid, msgstr, fuzzy)
section = None
fuzzy = 0
# Record a fuzzy mark
if l[:2] == '#,' and 'fuzzy' in l:
fuzzy = 1
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid') and not l.startswith('msgid_plural'):
if section == STR:
add(msgid, msgstr, fuzzy)
if not msgid:
# See whether there is an encoding declaration
p = HeaderParser()
charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
if charset:
encoding = charset
section = ID
l = l[5:]
msgid = msgstr = b''
is_plural = False
# This is a message with plural forms
elif l.startswith('msgid_plural'):
if section != ID:
print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[12:]
msgid += b'\0' # separator of singular and plural
is_plural = True
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
if l.startswith('msgstr['):
if not is_plural:
print('plural without msgid_plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l.split(']', 1)[1]
if msgstr:
msgstr += b'\0' # Separator of the various plural forms
else:
if is_plural:
print('indexed msgstr required for plural on %s:%d' % (infile, lno),
file=sys.stderr)
sys.exit(1)
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
l = ast.literal_eval(l)
if section == ID:
msgid += l.encode(encoding)
elif section == STR:
msgstr += l.encode(encoding)
else:
print('Syntax error on %s:%d' % (infile, lno), \
'before:', file=sys.stderr)
print(l, file=sys.stderr)
sys.exit(1)
# Add last entry
if section == STR:
add(msgid, msgstr, fuzzy)
# Compute output
output = generate()
try:
with open(outfile,"wb") as f:
f.write(output)
except IOError as msg:
print(msg, file=sys.stderr)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'hVo:',
['help', 'version', 'output-file='])
except getopt.error as msg:
usage(1, msg)
outfile = None
# parse options
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print("msgfmt.py", __version__)
sys.exit(0)
elif opt in ('-o', '--output-file'):
outfile = arg
# do it
if not args:
print('No input file given', file=sys.stderr)
print("Try `msgfmt --help' for more information.", file=sys.stderr)
return
for filename in args:
make(filename, outfile)
if __name__ == '__main__':
main()
| 7,082 | 239 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/tz/zdump.py | import sys
import os
import struct
from array import array
from collections import namedtuple
from datetime import datetime, timedelta
ttinfo = namedtuple('ttinfo', ['tt_gmtoff', 'tt_isdst', 'tt_abbrind'])
class TZInfo:
def __init__(self, transitions, type_indices, ttis, abbrs):
self.transitions = transitions
self.type_indices = type_indices
self.ttis = ttis
self.abbrs = abbrs
@classmethod
def fromfile(cls, fileobj):
if fileobj.read(4).decode() != "TZif":
raise ValueError("not a zoneinfo file")
fileobj.seek(20)
header = fileobj.read(24)
tzh = (tzh_ttisgmtcnt, tzh_ttisstdcnt, tzh_leapcnt,
tzh_timecnt, tzh_typecnt, tzh_charcnt) = struct.unpack(">6l", header)
transitions = array('i')
transitions.fromfile(fileobj, tzh_timecnt)
if sys.byteorder != 'big':
transitions.byteswap()
type_indices = array('B')
type_indices.fromfile(fileobj, tzh_timecnt)
ttis = []
for i in range(tzh_typecnt):
ttis.append(ttinfo._make(struct.unpack(">lbb", fileobj.read(6))))
abbrs = fileobj.read(tzh_charcnt)
self = cls(transitions, type_indices, ttis, abbrs)
self.tzh = tzh
return self
def dump(self, stream, start=None, end=None):
for j, (trans, i) in enumerate(zip(self.transitions, self.type_indices)):
utc = datetime.utcfromtimestamp(trans)
tti = self.ttis[i]
lmt = datetime.utcfromtimestamp(trans + tti.tt_gmtoff)
abbrind = tti.tt_abbrind
abbr = self.abbrs[abbrind:self.abbrs.find(0, abbrind)].decode()
if j > 0:
prev_tti = self.ttis[self.type_indices[j - 1]]
shift = " %+g" % ((tti.tt_gmtoff - prev_tti.tt_gmtoff) / 3600)
else:
shift = ''
print("%s UTC = %s %-5s isdst=%d" % (utc, lmt, abbr, tti[1]) + shift, file=stream)
@classmethod
def zonelist(cls, zonedir='/zip/usr/share/zoneinfo'):
zones = []
for root, _, files in os.walk(zonedir):
for f in files:
p = os.path.join(root, f)
with open(p, 'rb') as o:
magic = o.read(4)
if magic == b'TZif':
zones.append(p[len(zonedir) + 1:])
return zones
if __name__ == '__main__':
if len(sys.argv) < 2:
zones = TZInfo.zonelist()
for z in zones:
print(z)
sys.exit()
filepath = sys.argv[1]
if not filepath.startswith('/'):
filepath = os.path.join('/zip/usr/share/zoneinfo', filepath)
with open(filepath, 'rb') as fileobj:
tzi = TZInfo.fromfile(fileobj)
tzi.dump(sys.stdout)
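# Example invocations (zone names are illustrative):
#   python3 zdump.py                  # list every TZif file under /zip/usr/share/zoneinfo
#   python3 zdump.py Europe/Paris     # dump that zone's transitions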
| 2,789 | 82 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/websafe.txt | # Websafe RGB values
#000000
#000033
#000066
#000099
#0000cc
#0000ff
#003300
#003333
#003366
#003399
#0033cc
#0033ff
#006600
#006633
#006666
#006699
#0066cc
#0066ff
#009900
#009933
#009966
#009999
#0099cc
#0099ff
#00cc00
#00cc33
#00cc66
#00cc99
#00cccc
#00ccff
#00ff00
#00ff33
#00ff66
#00ff99
#00ffcc
#00ffff
#330000
#330033
#330066
#330099
#3300cc
#3300ff
#333300
#333333
#333366
#333399
#3333cc
#3333ff
#336600
#336633
#336666
#336699
#3366cc
#3366ff
#339900
#339933
#339966
#339999
#3399cc
#3399ff
#33cc00
#33cc33
#33cc66
#33cc99
#33cccc
#33ccff
#33ff00
#33ff33
#33ff66
#33ff99
#33ffcc
#33ffff
#660000
#660033
#660066
#660099
#6600cc
#6600ff
#663300
#663333
#663366
#663399
#6633cc
#6633ff
#666600
#666633
#666666
#666699
#6666cc
#6666ff
#669900
#669933
#669966
#669999
#6699cc
#6699ff
#66cc00
#66cc33
#66cc66
#66cc99
#66cccc
#66ccff
#66ff00
#66ff33
#66ff66
#66ff99
#66ffcc
#66ffff
#990000
#990033
#990066
#990099
#9900cc
#9900ff
#993300
#993333
#993366
#993399
#9933cc
#9933ff
#996600
#996633
#996666
#996699
#9966cc
#9966ff
#999900
#999933
#999966
#999999
#9999cc
#9999ff
#99cc00
#99cc33
#99cc66
#99cc99
#99cccc
#99ccff
#99ff00
#99ff33
#99ff66
#99ff99
#99ffcc
#99ffff
#cc0000
#cc0033
#cc0066
#cc0099
#cc00cc
#cc00ff
#cc3300
#cc3333
#cc3366
#cc3399
#cc33cc
#cc33ff
#cc6600
#cc6633
#cc6666
#cc6699
#cc66cc
#cc66ff
#cc9900
#cc9933
#cc9966
#cc9999
#cc99cc
#cc99ff
#cccc00
#cccc33
#cccc66
#cccc99
#cccccc
#ccccff
#ccff00
#ccff33
#ccff66
#ccff99
#ccffcc
#ccffff
#ff0000
#ff0033
#ff0066
#ff0099
#ff00cc
#ff00ff
#ff3300
#ff3333
#ff3366
#ff3399
#ff33cc
#ff33ff
#ff6600
#ff6633
#ff6666
#ff6699
#ff66cc
#ff66ff
#ff9900
#ff9933
#ff9966
#ff9999
#ff99cc
#ff99ff
#ffcc00
#ffcc33
#ffcc66
#ffcc99
#ffcccc
#ffccff
#ffff00
#ffff33
#ffff66
#ffff99
#ffffcc
#ffffff
| 1,749 | 218 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/ChipViewer.py | """Chip viewer and widget.
In the lower left corner of the main Pynche window, you will see two
ChipWidgets, one for the selected color and one for the nearest color. The
selected color is the actual RGB value expressed as an X11 #COLOR name. The
nearest color is the named color from the X11 database that is closest to the
selected color in 3D space. There may be other colors equally close, but the
nearest one is the first one found.
Clicking on the nearest color chip selects that named color.
The ChipViewer class includes the entire lower left quandrant; i.e. both the
selected and nearest ChipWidgets.
"""
from tkinter import *
import ColorDB
class ChipWidget:
_WIDTH = 150
_HEIGHT = 80
def __init__(self,
master = None,
width = _WIDTH,
height = _HEIGHT,
text = 'Color',
initialcolor = 'blue',
presscmd = None,
releasecmd = None):
# create the text label
self.__label = Label(master, text=text)
self.__label.grid(row=0, column=0)
# create the color chip, implemented as a frame
self.__chip = Frame(master, relief=RAISED, borderwidth=2,
width=width,
height=height,
background=initialcolor)
self.__chip.grid(row=1, column=0)
# create the color name
self.__namevar = StringVar()
self.__namevar.set(initialcolor)
self.__name = Entry(master, textvariable=self.__namevar,
relief=FLAT, justify=CENTER, state=DISABLED,
font=self.__label['font'])
self.__name.grid(row=2, column=0)
# create the message area
self.__msgvar = StringVar()
        self.__msg = Entry(master, textvariable=self.__msgvar,
                           relief=FLAT, justify=CENTER, state=DISABLED,
                           font=self.__label['font'])
        self.__msg.grid(row=3, column=0)
# set bindings
if presscmd:
self.__chip.bind('<ButtonPress-1>', presscmd)
if releasecmd:
self.__chip.bind('<ButtonRelease-1>', releasecmd)
def set_color(self, color):
self.__chip.config(background=color)
def get_color(self):
return self.__chip['background']
def set_name(self, colorname):
self.__namevar.set(colorname)
def set_message(self, message):
self.__msgvar.set(message)
def press(self):
self.__chip.configure(relief=SUNKEN)
def release(self):
self.__chip.configure(relief=RAISED)
class ChipViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
self.__frame = Frame(master, relief=RAISED, borderwidth=1)
self.__frame.grid(row=3, column=0, ipadx=5, sticky='NSEW')
# create the chip that will display the currently selected color
# exactly
self.__sframe = Frame(self.__frame)
self.__sframe.grid(row=0, column=0)
self.__selected = ChipWidget(self.__sframe, text='Selected')
# create the chip that will display the nearest real X11 color
# database color name
self.__nframe = Frame(self.__frame)
self.__nframe.grid(row=0, column=1)
self.__nearest = ChipWidget(self.__nframe, text='Nearest',
presscmd = self.__buttonpress,
releasecmd = self.__buttonrelease)
def update_yourself(self, red, green, blue):
# Selected always shows the #rrggbb name of the color, nearest always
# shows the name of the nearest color in the database. BAW: should
# an exact match be indicated in some way?
#
# Always use the #rrggbb style to actually set the color, since we may
# not be using X color names (e.g. "web-safe" names)
colordb = self.__sb.colordb()
rgbtuple = (red, green, blue)
rrggbb = ColorDB.triplet_to_rrggbb(rgbtuple)
# find the nearest
nearest = colordb.nearest(red, green, blue)
nearest_tuple = colordb.find_byname(nearest)
nearest_rrggbb = ColorDB.triplet_to_rrggbb(nearest_tuple)
self.__selected.set_color(rrggbb)
self.__nearest.set_color(nearest_rrggbb)
# set the name and messages areas
self.__selected.set_name(rrggbb)
if rrggbb == nearest_rrggbb:
self.__selected.set_message(nearest)
else:
self.__selected.set_message('')
self.__nearest.set_name(nearest_rrggbb)
self.__nearest.set_message(nearest)
def __buttonpress(self, event=None):
self.__nearest.press()
def __buttonrelease(self, event=None):
self.__nearest.release()
rrggbb = self.__nearest.get_color()
red, green, blue = ColorDB.rrggbb_to_triplet(rrggbb)
self.__sb.update_views(red, green, blue)
| 4,998 | 131 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/ListViewer.py | """ListViewer class.
This class implements an input/output view on the color model. It lists every
unique color (e.g. unique r/g/b value) found in the color database. Each
color is shown by small swatch and primary color name. Some colors have
aliases -- more than one name for the same r/g/b value. These aliases are
displayed in the small listbox at the bottom of the screen.
Clicking on a color name or swatch selects that color and updates all other
windows. When a color is selected in a different viewer, the color list is
scrolled to the selected color and it is highlighted. If the selected color
is an r/g/b value without a name, no scrolling occurs.
You can turn off Update On Click if all you want to see is the alias for a
given name, without selecting the color.
"""
from tkinter import *
import ColorDB
ADDTOVIEW = 'Color %List Window...'
class ListViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
optiondb = switchboard.optiondb()
self.__lastbox = None
self.__dontcenter = 0
# GUI
root = self.__root = Toplevel(master, class_='Pynche')
root.protocol('WM_DELETE_WINDOW', self.withdraw)
root.title('Pynche Color List')
root.iconname('Pynche Color List')
root.bind('<Alt-q>', self.__quit)
root.bind('<Alt-Q>', self.__quit)
root.bind('<Alt-w>', self.withdraw)
root.bind('<Alt-W>', self.withdraw)
#
# create the canvas which holds everything, and its scrollbar
#
frame = self.__frame = Frame(root)
frame.pack()
canvas = self.__canvas = Canvas(frame, width=160, height=300,
borderwidth=2, relief=SUNKEN)
self.__scrollbar = Scrollbar(frame)
self.__scrollbar.pack(fill=Y, side=RIGHT)
canvas.pack(fill=BOTH, expand=1)
canvas.configure(yscrollcommand=(self.__scrollbar, 'set'))
self.__scrollbar.configure(command=(canvas, 'yview'))
self.__populate()
#
# Update on click
self.__uoc = BooleanVar()
self.__uoc.set(optiondb.get('UPONCLICK', 1))
self.__uocbtn = Checkbutton(root,
text='Update on Click',
variable=self.__uoc,
command=self.__toggleupdate)
self.__uocbtn.pack(expand=1, fill=BOTH)
#
# alias list
self.__alabel = Label(root, text='Aliases:')
self.__alabel.pack()
self.__aliases = Listbox(root, height=5,
selectmode=BROWSE)
self.__aliases.pack(expand=1, fill=BOTH)
def __populate(self):
#
# create all the buttons
colordb = self.__sb.colordb()
canvas = self.__canvas
row = 0
widest = 0
bboxes = self.__bboxes = []
for name in colordb.unique_names():
exactcolor = ColorDB.triplet_to_rrggbb(colordb.find_byname(name))
canvas.create_rectangle(5, row*20 + 5,
20, row*20 + 20,
fill=exactcolor)
textid = canvas.create_text(25, row*20 + 13,
text=name,
anchor=W)
x1, y1, textend, y2 = canvas.bbox(textid)
boxid = canvas.create_rectangle(3, row*20+3,
textend+3, row*20 + 23,
outline='',
tags=(exactcolor, 'all'))
canvas.bind('<ButtonRelease>', self.__onrelease)
bboxes.append(boxid)
if textend+3 > widest:
widest = textend+3
row += 1
canvheight = (row-1)*20 + 25
canvas.config(scrollregion=(0, 0, 150, canvheight))
for box in bboxes:
x1, y1, x2, y2 = canvas.coords(box)
canvas.coords(box, x1, y1, widest, y2)
def __onrelease(self, event=None):
canvas = self.__canvas
# find the current box
x = canvas.canvasx(event.x)
y = canvas.canvasy(event.y)
ids = canvas.find_overlapping(x, y, x, y)
for boxid in ids:
if boxid in self.__bboxes:
break
else:
## print 'No box found!'
return
tags = self.__canvas.gettags(boxid)
for t in tags:
if t[0] == '#':
break
else:
## print 'No color tag found!'
return
red, green, blue = ColorDB.rrggbb_to_triplet(t)
self.__dontcenter = 1
if self.__uoc.get():
self.__sb.update_views(red, green, blue)
else:
self.update_yourself(red, green, blue)
self.__red, self.__green, self.__blue = red, green, blue
def __toggleupdate(self, event=None):
if self.__uoc.get():
self.__sb.update_views(self.__red, self.__green, self.__blue)
def __quit(self, event=None):
self.__root.quit()
def withdraw(self, event=None):
self.__root.withdraw()
def deiconify(self, event=None):
self.__root.deiconify()
def update_yourself(self, red, green, blue):
canvas = self.__canvas
# turn off the last box
if self.__lastbox:
canvas.itemconfigure(self.__lastbox, outline='')
# turn on the current box
colortag = ColorDB.triplet_to_rrggbb((red, green, blue))
canvas.itemconfigure(colortag, outline='black')
self.__lastbox = colortag
# fill the aliases
self.__aliases.delete(0, END)
try:
aliases = self.__sb.colordb().aliases_of(red, green, blue)[1:]
except ColorDB.BadColor:
self.__aliases.insert(END, '<no matching color>')
return
if not aliases:
self.__aliases.insert(END, '<no aliases>')
else:
for name in aliases:
self.__aliases.insert(END, name)
# maybe scroll the canvas so that the item is visible
if self.__dontcenter:
self.__dontcenter = 0
else:
ig, ig, ig, y1 = canvas.coords(colortag)
ig, ig, ig, y2 = canvas.coords(self.__bboxes[-1])
h = int(canvas['height']) * 0.5
canvas.yview('moveto', (y1-h) / y2)
def save_options(self, optiondb):
optiondb['UPONCLICK'] = self.__uoc.get()
def colordb_changed(self, colordb):
self.__canvas.delete('all')
self.__populate()
| 6,648 | 176 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/DetailsViewer.py | """DetailsViewer class.
This class implements a pure input window which allows you to meticulously
edit the current color. You have both mouse control of the color (via the
buttons along the bottom row), and there are keyboard bindings for each of the
increment/decrement buttons.
The top three check buttons allow you to specify which of the three color
variations are tied together when incrementing and decrementing. Red, green,
and blue are self evident. By tying together red and green, you can modify
the yellow level of the color. By tying together red and blue, you can modify
the magenta level of the color. By tying together green and blue, you can
modify the cyan level, and by tying all three together, you can modify the
grey level.
The behavior at the boundaries (0 and 255) is defined by the `At boundary'
option menu:
Stop
When the increment or decrement would send any of the tied variations
out of bounds, the entire delta is discarded.
Wrap Around
When the increment or decrement would send any of the tied variations
out of bounds, the out of bounds variation is wrapped around to the
other side. Thus if red were at 238 and 25 were added to it, red
would have the value 7.
Preserve Distance
When the increment or decrement would send any of the tied variations
out of bounds, all tied variations are wrapped as one, so as to
preserve the distance between them. Thus if green and blue were tied,
and green was at 238 while blue was at 223, and an increment of 25
were applied, green would be at 15 and blue would be at 0.
Squash
When the increment or decrement would send any of the tied variations
out of bounds, the out of bounds variation is set to the ceiling of
255 or floor of 0, as appropriate. In this way, all tied variations
are squashed to one edge or the other.
The following key bindings can be used as accelerators. Note that Pynche can
fall behind if you hold the key down as a key repeat:
Left arrow == -1
Right arrow == +1
Control + Left == -10
Control + Right == +10
Shift + Left == -25
Shift + Right == +25
"""
from tkinter import *
STOP = 'Stop'
WRAP = 'Wrap Around'
RATIO = 'Preserve Distance'
GRAV = 'Squash'
ADDTOVIEW = 'Details Window...'
class DetailsViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
optiondb = switchboard.optiondb()
self.__red, self.__green, self.__blue = switchboard.current_rgb()
# GUI
root = self.__root = Toplevel(master, class_='Pynche')
root.protocol('WM_DELETE_WINDOW', self.withdraw)
root.title('Pynche Details Window')
root.iconname('Pynche Details Window')
root.bind('<Alt-q>', self.__quit)
root.bind('<Alt-Q>', self.__quit)
root.bind('<Alt-w>', self.withdraw)
root.bind('<Alt-W>', self.withdraw)
# accelerators
root.bind('<KeyPress-Left>', self.__minus1)
root.bind('<KeyPress-Right>', self.__plus1)
root.bind('<Control-KeyPress-Left>', self.__minus10)
root.bind('<Control-KeyPress-Right>', self.__plus10)
root.bind('<Shift-KeyPress-Left>', self.__minus25)
root.bind('<Shift-KeyPress-Right>', self.__plus25)
#
# color ties
frame = self.__frame = Frame(root)
frame.pack(expand=YES, fill=X)
self.__l1 = Label(frame, text='Move Sliders:')
self.__l1.grid(row=1, column=0, sticky=E)
self.__rvar = IntVar()
self.__rvar.set(optiondb.get('RSLIDER', 4))
self.__radio1 = Checkbutton(frame, text='Red',
variable=self.__rvar,
command=self.__effect,
onvalue=4, offvalue=0)
self.__radio1.grid(row=1, column=1, sticky=W)
self.__gvar = IntVar()
self.__gvar.set(optiondb.get('GSLIDER', 2))
self.__radio2 = Checkbutton(frame, text='Green',
variable=self.__gvar,
command=self.__effect,
onvalue=2, offvalue=0)
self.__radio2.grid(row=2, column=1, sticky=W)
self.__bvar = IntVar()
self.__bvar.set(optiondb.get('BSLIDER', 1))
self.__radio3 = Checkbutton(frame, text='Blue',
variable=self.__bvar,
command=self.__effect,
onvalue=1, offvalue=0)
self.__radio3.grid(row=3, column=1, sticky=W)
self.__l2 = Label(frame)
self.__l2.grid(row=4, column=1, sticky=W)
self.__effect()
#
# Boundary behavior
self.__l3 = Label(frame, text='At boundary:')
self.__l3.grid(row=5, column=0, sticky=E)
self.__boundvar = StringVar()
self.__boundvar.set(optiondb.get('ATBOUND', STOP))
self.__omenu = OptionMenu(frame, self.__boundvar,
STOP, WRAP, RATIO, GRAV)
self.__omenu.grid(row=5, column=1, sticky=W)
self.__omenu.configure(width=17)
#
# Buttons
frame = self.__btnframe = Frame(frame)
frame.grid(row=0, column=0, columnspan=2, sticky='EW')
self.__down25 = Button(frame, text='-25',
command=self.__minus25)
self.__down10 = Button(frame, text='-10',
command=self.__minus10)
self.__down1 = Button(frame, text='-1',
command=self.__minus1)
self.__up1 = Button(frame, text='+1',
command=self.__plus1)
self.__up10 = Button(frame, text='+10',
command=self.__plus10)
self.__up25 = Button(frame, text='+25',
command=self.__plus25)
self.__down25.pack(expand=YES, fill=X, side=LEFT)
self.__down10.pack(expand=YES, fill=X, side=LEFT)
self.__down1.pack(expand=YES, fill=X, side=LEFT)
self.__up1.pack(expand=YES, fill=X, side=LEFT)
self.__up10.pack(expand=YES, fill=X, side=LEFT)
self.__up25.pack(expand=YES, fill=X, side=LEFT)
def __effect(self, event=None):
tie = self.__rvar.get() + self.__gvar.get() + self.__bvar.get()
if tie in (0, 1, 2, 4):
text = ''
else:
text = '(= %s Level)' % {3: 'Cyan',
5: 'Magenta',
6: 'Yellow',
7: 'Grey'}[tie]
self.__l2.configure(text=text)
def __quit(self, event=None):
self.__root.quit()
def withdraw(self, event=None):
self.__root.withdraw()
def deiconify(self, event=None):
self.__root.deiconify()
def __minus25(self, event=None):
self.__delta(-25)
def __minus10(self, event=None):
self.__delta(-10)
def __minus1(self, event=None):
self.__delta(-1)
def __plus1(self, event=None):
self.__delta(1)
def __plus10(self, event=None):
self.__delta(10)
def __plus25(self, event=None):
self.__delta(25)
def __delta(self, delta):
tie = []
if self.__rvar.get():
red = self.__red + delta
tie.append(red)
else:
red = self.__red
if self.__gvar.get():
green = self.__green + delta
tie.append(green)
else:
green = self.__green
if self.__bvar.get():
blue = self.__blue + delta
tie.append(blue)
else:
blue = self.__blue
# now apply at boundary behavior
atbound = self.__boundvar.get()
if atbound == STOP:
if red < 0 or green < 0 or blue < 0 or \
red > 255 or green > 255 or blue > 255:
# then
red, green, blue = self.__red, self.__green, self.__blue
elif atbound == WRAP or (atbound == RATIO and len(tie) < 2):
if red < 0:
red += 256
if green < 0:
green += 256
if blue < 0:
blue += 256
if red > 255:
red -= 256
if green > 255:
green -= 256
if blue > 255:
blue -= 256
elif atbound == RATIO:
# for when 2 or 3 colors are tied together
dir = 0
for c in tie:
if c < 0:
dir = -1
elif c > 255:
dir = 1
if dir == -1:
delta = max(tie)
if self.__rvar.get():
red = red + 255 - delta
if self.__gvar.get():
green = green + 255 - delta
if self.__bvar.get():
blue = blue + 255 - delta
elif dir == 1:
delta = min(tie)
if self.__rvar.get():
red = red - delta
if self.__gvar.get():
green = green - delta
if self.__bvar.get():
blue = blue - delta
elif atbound == GRAV:
if red < 0:
red = 0
if green < 0:
green = 0
if blue < 0:
blue = 0
if red > 255:
red = 255
if green > 255:
green = 255
if blue > 255:
blue = 255
self.__sb.update_views(red, green, blue)
self.__root.update_idletasks()
def update_yourself(self, red, green, blue):
self.__red = red
self.__green = green
self.__blue = blue
def save_options(self, optiondb):
optiondb['RSLIDER'] = self.__rvar.get()
optiondb['GSLIDER'] = self.__gvar.get()
optiondb['BSLIDER'] = self.__bvar.get()
optiondb['ATBOUND'] = self.__boundvar.get()
| 10,116 | 274 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/README | Pynche - The PYthonically Natural Color and Hue Editor
Contact: Barry A. Warsaw
Email: [email protected]
Version: 1.3
Introduction
Pynche is a color editor based largely on a similar program that I
originally wrote back in 1987 for the Sunview window system. That
editor was called ICE, the Interactive Color Editor. I'd always
wanted to port this program to X but didn't feel like hacking X
and C code to do it. Fast forward many years, to where Python +
Tkinter provides such a nice programming environment, with enough
power, that I finally buckled down and re-implemented it. I
changed the name because these days, too many other systems have
the acronym `ICE'.
Pynche should work with any variant of Python after 1.5.2
(e.g. 2.0.1 and 2.1.1), using Tk 8.0.x. It's been tested on
Solaris 2.6, Windows NT 4, and various Linux distros. You'll want
to be sure to have at least Tk 8.0.3 for Windows. Also, Pynche is
very colormap intensive, so it doesn't work very well on 8-bit
graphics cards; 24bit+ graphics cards are so cheap these days,
I'll probably never "fix" that.
Pynche must find a text database of colors names in order to
provide `nearest' color matching. Pynche is distributed with an
rgb.txt file from the X11R6.4 distribution for this reason, along
with other "Web related" database (see below). You can use a
different file with the -d option. The file xlicense.txt contains
the license only for rgb.txt and both files are in the X/
subdirectory.
Pynche is pronounced: Pin'-chee
Running Standalone
On Unix, start it by running the `pynche' script. On Windows, run
pynche.pyw to inhibit the console window. When run from the
command line, the following options are recognized:
--database file
-d file
Alternate location of the color database file. Without this
option, the first valid file found will be used (see below).
--initfile file
-i file
Alternate location of the persistent initialization file. See
the section on Persistency below.
--ignore
-X
Ignore the persistent initialization file when starting up.
Pynche will still write the current option settings to the
persistent init file when it quits.
--help
-h
Print the help message.
initialcolor
a Tk color name or #rrggbb color spec to be used as the
initially selected color. This overrides any color saved in
the persistent init file. Since `#' needs to be escaped in
many shells, it is optional in the spec (e.g. #45dd1f is the
same as 45dd1f).
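For example, to start up with a given color, an alternate color
database, and a private init file (file names are illustrative):
    pynche -d X/rgb.txt -i ~/.pynche-test 45dd1f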
Running as a Modal Dialog
Pynche can be run as a modal dialog, inside another application,
say as a general color chooser. In fact, Grail 0.6 uses Pynche
and a future version of IDLE may as well. Pynche supports the API
implemented by the Tkinter standard tkColorChooser module, with a
few changes as described below. By importing pyColorChooser from
the Pynche package, you can run
pyColorChooser.askcolor()
which will popup Pynche as a modal dialog, and return the selected
color.
There are some UI differences when running as a modal
vs. standalone. When running as a modal, there is no "Quit" menu
item under the "File" menu. Instead there are "Okay" and "Cancel"
buttons.
When "Okay" is hit, askcolor() returns the tuple
((r, g, b), "name")
where r, g, and b are red, green, and blue color values
respectively (in the range 0 to 255). "name" will be a color name
from the color database if there is an exact match, otherwise it
will be an X11 color spec of the form "#rrggbb". Note that this
is different than tkColorChooser, which doesn't know anything
about color names.
askcolor() supports the following optional keyword arguments:
color
the color to set as the initial selected color
master[*]
the master window to use as the parent of the modal
dialog. Without this argument, pyColorChooser will create
its own Tkinter.Tk instance as the master. This may not
be what you want.
databasefile
similar to the --database option, the value must be a
file name
initfile[*]
similar to the --initfile option, the value must be a
file name
ignore[*]
similar to the --ignore flag, the value is a boolean
wantspec
When this is true, the "name" field in the return tuple
will always be a color spec of the form "#rrggbb". It
will not return a color name even if there is a match;
this is so pyColorChooser can exactly match the API of
tkColorChooser.
[*] these arguments must be specified the first time
askcolor() is used and cannot be changed on subsequent calls.
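A minimal calling sketch ("root" is assumed to be your application's
existing Tk widget; adjust the import to match how the Pynche package
is installed):
    import pyColorChooser
    result = pyColorChooser.askcolor(color='#45dd1f', master=root)
    # result is ((r, g, b), "name") as described above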
The Colorstrip Window
The top part of the main Pynche window contains the "variation
strips". Each strip contains a number of "color chips". The
strips always indicate the currently selected color by a highlight
rectangle around the selected color chip, with an arrow pointing
to the chip. Each arrow has an associated number giving you the
color value along the variation's axis. Each variation strip
shows you the colors that are reachable from the selected color by
varying just one axis of the color solid.
For example, when the selected color is (in Red/Green/Blue
notation) 127/127/127, the Red Variations strip shows you every
color in the range 0/127/127 to 255/127/127. Similarly for the
green and blue axes. You can select any color by clicking on its
chip. This will update the highlight rectangle and the arrow, as
well as other displays in Pynche.
Click on "Update while dragging" if you want Pynche to update the
selected color while you drag along any variation strip (this will
be a bit slower). Click on "Hexadecimal" to display the arrow
numbers in hex.
There are also two shortcut buttons in this window, which
auto-select Black (0/0/0) and White (255/255/255).
The Proof Window
In the lower left corner of the main window you see two larger
color chips. The Selected chip shows you a larger version of the
color selected in the variation strips, along with its X11 color
specification. The Nearest chip shows you the closest color in
the X11 database to the selected color, giving its X11 color
specification, and below that, its X11 color name. When the
Selected chip color exactly matches the Nearest chip color, you
will see the color name appear below the color specification for
the Selected chip.
Clicking on the Nearest color chip selects that color. Color
distance is calculated in the 3D space of the RGB color solid and
if more than one color name is the same distance from the selected
color, the first one found will be chosen.
Note that there may be more than one X11 color name for the same
RGB value. In that case, the first one found in the text database
is designated the "primary" name, and this is shown under the
Nearest chip. The other names are "aliases" and they are visible
in the Color List Window (see below).
Both the color specifications and color names are selectable for
copying and pasting into another window.
The Type-in Window
At the lower right of the main window are three entry fields.
Here you can type numeric values for any of the three color axes.
Legal values are between 0 and 255, and these fields do not allow
you to enter illegal values. You must hit Enter or Tab to select
the new color.
Click on "Update while typing" if you want Pynche to select the
color on every keystroke (well, every one that produces a legal
value!) Click on "Hexadecimal" to display and enter color values
in hex.
Other Views
There are three secondary windows which are not displayed by
default. You can bring these up via the "View" menu on the main
Pynche window.
The Text Window
The "Text Window" allows you to see what effects various colors
have on the standard Tk text widget elements. In the upper part
of the window is a plain Tk text widget and here you can edit the
text, select a region of text, etc. Below this is a button "Track
color changes". When this is turned on, any colors selected in
the other windows will change the text widget element specified in
the radio buttons below. When this is turned off, text widget
elements are not affected by color selection.
You can choose which element gets changed by color selection by
clicking on one of the radio buttons in the bottom part of this
window. Text foreground and background affect the text in the
upper part of the window. Selection foreground and background
affect the colors of the primary selection which is what you see
when you click the middle button (depending on window system) and
drag it through some text.
The Insertion is the insertion cursor in the text window, where
new text will be inserted as you type. The insertion cursor only
has a background.
The Color List Window
The "Color List" window shows every named color in the color name
database (this window may take a while to come up). In the upper
part of the window you see a scrolling list of all the color names
in the database, in alphabetical order. Click on any color to
select it. In the bottom part of the window is displayed any
aliases for the selected color (those color names that have the
same RGB value, but were found later in the text database). For
example, find the color "Black" and you'll see that its aliases
are "gray0" and "grey0".
If the color has no aliases you'll see "<no aliases>" here. If you
just want to see if a color has an alias, and do not want to select a
color when you click on it, turn off "Update on Click".
Note that the color list is always updated when a color is selected
from the main window. There's no way to turn this feature off. If
the selected color has no matching color name you'll see
"<no matching color>" in the Aliases window.
The Details Window
The "Details" window gives you more control over color selection
than just clicking on a color chip in the main window. The row of
buttons along the top apply the specified increment and decrement
amounts to the selected color. These delta amounts are applied to
the variation strips specified by the check boxes labeled "Move
Sliders". Thus if just Red and Green are selected, hitting -10
will subtract 10 from the color value along the red and green
variation only. Note the message under the checkboxes; this
indicates the primary color level being changed when more than one
slider is tied together. For example, if Red and Green are
selected, you will be changing the Yellow level of the selected
color.
The "At Boundary" behavior determines what happens when any color
variation hits either the lower or upper boundaries (0 or 255) as
a result of clicking on the top row buttons:
Stop
When the increment or decrement would send any of the tied
variations out of bounds, the entire delta is discarded.
Wrap Around
When the increment or decrement would send any of the tied
variations out of bounds, the out of bounds value is wrapped
around to the other side. Thus if red were at 238 and +25
were clicked, red would have the value 7.
Preserve Distance
When the increment or decrement would send any of the tied
variations out of bounds, all tied variations are wrapped as
one, so as to preserve the distance between them. Thus if
green and blue were tied, and green was at 238 while blue was
at 223, and +25 were clicked, green would be at 15 and blue
would be at 0.
Squash
When the increment or decrement would send any of the tied
variations out of bounds, the out of bounds variation is set
to the ceiling of 255 or floor of 0, as appropriate. In this
way, all tied variations are squashed to one edge or the
other.
The top row buttons have the following keyboard accelerators:
-25 == Shift Left Arrow
-10 == Control Left Arrow
-1 == Left Arrow
+1 == Right Arrow
+10 == Control Right Arrow
+25 == Shift Right Arrow
Keyboard Accelerators
Alt-w in any secondary window dismisses the window. In the main
window it exits Pynche (except when running as a modal).
Alt-q in any window exits Pynche (except when running as a modal).
Persistency
Pynche remembers various settings of options and colors between
invocations, storing these values in a `persistent initialization
file'. The actual location of this file is specified by the
--initfile option (see above), and defaults to ~/.pynche.
When Pynche exits, it saves these values in the init file, and
re-reads them when it starts up. There is no locking on this
file, so if you run multiple instances of Pynche at a time, you
may clobber the init file.
The actual options stored include
- the currently selected color
- all settings of checkbox and radio button options in all windows
- the contents of the text window, the current text selection and
insertion point, and all current text widget element color
settings.
- the name of the color database file (but not its contents)
You can inhibit Pynche from reading the init file by supplying the
--ignore option on the command line. However, you cannot suppress
the storing of the settings in the init file on Pynche exit. If
you really want to do this, use /dev/null as the init file, using
--initfile.
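For example, to run with the init file effectively disabled:
    pynche --ignore --initfile /dev/null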
Color Name Database Files
Pynche uses a color name database file to calculate the nearest
color to the selected color, and to display in the Color List
view. Several files are distributed with Pynche, described
below. By default, the X11 color name database file is selected.
Other files:
html40colors.txt -- the HTML 4.0 guaranteed color names
websafe.txt -- the 216 "Web-safe" colors that Netscape and MSIE
guarantee will not be dithered. These are specified in #rrggbb
format for both values and names
webcolors.txt -- The 140 color names that Tim Peters and his
sister say NS and MSIE both understand (with some controversy over
AliceBlue).
namedcolors.txt -- an alternative set of Netscape colors.
You can switch between files by choosing "Load palette..." from
the "File" menu. This brings up a standard Tk file dialog.
Choose the file you want and then click "Ok". If Pynche
understands the format in this file, it will load the database and
update the appropriate windows. If not, it will bring up an error
dialog.
To Do
Here's a brief list of things I want to do (some mythical day):
- Better support for resizing the top level windows
- More output views, e.g. color solids
- Have the notion of a `last color selected'; this may require a
new output view
- Support setting the font in the text view
- Support distutils setup.py for installation
I'm open to suggestions!
Local Variables:
indent-tabs-mode: nil
End:
| 15,774 | 399 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/TypeinViewer.py | """TypeinViewer class.
The TypeinViewer is what you see at the lower right of the main Pynche
widget. It contains three text entry fields, one each for red, green, blue.
Input into these windows is highly constrained; it only allows you to enter
values that are legal for a color axis. This usually means 0-255 for decimal
input and 0x0 - 0xff for hex input.
You can toggle whether you want to view and input the values in either decimal
or hex by clicking on Hexadecimal. By clicking on Update while typing, the
color selection will be made on every change to the text field. Otherwise,
you must hit Return or Tab to select the color.
"""
from tkinter import *
class TypeinViewer:
def __init__(self, switchboard, master=None):
# non-gui ivars
self.__sb = switchboard
optiondb = switchboard.optiondb()
self.__hexp = BooleanVar()
self.__hexp.set(optiondb.get('HEXTYPE', 0))
self.__uwtyping = BooleanVar()
self.__uwtyping.set(optiondb.get('UPWHILETYPE', 0))
# create the gui
self.__frame = Frame(master, relief=RAISED, borderwidth=1)
self.__frame.grid(row=3, column=1, sticky='NSEW')
# Red
self.__xl = Label(self.__frame, text='Red:')
self.__xl.grid(row=0, column=0, sticky=E)
subframe = Frame(self.__frame)
subframe.grid(row=0, column=1)
self.__xox = Label(subframe, text='0x')
self.__xox.grid(row=0, column=0, sticky=E)
self.__xox['font'] = 'courier'
self.__x = Entry(subframe, width=3)
self.__x.grid(row=0, column=1)
self.__x.bindtags(self.__x.bindtags() + ('Normalize', 'Update'))
self.__x.bind_class('Normalize', '<Key>', self.__normalize)
self.__x.bind_class('Update' , '<Key>', self.__maybeupdate)
# Green
self.__yl = Label(self.__frame, text='Green:')
self.__yl.grid(row=1, column=0, sticky=E)
subframe = Frame(self.__frame)
subframe.grid(row=1, column=1)
self.__yox = Label(subframe, text='0x')
self.__yox.grid(row=0, column=0, sticky=E)
self.__yox['font'] = 'courier'
self.__y = Entry(subframe, width=3)
self.__y.grid(row=0, column=1)
self.__y.bindtags(self.__y.bindtags() + ('Normalize', 'Update'))
# Blue
self.__zl = Label(self.__frame, text='Blue:')
self.__zl.grid(row=2, column=0, sticky=E)
subframe = Frame(self.__frame)
subframe.grid(row=2, column=1)
self.__zox = Label(subframe, text='0x')
self.__zox.grid(row=0, column=0, sticky=E)
self.__zox['font'] = 'courier'
self.__z = Entry(subframe, width=3)
self.__z.grid(row=0, column=1)
self.__z.bindtags(self.__z.bindtags() + ('Normalize', 'Update'))
# Update while typing?
self.__uwt = Checkbutton(self.__frame,
text='Update while typing',
variable=self.__uwtyping)
self.__uwt.grid(row=3, column=0, columnspan=2, sticky=W)
# Hex/Dec
self.__hex = Checkbutton(self.__frame,
text='Hexadecimal',
variable=self.__hexp,
command=self.__togglehex)
self.__hex.grid(row=4, column=0, columnspan=2, sticky=W)
def __togglehex(self, event=None):
red, green, blue = self.__sb.current_rgb()
if self.__hexp.get():
label = '0x'
else:
label = ' '
self.__xox['text'] = label
self.__yox['text'] = label
self.__zox['text'] = label
self.update_yourself(red, green, blue)
def __normalize(self, event=None):
ew = event.widget
contents = ew.get()
icursor = ew.index(INSERT)
if contents and contents[0] in 'xX' and self.__hexp.get():
contents = '0' + contents
# Figure out the contents in the current base.
try:
if self.__hexp.get():
v = int(contents, 16)
else:
v = int(contents)
except ValueError:
v = None
# If value is not legal, or empty, delete the last character inserted
# and ring the bell. Don't ring the bell if the field is empty (it'll
        # just equal zero).
if v is None:
pass
elif v < 0 or v > 255:
i = ew.index(INSERT)
if event.char:
contents = contents[:i-1] + contents[i:]
icursor -= 1
ew.bell()
elif self.__hexp.get():
contents = hex(v)[2:]
else:
            contents = str(v)
ew.delete(0, END)
ew.insert(0, contents)
ew.icursor(icursor)
def __maybeupdate(self, event=None):
if self.__uwtyping.get() or event.keysym in ('Return', 'Tab'):
self.__update(event)
def __update(self, event=None):
redstr = self.__x.get() or '0'
greenstr = self.__y.get() or '0'
bluestr = self.__z.get() or '0'
if self.__hexp.get():
base = 16
else:
base = 10
red, green, blue = [int(x, base) for x in (redstr, greenstr, bluestr)]
self.__sb.update_views(red, green, blue)
def update_yourself(self, red, green, blue):
if self.__hexp.get():
sred, sgreen, sblue = [hex(x)[2:] for x in (red, green, blue)]
else:
sred, sgreen, sblue = red, green, blue
x, y, z = self.__x, self.__y, self.__z
xicursor = x.index(INSERT)
yicursor = y.index(INSERT)
zicursor = z.index(INSERT)
x.delete(0, END)
y.delete(0, END)
z.delete(0, END)
x.insert(0, sred)
y.insert(0, sgreen)
z.insert(0, sblue)
x.icursor(xicursor)
y.icursor(yicursor)
z.icursor(zicursor)
def hexp_var(self):
return self.__hexp
def save_options(self, optiondb):
optiondb['HEXTYPE'] = self.__hexp.get()
optiondb['UPWHILETYPE'] = self.__uwtyping.get()
| 6,102 | 162 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/Switchboard.py | """Switchboard class.
This class is used to coordinate updates among all Viewers. Every Viewer must
conform to the following interface:
- it must include a method called update_yourself() which takes three
arguments; the red, green, and blue values of the selected color.
- When a Viewer selects a color and wishes to update all other Views, it
should call update_views() on the Switchboard object. Note that the
Viewer typically does *not* update itself before calling update_views(),
since this would cause it to get updated twice.
Optionally, Viewers can also implement:
- save_options() which takes an optiondb (a dictionary). Store into this
dictionary any values the Viewer wants to save in the persistent
~/.pynche file. This dictionary is saved using marshal. The namespace
for the keys is ad-hoc; make sure you don't clobber some other Viewer's
keys!
- withdraw() which takes no arguments. This is called when Pynche is
unmapped. All Viewers should implement this.
- colordb_changed() which takes a single argument, an instance of
ColorDB. This is called whenever the color name database is changed and
gives a chance for the Viewers to do something on those events. See
ListViewer for details.
External Viewers are found dynamically. Viewer modules should have names such
as FooViewer.py. If such a named module has a module global variable called
ADDTOVIEW and this variable is true, the Viewer will be added dynamically to
the `View' menu. ADDTOVIEW contains a string which is used as the menu item
to display the Viewer (one kludge: if the string contains a `%', this is used
to indicate that the next character will get an underline in the menu,
otherwise the first character is underlined).
FooViewer.py should contain a class called FooViewer, and its constructor
should take two arguments, an instance of Switchboard, and optionally a Tk
master window.
"""
import sys
import marshal
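# A minimal sketch of an object satisfying the Viewer interface described
# above (illustrative only; Pynche neither ships nor instantiates this class).
# Only update_yourself() is required; the other methods are optional hooks.
class _ExampleViewer:
    def __init__(self, switchboard, master=None):
        self.__sb = switchboard
    def update_yourself(self, red, green, blue):
        # React to the newly selected color.
        print('selected color:', red, green, blue)
    def save_options(self, optiondb):
        # Persist anything we want restored on the next run.
        optiondb['EXAMPLEVIEWER'] = 1
    def withdraw(self):
        pass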
class Switchboard:
def __init__(self, initfile):
self.__initfile = initfile
self.__colordb = None
self.__optiondb = {}
self.__views = []
self.__red = 0
self.__green = 0
self.__blue = 0
self.__canceled = 0
# read the initialization file
fp = None
if initfile:
try:
try:
fp = open(initfile, 'rb')
self.__optiondb = marshal.load(fp)
if not isinstance(self.__optiondb, dict):
print('Problem reading options from file:', initfile,
file=sys.stderr)
self.__optiondb = {}
except (IOError, EOFError, ValueError):
pass
finally:
if fp:
fp.close()
def add_view(self, view):
self.__views.append(view)
def update_views(self, red, green, blue):
self.__red = red
self.__green = green
self.__blue = blue
for v in self.__views:
v.update_yourself(red, green, blue)
def update_views_current(self):
self.update_views(self.__red, self.__green, self.__blue)
def current_rgb(self):
return self.__red, self.__green, self.__blue
def colordb(self):
return self.__colordb
def set_colordb(self, colordb):
self.__colordb = colordb
for v in self.__views:
if hasattr(v, 'colordb_changed'):
v.colordb_changed(colordb)
self.update_views_current()
def optiondb(self):
return self.__optiondb
def save_views(self):
# save the current color
self.__optiondb['RED'] = self.__red
self.__optiondb['GREEN'] = self.__green
self.__optiondb['BLUE'] = self.__blue
for v in self.__views:
if hasattr(v, 'save_options'):
v.save_options(self.__optiondb)
# save the name of the file used for the color database. we'll try to
# load this first.
self.__optiondb['DBFILE'] = self.__colordb.filename()
fp = None
try:
try:
fp = open(self.__initfile, 'wb')
except IOError:
                print('Cannot write options to file:',
                      self.__initfile, file=sys.stderr)
else:
marshal.dump(self.__optiondb, fp)
finally:
if fp:
fp.close()
def withdraw_views(self):
for v in self.__views:
if hasattr(v, 'withdraw'):
v.withdraw()
def canceled(self, flag=1):
self.__canceled = flag
def canceled_p(self):
return self.__canceled
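# A minimal sketch of a conforming Viewer, written against the protocol
# described in the module docstring above. The class and key names below
# are illustrative only and are not part of Pynche; a real Viewer would
# also build some Tk widgets.
class _ExampleViewer:
    def __init__(self, switchboard, master=None):
        self.__sb = switchboard
        self.__rgb = (0, 0, 0)
    def update_yourself(self, red, green, blue):
        # called via Switchboard.update_views() whenever the color changes
        self.__rgb = (red, green, blue)
    def save_options(self, optiondb):
        # stored values end up marshaled into ~/.pynche (by default) on exit
        optiondb['EXAMPLEVIEWER_RGB'] = self.__rgb
    def withdraw(self):
        # called when Pynche is unmapped; nothing to hide in this sketch
        pass
# It would be wired up the same way Main.build() registers the built-in
# views: switchboard.add_view(_ExampleViewer(switchboard)).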
| 4,797 | 139 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/pynche.pyw | #! /usr/bin/env python
"""Run this file under Windows to inhibit the console window.
Run the file pynche.py under Unix or when debugging under Windows.
"""
import Main
Main.main()
| 181 | 8 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/PyncheWidget.py | """Main Pynche (Pythonically Natural Color and Hue Editor) widget.
This window provides the basic decorations, primarily including the menubar.
It is used to bring up other windows.
"""
import sys
import os
from tkinter import *
from tkinter import messagebox, filedialog
import ColorDB
# Milliseconds between interrupt checks
KEEPALIVE_TIMER = 500
class PyncheWidget:
def __init__(self, version, switchboard, master=None, extrapath=[]):
self.__sb = switchboard
self.__version = version
self.__textwin = None
self.__listwin = None
self.__detailswin = None
self.__helpwin = None
self.__dialogstate = {}
modal = self.__modal = not not master
# If a master was given, we are running as a modal dialog servant to
# some other application. We rearrange our UI in this case (there's
# no File menu and we get `Okay' and `Cancel' buttons), and we do a
# grab_set() to make ourselves modal
if modal:
self.__tkroot = tkroot = Toplevel(master, class_='Pynche')
tkroot.grab_set()
tkroot.withdraw()
else:
# Is there already a default root for Tk, say because we're
# running under Guido's IDE? :-) Two conditions say no, either the
# import fails or _default_root is None.
tkroot = None
try:
                from tkinter import _default_root
tkroot = self.__tkroot = _default_root
except ImportError:
pass
if not tkroot:
tkroot = self.__tkroot = Tk(className='Pynche')
# but this isn't our top level widget, so make it invisible
tkroot.withdraw()
# create the menubar
menubar = self.__menubar = Menu(tkroot)
#
# File menu
#
filemenu = self.__filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='Load palette...',
command=self.__load,
underline=0)
if not modal:
filemenu.add_command(label='Quit',
command=self.__quit,
accelerator='Alt-Q',
underline=0)
#
# View menu
#
views = make_view_popups(self.__sb, self.__tkroot, extrapath)
viewmenu = Menu(menubar, tearoff=0)
for v in views:
viewmenu.add_command(label=v.menutext(),
command=v.popup,
underline=v.underline())
#
# Help menu
#
helpmenu = Menu(menubar, name='help', tearoff=0)
helpmenu.add_command(label='About Pynche...',
command=self.__popup_about,
underline=0)
helpmenu.add_command(label='Help...',
command=self.__popup_usage,
underline=0)
#
# Tie them all together
#
menubar.add_cascade(label='File',
menu=filemenu,
underline=0)
menubar.add_cascade(label='View',
menu=viewmenu,
underline=0)
menubar.add_cascade(label='Help',
menu=helpmenu,
underline=0)
# now create the top level window
root = self.__root = Toplevel(tkroot, class_='Pynche', menu=menubar)
root.protocol('WM_DELETE_WINDOW',
modal and self.__bell or self.__quit)
root.title('Pynche %s' % version)
root.iconname('Pynche')
# Only bind accelerators for the File->Quit menu item if running as a
# standalone app
if not modal:
root.bind('<Alt-q>', self.__quit)
root.bind('<Alt-Q>', self.__quit)
else:
# We're a modal dialog so we have a new row of buttons
bframe = Frame(root, borderwidth=1, relief=RAISED)
bframe.grid(row=4, column=0, columnspan=2,
sticky='EW',
ipady=5)
okay = Button(bframe,
text='Okay',
command=self.__okay)
okay.pack(side=LEFT, expand=1)
cancel = Button(bframe,
text='Cancel',
command=self.__cancel)
cancel.pack(side=LEFT, expand=1)
def __quit(self, event=None):
self.__tkroot.quit()
def __bell(self, event=None):
self.__tkroot.bell()
def __okay(self, event=None):
self.__sb.withdraw_views()
self.__tkroot.grab_release()
self.__quit()
def __cancel(self, event=None):
self.__sb.canceled()
self.__okay()
def __keepalive(self):
# Exercise the Python interpreter regularly so keyboard interrupts get
# through.
self.__tkroot.tk.createtimerhandler(KEEPALIVE_TIMER, self.__keepalive)
def start(self):
if not self.__modal:
self.__keepalive()
self.__tkroot.mainloop()
def window(self):
return self.__root
def __popup_about(self, event=None):
from Main import __version__
messagebox.showinfo('About Pynche ' + __version__,
'''\
Pynche %s
The PYthonically Natural
Color and Hue Editor
For information
contact: Barry A. Warsaw
email: [email protected]''' % __version__)
def __popup_usage(self, event=None):
if not self.__helpwin:
self.__helpwin = Helpwin(self.__root, self.__quit)
self.__helpwin.deiconify()
def __load(self, event=None):
while 1:
idir, ifile = os.path.split(self.__sb.colordb().filename())
file = filedialog.askopenfilename(
filetypes=[('Text files', '*.txt'),
('All files', '*'),
],
initialdir=idir,
initialfile=ifile)
if not file:
# cancel button
return
try:
colordb = ColorDB.get_colordb(file)
except IOError:
messagebox.showerror('Read error', '''\
Could not open file for reading:
%s''' % file)
continue
if colordb is None:
messagebox.showerror('Unrecognized color file type', '''\
Unrecognized color file type in file:
%s''' % file)
continue
break
self.__sb.set_colordb(colordb)
def withdraw(self):
self.__root.withdraw()
def deiconify(self):
self.__root.deiconify()
class Helpwin:
def __init__(self, master, quitfunc):
from Main import docstring
self.__root = root = Toplevel(master, class_='Pynche')
root.protocol('WM_DELETE_WINDOW', self.__withdraw)
root.title('Pynche Help Window')
root.iconname('Pynche Help Window')
root.bind('<Alt-q>', quitfunc)
root.bind('<Alt-Q>', quitfunc)
root.bind('<Alt-w>', self.__withdraw)
root.bind('<Alt-W>', self.__withdraw)
# more elaborate help is available in the README file
readmefile = os.path.join(sys.path[0], 'README')
try:
fp = None
try:
fp = open(readmefile)
contents = fp.read()
# wax the last page, it contains Emacs cruft
i = contents.rfind('\f')
if i > 0:
contents = contents[:i].rstrip()
finally:
if fp:
fp.close()
except IOError:
sys.stderr.write("Couldn't open Pynche's README, "
'using docstring instead.\n')
contents = docstring()
self.__text = text = Text(root, relief=SUNKEN,
width=80, height=24)
self.__text.focus_set()
text.insert(0.0, contents)
scrollbar = Scrollbar(root)
scrollbar.pack(fill=Y, side=RIGHT)
text.pack(fill=BOTH, expand=YES)
text.configure(yscrollcommand=(scrollbar, 'set'))
scrollbar.configure(command=(text, 'yview'))
def __withdraw(self, event=None):
self.__root.withdraw()
def deiconify(self):
self.__root.deiconify()
import functools
@functools.total_ordering
class PopupViewer:
def __init__(self, module, name, switchboard, root):
self.__m = module
self.__name = name
self.__sb = switchboard
self.__root = root
self.__menutext = module.ADDTOVIEW
# find the underline character
underline = module.ADDTOVIEW.find('%')
if underline == -1:
underline = 0
else:
self.__menutext = module.ADDTOVIEW.replace('%', '', 1)
self.__underline = underline
self.__window = None
def menutext(self):
return self.__menutext
def underline(self):
return self.__underline
def popup(self, event=None):
if not self.__window:
# class and module must have the same name
class_ = getattr(self.__m, self.__name)
self.__window = class_(self.__sb, self.__root)
self.__sb.add_view(self.__window)
self.__window.deiconify()
def __eq__(self, other):
return self.__menutext == other.__menutext
def __lt__(self, other):
return self.__menutext < other.__menutext
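# Example of the ADDTOVIEW '%' handling above (illustrative value, not one
# used by the bundled viewers): ADDTOVIEW = 'Text %Window...' would give
# menutext() == 'Text Window...' and underline() == 5, i.e. the 'W' gets
# the menu underline.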
def make_view_popups(switchboard, root, extrapath):
viewers = []
# where we are in the file system
dirs = [os.path.dirname(__file__)] + extrapath
for dir in dirs:
if dir == '':
dir = '.'
for file in os.listdir(dir):
if file[-9:] == 'Viewer.py':
name = file[:-3]
try:
module = __import__(name)
except ImportError:
# Pynche is running from inside a package, so get the
# module using the explicit path.
pkg = __import__('pynche.'+name)
module = getattr(pkg, name)
if hasattr(module, 'ADDTOVIEW') and module.ADDTOVIEW:
# this is an external viewer
v = PopupViewer(module, name, switchboard, root)
viewers.append(v)
# sort alphabetically
viewers.sort()
return viewers
| 10,615 | 314 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/namedcolors.txt | # named colors from http://www.lightlink.com/xine/bells/namedcolors.html
White #FFFFFF
Red #FF0000
Green #00FF00
Blue #0000FF
Magenta #FF00FF
Cyan #00FFFF
Yellow #FFFF00
Black #000000
Aquamarine #70DB93
Baker's Chocolate #5C3317
Blue Violet #9F5F9F
Brass #B5A642
Bright Gold #D9D919
Brown #A62A2A
Bronze #8C7853
Bronze II #A67D3D
Cadet Blue #5F9F9F
Cool Copper #D98719
Copper #B87333
Coral #FF7F00
Corn Flower Blue #42426F
Dark Brown #5C4033
Dark Green #2F4F2F
Dark Green Copper #4A766E
Dark Olive Green #4F4F2F
Dark Orchid #9932CD
Dark Purple #871F78
Dark Slate Blue #6B238E
Dark Slate Grey #2F4F4F
Dark Tan #97694F
Dark Turquoise #7093DB
Dark Wood #855E42
Dim Grey #545454
Dusty Rose #856363
Feldspar #D19275
Firebrick #8E2323
Forest Green #238E23
Gold #CD7F32
Goldenrod #DBDB70
Grey #C0C0C0
Green Copper #527F76
Green Yellow #93DB70
Hunter Green #215E21
Indian Red #4E2F2F
Khaki #9F9F5F
Light Blue #C0D9D9
Light Grey #A8A8A8
Light Steel Blue #8F8FBD
Light Wood #E9C2A6
Lime Green #32CD32
Mandarian Orange #E47833
Maroon #8E236B
Medium Aquamarine #32CD99
Medium Blue #3232CD
Medium Forest Green #6B8E23
Medium Goldenrod #EAEAAE
Medium Orchid #9370DB
Medium Sea Green #426F42
Medium Slate Blue #7F00FF
Medium Spring Green #7FFF00
Medium Turquoise #70DBDB
Medium Violet Red #DB7093
Medium Wood #A68064
Midnight Blue #2F2F4F
Navy Blue #23238E
Neon Blue #4D4DFF
Neon Pink #FF6EC7
New Midnight Blue #00009C
New Tan #EBC79E
Old Gold #CFB53B
Orange #FF7F00
Orange Red #FF2400
Orchid #DB70DB
Pale Green #8FBC8F
Pink #BC8F8F
Plum #EAADEA
Quartz #D9D9F3
Rich Blue #5959AB
Salmon #6F4242
Scarlet #8C1717
Sea Green #238E68
Semi-Sweet Chocolate #6B4226
Sienna #8E6B23
Silver #E6E8FA
Sky Blue #3299CC
Slate Blue #007FFF
Spicy Pink #FF1CAE
Spring Green #00FF7F
Steel Blue #236B8E
Summer Sky #38B0DE
Tan #DB9370
Thistle #D8BFD8
Turquoise #ADEAEA
Very Dark Brown #5C4033
Very Light Grey #CDCDCD
Violet #4F2F4F
Violet Red #CC3299
Wheat #D8D8BF
Yellow Green #99CC32
| 5,716 | 101 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/ColorDB.py | """Color Database.
This file contains one class, called ColorDB, and several utility functions.
The class must be instantiated by the get_colordb() function in this file,
passing it a filename to read a database out of.
The get_colordb() function will try to examine the file to figure out what the
format of the file is. If it can't figure out the file format, or it has
trouble reading the file, None is returned. You can pass get_colordb() an
optional filetype argument.
Supported file types are:
X_RGB_TXT -- X Consortium rgb.txt format files. Three columns of numbers
from 0 .. 255 separated by whitespace. Arbitrary trailing
columns used as the color name.
The utility functions are useful for converting between the various expected
color formats, and for calculating other color values.
"""
import sys
import re
from types import *
class BadColor(Exception):
pass
DEFAULT_DB = None
SPACE = ' '
COMMASPACE = ', '
# generic class
class ColorDB:
def __init__(self, fp):
lineno = 2
self.__name = fp.name
# Maintain several dictionaries for indexing into the color database.
# Note that while Tk supports RGB intensities of 4, 8, 12, or 16 bits,
# for now we only support 8 bit intensities. At least on OpenWindows,
# all intensities in the /usr/openwin/lib/rgb.txt file are 8-bit
#
# key is (red, green, blue) tuple, value is (name, [aliases])
self.__byrgb = {}
# key is name, value is (red, green, blue)
self.__byname = {}
        # all unique names (non-aliases), built on demand
self.__allnames = None
for line in fp:
# get this compiled regular expression from derived class
mo = self._re.match(line)
if not mo:
                print('Error in', fp.name, 'line', lineno, file=sys.stderr)
lineno += 1
continue
# extract the red, green, blue, and name
red, green, blue = self._extractrgb(mo)
name = self._extractname(mo)
keyname = name.lower()
# BAW: for now the `name' is just the first named color with the
# rgb values we find. Later, we might want to make the two word
# version the `name', or the CapitalizedVersion, etc.
key = (red, green, blue)
foundname, aliases = self.__byrgb.get(key, (name, []))
if foundname != name and foundname not in aliases:
aliases.append(name)
self.__byrgb[key] = (foundname, aliases)
# add to byname lookup
self.__byname[keyname] = key
lineno = lineno + 1
# override in derived classes
def _extractrgb(self, mo):
return [int(x) for x in mo.group('red', 'green', 'blue')]
def _extractname(self, mo):
return mo.group('name')
def filename(self):
return self.__name
def find_byrgb(self, rgbtuple):
"""Return name for rgbtuple"""
try:
return self.__byrgb[rgbtuple]
except KeyError:
raise BadColor(rgbtuple)
def find_byname(self, name):
"""Return (red, green, blue) for name"""
name = name.lower()
try:
return self.__byname[name]
except KeyError:
raise BadColor(name)
def nearest(self, red, green, blue):
"""Return the name of color nearest (red, green, blue)"""
# BAW: should we use Voronoi diagrams, Delaunay triangulation, or
# octree for speeding up the locating of nearest point? Exhaustive
# search is inefficient, but seems fast enough.
nearest = -1
nearest_name = ''
for name, aliases in self.__byrgb.values():
r, g, b = self.__byname[name.lower()]
rdelta = red - r
gdelta = green - g
bdelta = blue - b
distance = rdelta * rdelta + gdelta * gdelta + bdelta * bdelta
if nearest == -1 or distance < nearest:
nearest = distance
nearest_name = name
return nearest_name
def unique_names(self):
# sorted
if not self.__allnames:
self.__allnames = []
for name, aliases in self.__byrgb.values():
self.__allnames.append(name)
self.__allnames.sort(key=str.lower)
return self.__allnames
def aliases_of(self, red, green, blue):
try:
name, aliases = self.__byrgb[(red, green, blue)]
except KeyError:
raise BadColor((red, green, blue))
return [name] + aliases
class RGBColorDB(ColorDB):
_re = re.compile(
r'\s*(?P<red>\d+)\s+(?P<green>\d+)\s+(?P<blue>\d+)\s+(?P<name>.*)')
class HTML40DB(ColorDB):
_re = re.compile(r'(?P<name>\S+)\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
def _extractrgb(self, mo):
return rrggbb_to_triplet(mo.group('hexrgb'))
class LightlinkDB(HTML40DB):
_re = re.compile(r'(?P<name>(.+))\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
def _extractname(self, mo):
return mo.group('name').strip()
class WebsafeDB(ColorDB):
_re = re.compile('(?P<hexrgb>#[0-9a-fA-F]{6})')
def _extractrgb(self, mo):
return rrggbb_to_triplet(mo.group('hexrgb'))
def _extractname(self, mo):
return mo.group('hexrgb').upper()
# format is a tuple (RE, CLASS) where RE is a compiled regular expression
# matched against the first line of the file, and CLASS is the ColorDB
# subclass to instantiate if a match is found
FILETYPES = [
(re.compile('Xorg'), RGBColorDB),
(re.compile('XConsortium'), RGBColorDB),
(re.compile('HTML'), HTML40DB),
(re.compile('lightlink'), LightlinkDB),
(re.compile('Websafe'), WebsafeDB),
]
def get_colordb(file, filetype=None):
colordb = None
fp = open(file)
try:
line = fp.readline()
if not line:
return None
# try to determine the type of RGB file it is
if filetype is None:
filetypes = FILETYPES
else:
filetypes = [filetype]
for typere, class_ in filetypes:
mo = typere.search(line)
if mo:
break
else:
# no matching type
return None
# we know the type and the class to grok the type, so suck it in
colordb = class_(fp)
finally:
fp.close()
# save a global copy
global DEFAULT_DB
DEFAULT_DB = colordb
return colordb
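# For example, the bundled X/rgb.txt begins with a '! $XConsortium: ...'
# comment line, so the XConsortium pattern in FILETYPES matches and an
# RGBColorDB instance is returned.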
_namedict = {}
def rrggbb_to_triplet(color):
"""Converts a #rrggbb color to the tuple (red, green, blue)."""
rgbtuple = _namedict.get(color)
if rgbtuple is None:
if color[0] != '#':
raise BadColor(color)
red = color[1:3]
green = color[3:5]
blue = color[5:7]
rgbtuple = int(red, 16), int(green, 16), int(blue, 16)
_namedict[color] = rgbtuple
return rgbtuple
_tripdict = {}
def triplet_to_rrggbb(rgbtuple):
"""Converts a (red, green, blue) tuple to #rrggbb."""
global _tripdict
hexname = _tripdict.get(rgbtuple)
if hexname is None:
hexname = '#%02x%02x%02x' % rgbtuple
_tripdict[rgbtuple] = hexname
return hexname
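# For example: triplet_to_rrggbb((255, 0, 255)) == '#ff00ff', and
# rrggbb_to_triplet('#ff00ff') == (255, 0, 255) converts it back.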
def triplet_to_fractional_rgb(rgbtuple):
return [x / 256 for x in rgbtuple]
def triplet_to_brightness(rgbtuple):
# return the brightness (grey level) along the scale 0.0==black to
# 1.0==white
r = 0.299
g = 0.587
b = 0.114
return r*rgbtuple[0] + g*rgbtuple[1] + b*rgbtuple[2]
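# The weights above sum to 1.0, so white (255, 255, 255) maps to 255.0 and
# black to 0.0; pure red (255, 0, 0), for example, has brightness
# 0.299 * 255 == 76.245.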
if __name__ == '__main__':
colordb = get_colordb('/usr/openwin/lib/rgb.txt')
if not colordb:
print('No parseable color database found')
sys.exit(1)
# on my system, this color matches exactly
target = 'navy'
red, green, blue = rgbtuple = colordb.find_byname(target)
print(target, ':', red, green, blue, triplet_to_rrggbb(rgbtuple))
name, aliases = colordb.find_byrgb(rgbtuple)
print('name:', name, 'aliases:', COMMASPACE.join(aliases))
r, g, b = (1, 1, 128) # nearest to navy
r, g, b = (145, 238, 144) # nearest to lightgreen
r, g, b = (255, 251, 250) # snow
print('finding nearest to', target, '...')
import time
t0 = time.time()
nearest = colordb.nearest(r, g, b)
t1 = time.time()
print('found nearest color', nearest, 'in', t1-t0, 'seconds')
# dump the database
for n in colordb.unique_names():
r, g, b = colordb.find_byname(n)
aliases = colordb.aliases_of(r, g, b)
print('%20s: (%3d/%3d/%3d) == %s' % (n, r, g, b,
SPACE.join(aliases[1:])))
| 8,773 | 272 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/Main.py | """Pynche -- The PYthon Natural Color and Hue Editor.
Contact: %(AUTHNAME)s
Email: %(AUTHEMAIL)s
Version: %(__version__)s
Pynche is based largely on a similar color editor I wrote years ago for the
SunView window system. That editor was called ICE: the Interactive Color
Editor. I'd always wanted to port the editor to X but didn't feel like
hacking X and C code to do it. Fast forward many years, to where Python +
Tkinter provides such a nice programming environment, with enough power, that
I finally buckled down and implemented it. I changed the name because these
days, too many other systems have the acronym `ICE'.
This program requires Python 3 with Tkinter.
Usage: %(PROGRAM)s [-d file] [-i file] [-X] [-v] [-h] [initialcolor]
Where:
--database file
-d file
Alternate location of a color database file
--initfile file
-i file
Alternate location of the initialization file. This file contains a
persistent database of the current Pynche options and color. This
means that Pynche restores its option settings and current color when
it restarts, using this file (unless the -X option is used). The
default is ~/.pynche
--ignore
-X
Ignore the initialization file when starting up. Pynche will still
write the current option settings to this file when it quits.
--version
-v
print the version number and exit
--help
-h
print this message
initialcolor
initial color, as a color name or #RRGGBB format
"""
__version__ = '1.4.1'
import sys
import os
import getopt
import ColorDB
from PyncheWidget import PyncheWidget
from Switchboard import Switchboard
from StripViewer import StripViewer
from ChipViewer import ChipViewer
from TypeinViewer import TypeinViewer
PROGRAM = sys.argv[0]
AUTHNAME = 'Barry Warsaw'
AUTHEMAIL = '[email protected]'
# Default locations of rgb.txt or other textual color database
RGB_TXT = [
# Solaris OpenWindows
'/usr/openwin/lib/rgb.txt',
# Linux
'/usr/lib/X11/rgb.txt',
# The X11R6.4 rgb.txt file
os.path.join(sys.path[0], 'X/rgb.txt'),
# add more here
]
# Do this because PyncheWidget.py wants to get at the interpolated docstring
# too, for its Help menu.
def docstring():
return __doc__ % globals()
def usage(code, msg=''):
print(docstring())
if msg:
print(msg)
sys.exit(code)
def initial_color(s, colordb):
# function called on every color
def scan_color(s, colordb=colordb):
try:
r, g, b = colordb.find_byname(s)
except ColorDB.BadColor:
try:
r, g, b = ColorDB.rrggbb_to_triplet(s)
except ColorDB.BadColor:
return None, None, None
return r, g, b
#
# First try the passed in color
r, g, b = scan_color(s)
if r is None:
# try the same color with '#' prepended, since some shells require
# this to be escaped, which is a pain
r, g, b = scan_color('#' + s)
if r is None:
print('Bad initial color, using gray50:', s)
r, g, b = scan_color('gray50')
if r is None:
usage(1, 'Cannot find an initial color to use')
# does not return
return r, g, b
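# For example, initial_color('6495ed', colordb) still resolves to
# (100, 149, 237) because the second scan_color() call retries with '#'
# prepended (the value here is illustrative; any #rrggbb digits work).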
def build(master=None, initialcolor=None, initfile=None, ignore=None,
dbfile=None):
# create all output widgets
s = Switchboard(not ignore and initfile)
# defer to the command line chosen color database, falling back to the one
# in the .pynche file.
if dbfile is None:
dbfile = s.optiondb().get('DBFILE')
# find a parseable color database
colordb = None
files = RGB_TXT[:]
if dbfile is None:
dbfile = files.pop()
while colordb is None:
try:
colordb = ColorDB.get_colordb(dbfile)
except (KeyError, IOError):
pass
if colordb is None:
if not files:
break
dbfile = files.pop(0)
if not colordb:
usage(1, 'No color database file found, see the -d option.')
s.set_colordb(colordb)
# create the application window decorations
app = PyncheWidget(__version__, s, master=master)
w = app.window()
# these built-in viewers live inside the main Pynche window
s.add_view(StripViewer(s, w))
s.add_view(ChipViewer(s, w))
s.add_view(TypeinViewer(s, w))
# get the initial color as components and set the color on all views. if
# there was no initial color given on the command line, use the one that's
# stored in the option database
if initialcolor is None:
optiondb = s.optiondb()
red = optiondb.get('RED')
green = optiondb.get('GREEN')
blue = optiondb.get('BLUE')
# but if there wasn't any stored in the database, use grey50
if red is None or blue is None or green is None:
red, green, blue = initial_color('grey50', colordb)
else:
red, green, blue = initial_color(initialcolor, colordb)
s.update_views(red, green, blue)
return app, s
def run(app, s):
try:
app.start()
except KeyboardInterrupt:
pass
def main():
try:
opts, args = getopt.getopt(
sys.argv[1:],
'hd:i:Xv',
['database=', 'initfile=', 'ignore', 'help', 'version'])
except getopt.error as msg:
usage(1, msg)
if len(args) == 0:
initialcolor = None
elif len(args) == 1:
initialcolor = args[0]
else:
usage(1)
ignore = False
dbfile = None
initfile = os.path.expanduser('~/.pynche')
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-v', '--version'):
print("""\
Pynche -- The PYthon Natural Color and Hue Editor.
Contact: %(AUTHNAME)s
Email: %(AUTHEMAIL)s
Version: %(__version__)s""" % globals())
sys.exit(0)
elif opt in ('-d', '--database'):
dbfile = arg
elif opt in ('-X', '--ignore'):
ignore = True
elif opt in ('-i', '--initfile'):
initfile = arg
app, sb = build(initialcolor=initialcolor,
initfile=initfile,
ignore=ignore,
dbfile=dbfile)
run(app, sb)
sb.save_views()
if __name__ == '__main__':
main()
| 6,406 | 230 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/webcolors.txt | # De-facto NS & MSIE recognized HTML color names
AliceBlue #f0f8ff
AntiqueWhite #faebd7
Aqua #00ffff
Aquamarine #7fffd4
Azure #f0ffff
Beige #f5f5dc
Bisque #ffe4c4
Black #000000
BlanchedAlmond #ffebcd
Blue #0000ff
BlueViolet #8a2be2
Brown #a52a2a
BurlyWood #deb887
CadetBlue #5f9ea0
Chartreuse #7fff00
Chocolate #d2691e
Coral #ff7f50
CornflowerBlue #6495ed
Cornsilk #fff8dc
Crimson #dc143c
Cyan #00ffff
DarkBlue #00008b
DarkCyan #008b8b
DarkGoldenrod #b8860b
DarkGray #a9a9a9
DarkGreen #006400
DarkKhaki #bdb76b
DarkMagenta #8b008b
DarkOliveGreen #556b2f
DarkOrange #ff8c00
DarkOrchid #9932cc
DarkRed #8b0000
DarkSalmon #e9967a
DarkSeaGreen #8fbc8f
DarkSlateBlue #483d8b
DarkSlateGray #2f4f4f
DarkTurquoise #00ced1
DarkViolet #9400d3
DeepPink #ff1493
DeepSkyBlue #00bfff
DimGray #696969
DodgerBlue #1e90ff
FireBrick #b22222
FloralWhite #fffaf0
ForestGreen #228b22
Fuchsia #ff00ff
Gainsboro #dcdcdc
GhostWhite #f8f8ff
Gold #ffd700
Goldenrod #daa520
Gray #808080
Green #008000
GreenYellow #adff2f
Honeydew #f0fff0
HotPink #ff69b4
IndianRed #cd5c5c
Indigo #4b0082
Ivory #fffff0
Khaki #f0e68c
Lavender #e6e6fa
LavenderBlush #fff0f5
LawnGreen #7cfc00
LemonChiffon #fffacd
LightBlue #add8e6
LightCoral #f08080
LightCyan #e0ffff
LightGoldenrodYellow #fafad2
LightGreen #90ee90
LightGrey #d3d3d3
LightPink #ffb6c1
LightSalmon #ffa07a
LightSeaGreen #20b2aa
LightSkyBlue #87cefa
LightSlateGray #778899
LightSteelBlue #b0c4de
LightYellow #ffffe0
Lime #00ff00
LimeGreen #32cd32
Linen #faf0e6
Magenta #ff00ff
Maroon #800000
MediumAquamarine #66cdaa
MediumBlue #0000cd
MediumOrchid #ba55d3
MediumPurple #9370db
MediumSeaGreen #3cb371
MediumSlateBlue #7b68ee
MediumSpringGreen #00fa9a
MediumTurquoise #48d1cc
MediumVioletRed #c71585
MidnightBlue #191970
MintCream #f5fffa
MistyRose #ffe4e1
Moccasin #ffe4b5
NavajoWhite #ffdead
Navy #000080
OldLace #fdf5e6
Olive #808000
OliveDrab #6b8e23
Orange #ffa500
OrangeRed #ff4500
Orchid #da70d6
PaleGoldenrod #eee8aa
PaleGreen #98fb98
PaleTurquoise #afeeee
PaleVioletRed #db7093
PapayaWhip #ffefd5
PeachPuff #ffdab9
Peru #cd853f
Pink #ffc0cb
Plum #dda0dd
PowderBlue #b0e0e6
Purple #800080
Red #ff0000
RosyBrown #bc8f8f
RoyalBlue #4169e1
SaddleBrown #8b4513
Salmon #fa8072
SandyBrown #f4a460
SeaGreen #2e8b57
Seashell #fff5ee
Sienna #a0522d
Silver #c0c0c0
SkyBlue #87ceeb
SlateBlue #6a5acd
SlateGray #708090
Snow #fffafa
SpringGreen #00ff7f
SteelBlue #4682b4
Tan #d2b48c
Teal #008080
Thistle #d8bfd8
Tomato #ff6347
Turquoise #40e0d0
Violet #ee82ee
Wheat #f5deb3
White #ffffff
WhiteSmoke #f5f5f5
Yellow #ffff00
YellowGreen #9acd32
| 3,088 | 142 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/TextViewer.py | """TextViewer class.
The TextViewer allows you to see how the selected color would affect various
characteristics of a Tk text widget. This is an output viewer only.
In the top part of the window is a standard text widget with some sample text
in it. You are free to edit this text in any way you want (BAW: allow you to
change font characteristics). If you want changes in other viewers to update
text characteristics, turn on Track color changes.
To select which characteristic tracks the change, select one of the radio
buttons in the window below. Text foreground and background affect the text
in the window above. The Selection is what you see when you click the middle
button and drag it through some text. The Insertion is the insertion cursor
in the text window (which only has a background).
"""
from tkinter import *
import ColorDB
ADDTOVIEW = 'Text Window...'
class TextViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
optiondb = switchboard.optiondb()
root = self.__root = Toplevel(master, class_='Pynche')
root.protocol('WM_DELETE_WINDOW', self.withdraw)
root.title('Pynche Text Window')
root.iconname('Pynche Text Window')
root.bind('<Alt-q>', self.__quit)
root.bind('<Alt-Q>', self.__quit)
root.bind('<Alt-w>', self.withdraw)
root.bind('<Alt-W>', self.withdraw)
#
# create the text widget
#
self.__text = Text(root, relief=SUNKEN,
background=optiondb.get('TEXTBG', 'black'),
foreground=optiondb.get('TEXTFG', 'white'),
width=35, height=15)
sfg = optiondb.get('TEXT_SFG')
if sfg:
self.__text.configure(selectforeground=sfg)
sbg = optiondb.get('TEXT_SBG')
if sbg:
self.__text.configure(selectbackground=sbg)
ibg = optiondb.get('TEXT_IBG')
if ibg:
self.__text.configure(insertbackground=ibg)
self.__text.pack()
self.__text.insert(0.0, optiondb.get('TEXT', '''\
Insert some stuff here and play
with the buttons below to see
how the colors interact in
textual displays.
See how the selection can also
be affected by tickling the buttons
and choosing a color.'''))
insert = optiondb.get('TEXTINS')
if insert:
self.__text.mark_set(INSERT, insert)
try:
start, end = optiondb.get('TEXTSEL', (6.0, END))
self.__text.tag_add(SEL, start, end)
except ValueError:
# selection wasn't set
pass
self.__text.focus_set()
#
# variables
self.__trackp = BooleanVar()
self.__trackp.set(optiondb.get('TRACKP', 0))
self.__which = IntVar()
self.__which.set(optiondb.get('WHICH', 0))
#
# track toggle
self.__t = Checkbutton(root, text='Track color changes',
variable=self.__trackp,
relief=GROOVE,
command=self.__toggletrack)
self.__t.pack(fill=X, expand=YES)
frame = self.__frame = Frame(root)
frame.pack()
#
# labels
self.__labels = []
row = 2
for text in ('Text:', 'Selection:', 'Insertion:'):
l = Label(frame, text=text)
l.grid(row=row, column=0, sticky=E)
self.__labels.append(l)
row += 1
col = 1
for text in ('Foreground', 'Background'):
l = Label(frame, text=text)
l.grid(row=1, column=col)
self.__labels.append(l)
col += 1
#
# radios
self.__radios = []
for col in (1, 2):
for row in (2, 3, 4):
# there is no insertforeground option
if row==4 and col==1:
continue
r = Radiobutton(frame, variable=self.__which,
value=(row-2)*2 + col-1,
command=self.__set_color)
r.grid(row=row, column=col)
self.__radios.append(r)
self.__toggletrack()
def __quit(self, event=None):
self.__root.quit()
def withdraw(self, event=None):
self.__root.withdraw()
def deiconify(self, event=None):
self.__root.deiconify()
def __forceupdate(self, event=None):
self.__sb.update_views_current()
def __toggletrack(self, event=None):
if self.__trackp.get():
state = NORMAL
fg = self.__radios[0]['foreground']
else:
state = DISABLED
fg = self.__radios[0]['disabledforeground']
for r in self.__radios:
r.configure(state=state)
for l in self.__labels:
l.configure(foreground=fg)
def __set_color(self, event=None):
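        # __which encodes (row-2)*2 + col-1 from the radio grid above:
        # 0=text foreground, 1=text background, 2=selection foreground,
        # 3=selection background, 5=insertion background (4 is unused
        # because Tk text widgets have no insertforeground option).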
which = self.__which.get()
text = self.__text
if which == 0:
color = text['foreground']
elif which == 1:
color = text['background']
elif which == 2:
color = text['selectforeground']
elif which == 3:
color = text['selectbackground']
elif which == 5:
color = text['insertbackground']
try:
red, green, blue = ColorDB.rrggbb_to_triplet(color)
except ColorDB.BadColor:
# must have been a color name
red, green, blue = self.__sb.colordb().find_byname(color)
self.__sb.update_views(red, green, blue)
def update_yourself(self, red, green, blue):
if self.__trackp.get():
colorname = ColorDB.triplet_to_rrggbb((red, green, blue))
which = self.__which.get()
text = self.__text
if which == 0:
text.configure(foreground=colorname)
elif which == 1:
text.configure(background=colorname)
elif which == 2:
text.configure(selectforeground=colorname)
elif which == 3:
text.configure(selectbackground=colorname)
elif which == 5:
text.configure(insertbackground=colorname)
def save_options(self, optiondb):
optiondb['TRACKP'] = self.__trackp.get()
optiondb['WHICH'] = self.__which.get()
optiondb['TEXT'] = self.__text.get(0.0, 'end - 1c')
optiondb['TEXTSEL'] = self.__text.tag_ranges(SEL)[0:2]
optiondb['TEXTINS'] = self.__text.index(INSERT)
optiondb['TEXTFG'] = self.__text['foreground']
optiondb['TEXTBG'] = self.__text['background']
optiondb['TEXT_SFG'] = self.__text['selectforeground']
optiondb['TEXT_SBG'] = self.__text['selectbackground']
optiondb['TEXT_IBG'] = self.__text['insertbackground']
| 6,869 | 189 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/html40colors.txt | # HTML 4.0 color names
Black #000000
Silver #c0c0c0
Gray #808080
White #ffffff
Maroon #800000
Red #ff0000
Purple #800080
Fuchsia #ff00ff
Green #008000
Lime #00ff00
Olive #808000
Yellow #ffff00
Navy #000080
Blue #0000ff
Teal #008080
Aqua #00ffff
| 245 | 18 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/pynche | #! /usr/bin/env python
"""Run this file under Unix, or when debugging under Windows.
Run the file pynche.pyw under Windows to inhibit the console window.
"""
import Main
Main.main()
| 183 | 8 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/StripViewer.py | """Strip viewer and related widgets.
The classes in this file implement the StripViewer shown in the top two thirds
of the main Pynche window. It consists of three StripWidgets which display
the variations in red, green, and blue respectively of the currently selected
r/g/b color value.
Each StripWidget shows the color variations that are reachable by varying an
axis of the currently selected color. So for example, if the color is
(R,G,B)=(127,163,196)
then the Red variations show colors from (0,163,196) to (255,163,196), the
Green variations show colors from (127,0,196) to (127,255,196), and the Blue
variations show colors from (127,163,0) to (127,163,255).
The selected color is always visible in all three StripWidgets, and in fact
each StripWidget highlights the selected color, and has an arrow pointing to
the selected chip, which includes the value along that particular axis.
Clicking on any chip in any StripWidget selects that color, and updates all
arrows and other windows. By toggling on Update while dragging, Pynche will
select the color under the cursor while you drag it, but be forewarned that
this can be slow.
"""
from tkinter import *
import ColorDB
# Load this script into the Tcl interpreter and call it in
# StripWidget.set_color(). This is about as fast as it can be with the
# current _tkinter.c interface, which doesn't support Tcl Objects.
TCLPROC = '''\
proc setcolor {canv colors} {
set i 1
foreach c $colors {
$canv itemconfigure $i -fill $c -outline $c
incr i
}
}
'''
# Tcl event types
BTNDOWN = 4
BTNUP = 5
BTNDRAG = 6
SPACE = ' '
def constant(numchips):
step = 255.0 / (numchips - 1)
start = 0.0
seq = []
while numchips > 0:
seq.append(int(start))
start = start + step
numchips = numchips - 1
return seq
# red held constant, green and blue vary together (for the disabled Constants mode)
def constant_red_generator(numchips, red, green, blue):
seq = constant(numchips)
return list(zip([red] * numchips, seq, seq))
# green held constant, red and blue vary together (for the disabled Constants mode)
def constant_green_generator(numchips, red, green, blue):
seq = constant(numchips)
return list(zip(seq, [green] * numchips, seq))
# blue held constant, red and green vary together (for the disabled Constants mode)
def constant_blue_generator(numchips, red, green, blue):
seq = constant(numchips)
return list(zip(seq, seq, [blue] * numchips))
# red variations, green+blue = cyan constant
def constant_cyan_generator(numchips, red, green, blue):
seq = constant(numchips)
return list(zip(seq, [green] * numchips, [blue] * numchips))
# green variations, red+blue = magenta constant
def constant_magenta_generator(numchips, red, green, blue):
seq = constant(numchips)
return list(zip([red] * numchips, seq, [blue] * numchips))
# blue variations, red+green = yellow constant
def constant_yellow_generator(numchips, red, green, blue):
seq = constant(numchips)
return list(zip([red] * numchips, [green] * numchips, seq))
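# For example, constant_cyan_generator(3, 127, 163, 196) produces
# [(0, 163, 196), (127, 163, 196), (255, 163, 196)]: the red axis sweeps
# 0..255 while green and blue stay fixed, which is the Red Variations
# strip described in the module docstring.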
class LeftArrow:
_ARROWWIDTH = 30
_ARROWHEIGHT = 15
_YOFFSET = 13
_TEXTYOFFSET = 1
_TAG = ('leftarrow',)
def __init__(self, canvas, x):
self._canvas = canvas
self.__arrow, self.__text = self._create(x)
self.move_to(x)
def _create(self, x):
arrow = self._canvas.create_line(
x, self._ARROWHEIGHT + self._YOFFSET,
x, self._YOFFSET,
x + self._ARROWWIDTH, self._YOFFSET,
arrow='first',
width=3.0,
tags=self._TAG)
text = self._canvas.create_text(
x + self._ARROWWIDTH + 13,
self._ARROWHEIGHT - self._TEXTYOFFSET,
tags=self._TAG,
text='128')
return arrow, text
def _x(self):
coords = list(self._canvas.coords(self._TAG))
assert coords
return coords[0]
def move_to(self, x):
deltax = x - self._x()
self._canvas.move(self._TAG, deltax, 0)
def set_text(self, text):
self._canvas.itemconfigure(self.__text, text=text)
class RightArrow(LeftArrow):
_TAG = ('rightarrow',)
def _create(self, x):
arrow = self._canvas.create_line(
x, self._YOFFSET,
x + self._ARROWWIDTH, self._YOFFSET,
x + self._ARROWWIDTH, self._ARROWHEIGHT + self._YOFFSET,
arrow='last',
width=3.0,
tags=self._TAG)
text = self._canvas.create_text(
x - self._ARROWWIDTH + 15, # BAW: kludge
self._ARROWHEIGHT - self._TEXTYOFFSET,
justify=RIGHT,
text='128',
tags=self._TAG)
return arrow, text
def _x(self):
coords = list(self._canvas.coords(self._TAG))
assert coords
return coords[0] + self._ARROWWIDTH
class StripWidget:
_CHIPHEIGHT = 50
_CHIPWIDTH = 10
_NUMCHIPS = 40
def __init__(self, switchboard,
master = None,
chipwidth = _CHIPWIDTH,
chipheight = _CHIPHEIGHT,
numchips = _NUMCHIPS,
generator = None,
axis = None,
label = '',
uwdvar = None,
hexvar = None):
# instance variables
self.__generator = generator
self.__axis = axis
self.__numchips = numchips
assert self.__axis in (0, 1, 2)
self.__uwd = uwdvar
self.__hexp = hexvar
# the last chip selected
self.__lastchip = None
self.__sb = switchboard
canvaswidth = numchips * (chipwidth + 1)
canvasheight = chipheight + 43 # BAW: Kludge
# create the canvas and pack it
canvas = self.__canvas = Canvas(master,
width=canvaswidth,
height=canvasheight,
## borderwidth=2,
## relief=GROOVE
)
canvas.pack()
canvas.bind('<ButtonPress-1>', self.__select_chip)
canvas.bind('<ButtonRelease-1>', self.__select_chip)
canvas.bind('<B1-Motion>', self.__select_chip)
# Load a proc into the Tcl interpreter. This is used in the
# set_color() method to speed up setting the chip colors.
canvas.tk.eval(TCLPROC)
# create the color strip
chips = self.__chips = []
x = 1
y = 30
tags = ('chip',)
for c in range(self.__numchips):
color = 'grey'
canvas.create_rectangle(
x, y, x+chipwidth, y+chipheight,
fill=color, outline=color,
tags=tags)
x = x + chipwidth + 1 # for outline
chips.append(color)
# create the strip label
self.__label = canvas.create_text(
3, y + chipheight + 8,
text=label,
anchor=W)
# create the arrow and text item
chipx = self.__arrow_x(0)
self.__leftarrow = LeftArrow(canvas, chipx)
chipx = self.__arrow_x(len(chips) - 1)
self.__rightarrow = RightArrow(canvas, chipx)
def __arrow_x(self, chipnum):
coords = self.__canvas.coords(chipnum+1)
assert coords
x0, y0, x1, y1 = coords
return (x1 + x0) / 2.0
# Invoked when one of the chips is clicked. This should just tell the
# switchboard to set the color on all the output components
def __select_chip(self, event=None):
x = event.x
y = event.y
canvas = self.__canvas
chip = canvas.find_overlapping(x, y, x, y)
if chip and (1 <= chip[0] <= self.__numchips):
color = self.__chips[chip[0]-1]
red, green, blue = ColorDB.rrggbb_to_triplet(color)
etype = int(event.type)
if (etype == BTNUP or self.__uwd.get()):
# update everyone
self.__sb.update_views(red, green, blue)
else:
# just track the arrows
self.__trackarrow(chip[0], (red, green, blue))
def __trackarrow(self, chip, rgbtuple):
# invert the last chip
if self.__lastchip is not None:
color = self.__canvas.itemcget(self.__lastchip, 'fill')
self.__canvas.itemconfigure(self.__lastchip, outline=color)
self.__lastchip = chip
# get the arrow's text
coloraxis = rgbtuple[self.__axis]
if self.__hexp.get():
# hex
text = hex(coloraxis)
else:
# decimal
text = repr(coloraxis)
# move the arrow, and set its text
if coloraxis <= 128:
# use the left arrow
self.__leftarrow.set_text(text)
self.__leftarrow.move_to(self.__arrow_x(chip-1))
self.__rightarrow.move_to(-100)
else:
# use the right arrow
self.__rightarrow.set_text(text)
self.__rightarrow.move_to(self.__arrow_x(chip-1))
self.__leftarrow.move_to(-100)
# and set the chip's outline
brightness = ColorDB.triplet_to_brightness(rgbtuple)
if brightness <= 128:
outline = 'white'
else:
outline = 'black'
self.__canvas.itemconfigure(chip, outline=outline)
def update_yourself(self, red, green, blue):
assert self.__generator
i = 1
chip = 0
chips = self.__chips = []
tk = self.__canvas.tk
# get the red, green, and blue components for all chips
for t in self.__generator(self.__numchips, red, green, blue):
rrggbb = ColorDB.triplet_to_rrggbb(t)
chips.append(rrggbb)
tred, tgreen, tblue = t
if tred <= red and tgreen <= green and tblue <= blue:
chip = i
i = i + 1
# call the raw tcl script
colors = SPACE.join(chips)
tk.eval('setcolor %s {%s}' % (self.__canvas._w, colors))
# move the arrows around
self.__trackarrow(chip, (red, green, blue))
def set(self, label, generator):
self.__canvas.itemconfigure(self.__label, text=label)
self.__generator = generator
class StripViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
optiondb = switchboard.optiondb()
# create a frame inside the master.
frame = Frame(master, relief=RAISED, borderwidth=1)
frame.grid(row=1, column=0, columnspan=2, sticky='NSEW')
# create the options to be used later
uwd = self.__uwdvar = BooleanVar()
uwd.set(optiondb.get('UPWHILEDRAG', 0))
hexp = self.__hexpvar = BooleanVar()
hexp.set(optiondb.get('HEXSTRIP', 0))
# create the red, green, blue strips inside their own frame
frame1 = Frame(frame)
frame1.pack(expand=YES, fill=BOTH)
self.__reds = StripWidget(switchboard, frame1,
generator=constant_cyan_generator,
axis=0,
label='Red Variations',
uwdvar=uwd, hexvar=hexp)
self.__greens = StripWidget(switchboard, frame1,
generator=constant_magenta_generator,
axis=1,
label='Green Variations',
uwdvar=uwd, hexvar=hexp)
self.__blues = StripWidget(switchboard, frame1,
generator=constant_yellow_generator,
axis=2,
label='Blue Variations',
uwdvar=uwd, hexvar=hexp)
# create a frame to contain the controls
frame2 = Frame(frame)
frame2.pack(expand=YES, fill=BOTH)
frame2.columnconfigure(0, weight=20)
frame2.columnconfigure(2, weight=20)
padx = 8
# create the black button
blackbtn = Button(frame2,
text='Black',
command=self.__toblack)
blackbtn.grid(row=0, column=0, rowspan=2, sticky=W, padx=padx)
# create the controls
uwdbtn = Checkbutton(frame2,
text='Update while dragging',
variable=uwd)
uwdbtn.grid(row=0, column=1, sticky=W)
hexbtn = Checkbutton(frame2,
text='Hexadecimal',
variable=hexp,
command=self.__togglehex)
hexbtn.grid(row=1, column=1, sticky=W)
# XXX: ignore this feature for now; it doesn't work quite right yet
## gentypevar = self.__gentypevar = IntVar()
## self.__variations = Radiobutton(frame,
## text='Variations',
## variable=gentypevar,
## value=0,
## command=self.__togglegentype)
## self.__variations.grid(row=0, column=1, sticky=W)
## self.__constants = Radiobutton(frame,
## text='Constants',
## variable=gentypevar,
## value=1,
## command=self.__togglegentype)
## self.__constants.grid(row=1, column=1, sticky=W)
# create the white button
whitebtn = Button(frame2,
text='White',
command=self.__towhite)
whitebtn.grid(row=0, column=2, rowspan=2, sticky=E, padx=padx)
def update_yourself(self, red, green, blue):
self.__reds.update_yourself(red, green, blue)
self.__greens.update_yourself(red, green, blue)
self.__blues.update_yourself(red, green, blue)
def __togglehex(self, event=None):
red, green, blue = self.__sb.current_rgb()
self.update_yourself(red, green, blue)
## def __togglegentype(self, event=None):
## which = self.__gentypevar.get()
## if which == 0:
## self.__reds.set(label='Red Variations',
## generator=constant_cyan_generator)
## self.__greens.set(label='Green Variations',
## generator=constant_magenta_generator)
## self.__blues.set(label='Blue Variations',
## generator=constant_yellow_generator)
## elif which == 1:
## self.__reds.set(label='Red Constant',
## generator=constant_red_generator)
## self.__greens.set(label='Green Constant',
## generator=constant_green_generator)
## self.__blues.set(label='Blue Constant',
## generator=constant_blue_generator)
## else:
## assert 0
## self.__sb.update_views_current()
def __toblack(self, event=None):
self.__sb.update_views(0, 0, 0)
def __towhite(self, event=None):
self.__sb.update_views(255, 255, 255)
def save_options(self, optiondb):
optiondb['UPWHILEDRAG'] = self.__uwdvar.get()
optiondb['HEXSTRIP'] = self.__hexpvar.get()
| 15,477 | 434 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/__init__.py | # Dummy file to make this directory a package.
| 47 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/pyColorChooser.py | """Color chooser implementing (almost) the tkColorChooser interface
"""
import os
import Main
import ColorDB
class Chooser:
"""Ask for a color"""
def __init__(self,
master = None,
databasefile = None,
initfile = None,
ignore = None,
wantspec = None):
self.__master = master
self.__databasefile = databasefile
self.__initfile = initfile or os.path.expanduser('~/.pynche')
self.__ignore = ignore
        self.__pw = None
        self.__sb = None
self.__wantspec = wantspec
def show(self, color, options):
# scan for options that can override the ctor options
self.__wantspec = options.get('wantspec', self.__wantspec)
dbfile = options.get('databasefile', self.__databasefile)
# load the database file
colordb = None
if dbfile != self.__databasefile:
colordb = ColorDB.get_colordb(dbfile)
if not self.__master:
from tkinter import Tk
self.__master = Tk()
if not self.__pw:
self.__pw, self.__sb = \
Main.build(master = self.__master,
initfile = self.__initfile,
ignore = self.__ignore)
else:
self.__pw.deiconify()
# convert color
if colordb:
self.__sb.set_colordb(colordb)
else:
colordb = self.__sb.colordb()
if color:
r, g, b = Main.initial_color(color, colordb)
self.__sb.update_views(r, g, b)
# reset the canceled flag and run it
self.__sb.canceled(0)
Main.run(self.__pw, self.__sb)
rgbtuple = self.__sb.current_rgb()
self.__pw.withdraw()
# check to see if the cancel button was pushed
if self.__sb.canceled_p():
return None, None
# Try to return the color name from the database if there is an exact
# match, otherwise use the "#rrggbb" spec. BAW: Forget about color
# aliases for now, maybe later we should return these too.
name = None
if not self.__wantspec:
try:
name = colordb.find_byrgb(rgbtuple)[0]
except ColorDB.BadColor:
pass
if name is None:
name = ColorDB.triplet_to_rrggbb(rgbtuple)
return rgbtuple, name
def save(self):
if self.__sb:
self.__sb.save_views()
# convenience stuff
_chooser = None
def askcolor(color = None, **options):
"""Ask for a color"""
global _chooser
if not _chooser:
_chooser = Chooser(**options)
return _chooser.show(color, options)
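# Typical use (sketch): rgb, name = askcolor('#6495ed') pops up Pynche;
# Okay returns the selected (r, g, b) tuple plus a color name (or the
# '#rrggbb' spec when there is no exact database match), while Cancel
# returns (None, None).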
def save():
global _chooser
if _chooser:
_chooser.save()
# test stuff
if __name__ == '__main__':
from tkinter import *
class Tester:
def __init__(self):
self.__root = tk = Tk()
b = Button(tk, text='Choose Color...', command=self.__choose)
b.pack()
self.__l = Label(tk)
self.__l.pack()
q = Button(tk, text='Quit', command=self.__quit)
q.pack()
def __choose(self, event=None):
rgb, name = askcolor(master=self.__root)
if rgb is None:
text = 'You hit CANCEL!'
else:
r, g, b = rgb
text = 'You picked %s (%3d/%3d/%3d)' % (name, r, g, b)
self.__l.configure(text=text)
def __quit(self, event=None):
self.__root.quit()
def run(self):
self.__root.mainloop()
t = Tester()
t.run()
# simpler
## print 'color:', askcolor()
## print 'color:', askcolor()
| 3,759 | 126 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/X/rgb.txt | ! $XConsortium: rgb.txt,v 10.41 94/02/20 18:39:36 rws Exp $
255 250 250 snow
248 248 255 ghost white
248 248 255 GhostWhite
245 245 245 white smoke
245 245 245 WhiteSmoke
220 220 220 gainsboro
255 250 240 floral white
255 250 240 FloralWhite
253 245 230 old lace
253 245 230 OldLace
250 240 230 linen
250 235 215 antique white
250 235 215 AntiqueWhite
255 239 213 papaya whip
255 239 213 PapayaWhip
255 235 205 blanched almond
255 235 205 BlanchedAlmond
255 228 196 bisque
255 218 185 peach puff
255 218 185 PeachPuff
255 222 173 navajo white
255 222 173 NavajoWhite
255 228 181 moccasin
255 248 220 cornsilk
255 255 240 ivory
255 250 205 lemon chiffon
255 250 205 LemonChiffon
255 245 238 seashell
240 255 240 honeydew
245 255 250 mint cream
245 255 250 MintCream
240 255 255 azure
240 248 255 alice blue
240 248 255 AliceBlue
230 230 250 lavender
255 240 245 lavender blush
255 240 245 LavenderBlush
255 228 225 misty rose
255 228 225 MistyRose
255 255 255 white
0 0 0 black
47 79 79 dark slate gray
47 79 79 DarkSlateGray
47 79 79 dark slate grey
47 79 79 DarkSlateGrey
105 105 105 dim gray
105 105 105 DimGray
105 105 105 dim grey
105 105 105 DimGrey
112 128 144 slate gray
112 128 144 SlateGray
112 128 144 slate grey
112 128 144 SlateGrey
119 136 153 light slate gray
119 136 153 LightSlateGray
119 136 153 light slate grey
119 136 153 LightSlateGrey
190 190 190 gray
190 190 190 grey
211 211 211 light grey
211 211 211 LightGrey
211 211 211 light gray
211 211 211 LightGray
25 25 112 midnight blue
25 25 112 MidnightBlue
0 0 128 navy
0 0 128 navy blue
0 0 128 NavyBlue
100 149 237 cornflower blue
100 149 237 CornflowerBlue
72 61 139 dark slate blue
72 61 139 DarkSlateBlue
106 90 205 slate blue
106 90 205 SlateBlue
123 104 238 medium slate blue
123 104 238 MediumSlateBlue
132 112 255 light slate blue
132 112 255 LightSlateBlue
0 0 205 medium blue
0 0 205 MediumBlue
65 105 225 royal blue
65 105 225 RoyalBlue
0 0 255 blue
30 144 255 dodger blue
30 144 255 DodgerBlue
0 191 255 deep sky blue
0 191 255 DeepSkyBlue
135 206 235 sky blue
135 206 235 SkyBlue
135 206 250 light sky blue
135 206 250 LightSkyBlue
70 130 180 steel blue
70 130 180 SteelBlue
176 196 222 light steel blue
176 196 222 LightSteelBlue
173 216 230 light blue
173 216 230 LightBlue
176 224 230 powder blue
176 224 230 PowderBlue
175 238 238 pale turquoise
175 238 238 PaleTurquoise
0 206 209 dark turquoise
0 206 209 DarkTurquoise
72 209 204 medium turquoise
72 209 204 MediumTurquoise
64 224 208 turquoise
0 255 255 cyan
224 255 255 light cyan
224 255 255 LightCyan
95 158 160 cadet blue
95 158 160 CadetBlue
102 205 170 medium aquamarine
102 205 170 MediumAquamarine
127 255 212 aquamarine
0 100 0 dark green
0 100 0 DarkGreen
85 107 47 dark olive green
85 107 47 DarkOliveGreen
143 188 143 dark sea green
143 188 143 DarkSeaGreen
46 139 87 sea green
46 139 87 SeaGreen
60 179 113 medium sea green
60 179 113 MediumSeaGreen
32 178 170 light sea green
32 178 170 LightSeaGreen
152 251 152 pale green
152 251 152 PaleGreen
0 255 127 spring green
0 255 127 SpringGreen
124 252 0 lawn green
124 252 0 LawnGreen
0 255 0 green
127 255 0 chartreuse
0 250 154 medium spring green
0 250 154 MediumSpringGreen
173 255 47 green yellow
173 255 47 GreenYellow
50 205 50 lime green
50 205 50 LimeGreen
154 205 50 yellow green
154 205 50 YellowGreen
34 139 34 forest green
34 139 34 ForestGreen
107 142 35 olive drab
107 142 35 OliveDrab
189 183 107 dark khaki
189 183 107 DarkKhaki
240 230 140 khaki
238 232 170 pale goldenrod
238 232 170 PaleGoldenrod
250 250 210 light goldenrod yellow
250 250 210 LightGoldenrodYellow
255 255 224 light yellow
255 255 224 LightYellow
255 255 0 yellow
255 215 0 gold
238 221 130 light goldenrod
238 221 130 LightGoldenrod
218 165 32 goldenrod
184 134 11 dark goldenrod
184 134 11 DarkGoldenrod
188 143 143 rosy brown
188 143 143 RosyBrown
205 92 92 indian red
205 92 92 IndianRed
139 69 19 saddle brown
139 69 19 SaddleBrown
160 82 45 sienna
205 133 63 peru
222 184 135 burlywood
245 245 220 beige
245 222 179 wheat
244 164 96 sandy brown
244 164 96 SandyBrown
210 180 140 tan
210 105 30 chocolate
178 34 34 firebrick
165 42 42 brown
233 150 122 dark salmon
233 150 122 DarkSalmon
250 128 114 salmon
255 160 122 light salmon
255 160 122 LightSalmon
255 165 0 orange
255 140 0 dark orange
255 140 0 DarkOrange
255 127 80 coral
240 128 128 light coral
240 128 128 LightCoral
255 99 71 tomato
255 69 0 orange red
255 69 0 OrangeRed
255 0 0 red
255 105 180 hot pink
255 105 180 HotPink
255 20 147 deep pink
255 20 147 DeepPink
255 192 203 pink
255 182 193 light pink
255 182 193 LightPink
219 112 147 pale violet red
219 112 147 PaleVioletRed
176 48 96 maroon
199 21 133 medium violet red
199 21 133 MediumVioletRed
208 32 144 violet red
208 32 144 VioletRed
255 0 255 magenta
238 130 238 violet
221 160 221 plum
218 112 214 orchid
186 85 211 medium orchid
186 85 211 MediumOrchid
153 50 204 dark orchid
153 50 204 DarkOrchid
148 0 211 dark violet
148 0 211 DarkViolet
138 43 226 blue violet
138 43 226 BlueViolet
160 32 240 purple
147 112 219 medium purple
147 112 219 MediumPurple
216 191 216 thistle
255 250 250 snow1
238 233 233 snow2
205 201 201 snow3
139 137 137 snow4
255 245 238 seashell1
238 229 222 seashell2
205 197 191 seashell3
139 134 130 seashell4
255 239 219 AntiqueWhite1
238 223 204 AntiqueWhite2
205 192 176 AntiqueWhite3
139 131 120 AntiqueWhite4
255 228 196 bisque1
238 213 183 bisque2
205 183 158 bisque3
139 125 107 bisque4
255 218 185 PeachPuff1
238 203 173 PeachPuff2
205 175 149 PeachPuff3
139 119 101 PeachPuff4
255 222 173 NavajoWhite1
238 207 161 NavajoWhite2
205 179 139 NavajoWhite3
139 121 94 NavajoWhite4
255 250 205 LemonChiffon1
238 233 191 LemonChiffon2
205 201 165 LemonChiffon3
139 137 112 LemonChiffon4
255 248 220 cornsilk1
238 232 205 cornsilk2
205 200 177 cornsilk3
139 136 120 cornsilk4
255 255 240 ivory1
238 238 224 ivory2
205 205 193 ivory3
139 139 131 ivory4
240 255 240 honeydew1
224 238 224 honeydew2
193 205 193 honeydew3
131 139 131 honeydew4
255 240 245 LavenderBlush1
238 224 229 LavenderBlush2
205 193 197 LavenderBlush3
139 131 134 LavenderBlush4
255 228 225 MistyRose1
238 213 210 MistyRose2
205 183 181 MistyRose3
139 125 123 MistyRose4
240 255 255 azure1
224 238 238 azure2
193 205 205 azure3
131 139 139 azure4
131 111 255 SlateBlue1
122 103 238 SlateBlue2
105 89 205 SlateBlue3
71 60 139 SlateBlue4
72 118 255 RoyalBlue1
67 110 238 RoyalBlue2
58 95 205 RoyalBlue3
39 64 139 RoyalBlue4
0 0 255 blue1
0 0 238 blue2
0 0 205 blue3
0 0 139 blue4
30 144 255 DodgerBlue1
28 134 238 DodgerBlue2
24 116 205 DodgerBlue3
16 78 139 DodgerBlue4
99 184 255 SteelBlue1
92 172 238 SteelBlue2
79 148 205 SteelBlue3
54 100 139 SteelBlue4
0 191 255 DeepSkyBlue1
0 178 238 DeepSkyBlue2
0 154 205 DeepSkyBlue3
0 104 139 DeepSkyBlue4
135 206 255 SkyBlue1
126 192 238 SkyBlue2
108 166 205 SkyBlue3
74 112 139 SkyBlue4
176 226 255 LightSkyBlue1
164 211 238 LightSkyBlue2
141 182 205 LightSkyBlue3
96 123 139 LightSkyBlue4
198 226 255 SlateGray1
185 211 238 SlateGray2
159 182 205 SlateGray3
108 123 139 SlateGray4
202 225 255 LightSteelBlue1
188 210 238 LightSteelBlue2
162 181 205 LightSteelBlue3
110 123 139 LightSteelBlue4
191 239 255 LightBlue1
178 223 238 LightBlue2
154 192 205 LightBlue3
104 131 139 LightBlue4
224 255 255 LightCyan1
209 238 238 LightCyan2
180 205 205 LightCyan3
122 139 139 LightCyan4
187 255 255 PaleTurquoise1
174 238 238 PaleTurquoise2
150 205 205 PaleTurquoise3
102 139 139 PaleTurquoise4
152 245 255 CadetBlue1
142 229 238 CadetBlue2
122 197 205 CadetBlue3
83 134 139 CadetBlue4
0 245 255 turquoise1
0 229 238 turquoise2
0 197 205 turquoise3
0 134 139 turquoise4
0 255 255 cyan1
0 238 238 cyan2
0 205 205 cyan3
0 139 139 cyan4
151 255 255 DarkSlateGray1
141 238 238 DarkSlateGray2
121 205 205 DarkSlateGray3
82 139 139 DarkSlateGray4
127 255 212 aquamarine1
118 238 198 aquamarine2
102 205 170 aquamarine3
69 139 116 aquamarine4
193 255 193 DarkSeaGreen1
180 238 180 DarkSeaGreen2
155 205 155 DarkSeaGreen3
105 139 105 DarkSeaGreen4
84 255 159 SeaGreen1
78 238 148 SeaGreen2
67 205 128 SeaGreen3
46 139 87 SeaGreen4
154 255 154 PaleGreen1
144 238 144 PaleGreen2
124 205 124 PaleGreen3
84 139 84 PaleGreen4
0 255 127 SpringGreen1
0 238 118 SpringGreen2
0 205 102 SpringGreen3
0 139 69 SpringGreen4
0 255 0 green1
0 238 0 green2
0 205 0 green3
0 139 0 green4
127 255 0 chartreuse1
118 238 0 chartreuse2
102 205 0 chartreuse3
69 139 0 chartreuse4
192 255 62 OliveDrab1
179 238 58 OliveDrab2
154 205 50 OliveDrab3
105 139 34 OliveDrab4
202 255 112 DarkOliveGreen1
188 238 104 DarkOliveGreen2
162 205 90 DarkOliveGreen3
110 139 61 DarkOliveGreen4
255 246 143 khaki1
238 230 133 khaki2
205 198 115 khaki3
139 134 78 khaki4
255 236 139 LightGoldenrod1
238 220 130 LightGoldenrod2
205 190 112 LightGoldenrod3
139 129 76 LightGoldenrod4
255 255 224 LightYellow1
238 238 209 LightYellow2
205 205 180 LightYellow3
139 139 122 LightYellow4
255 255 0 yellow1
238 238 0 yellow2
205 205 0 yellow3
139 139 0 yellow4
255 215 0 gold1
238 201 0 gold2
205 173 0 gold3
139 117 0 gold4
255 193 37 goldenrod1
238 180 34 goldenrod2
205 155 29 goldenrod3
139 105 20 goldenrod4
255 185 15 DarkGoldenrod1
238 173 14 DarkGoldenrod2
205 149 12 DarkGoldenrod3
139 101 8 DarkGoldenrod4
255 193 193 RosyBrown1
238 180 180 RosyBrown2
205 155 155 RosyBrown3
139 105 105 RosyBrown4
255 106 106 IndianRed1
238 99 99 IndianRed2
205 85 85 IndianRed3
139 58 58 IndianRed4
255 130 71 sienna1
238 121 66 sienna2
205 104 57 sienna3
139 71 38 sienna4
255 211 155 burlywood1
238 197 145 burlywood2
205 170 125 burlywood3
139 115 85 burlywood4
255 231 186 wheat1
238 216 174 wheat2
205 186 150 wheat3
139 126 102 wheat4
255 165 79 tan1
238 154 73 tan2
205 133 63 tan3
139 90 43 tan4
255 127 36 chocolate1
238 118 33 chocolate2
205 102 29 chocolate3
139 69 19 chocolate4
255 48 48 firebrick1
238 44 44 firebrick2
205 38 38 firebrick3
139 26 26 firebrick4
255 64 64 brown1
238 59 59 brown2
205 51 51 brown3
139 35 35 brown4
255 140 105 salmon1
238 130 98 salmon2
205 112 84 salmon3
139 76 57 salmon4
255 160 122 LightSalmon1
238 149 114 LightSalmon2
205 129 98 LightSalmon3
139 87 66 LightSalmon4
255 165 0 orange1
238 154 0 orange2
205 133 0 orange3
139 90 0 orange4
255 127 0 DarkOrange1
238 118 0 DarkOrange2
205 102 0 DarkOrange3
139 69 0 DarkOrange4
255 114 86 coral1
238 106 80 coral2
205 91 69 coral3
139 62 47 coral4
255 99 71 tomato1
238 92 66 tomato2
205 79 57 tomato3
139 54 38 tomato4
255 69 0 OrangeRed1
238 64 0 OrangeRed2
205 55 0 OrangeRed3
139 37 0 OrangeRed4
255 0 0 red1
238 0 0 red2
205 0 0 red3
139 0 0 red4
255 20 147 DeepPink1
238 18 137 DeepPink2
205 16 118 DeepPink3
139 10 80 DeepPink4
255 110 180 HotPink1
238 106 167 HotPink2
205 96 144 HotPink3
139 58 98 HotPink4
255 181 197 pink1
238 169 184 pink2
205 145 158 pink3
139 99 108 pink4
255 174 185 LightPink1
238 162 173 LightPink2
205 140 149 LightPink3
139 95 101 LightPink4
255 130 171 PaleVioletRed1
238 121 159 PaleVioletRed2
205 104 137 PaleVioletRed3
139 71 93 PaleVioletRed4
255 52 179 maroon1
238 48 167 maroon2
205 41 144 maroon3
139 28 98 maroon4
255 62 150 VioletRed1
238 58 140 VioletRed2
205 50 120 VioletRed3
139 34 82 VioletRed4
255 0 255 magenta1
238 0 238 magenta2
205 0 205 magenta3
139 0 139 magenta4
255 131 250 orchid1
238 122 233 orchid2
205 105 201 orchid3
139 71 137 orchid4
255 187 255 plum1
238 174 238 plum2
205 150 205 plum3
139 102 139 plum4
224 102 255 MediumOrchid1
209 95 238 MediumOrchid2
180 82 205 MediumOrchid3
122 55 139 MediumOrchid4
191 62 255 DarkOrchid1
178 58 238 DarkOrchid2
154 50 205 DarkOrchid3
104 34 139 DarkOrchid4
155 48 255 purple1
145 44 238 purple2
125 38 205 purple3
85 26 139 purple4
171 130 255 MediumPurple1
159 121 238 MediumPurple2
137 104 205 MediumPurple3
93 71 139 MediumPurple4
255 225 255 thistle1
238 210 238 thistle2
205 181 205 thistle3
139 123 139 thistle4
0 0 0 gray0
0 0 0 grey0
3 3 3 gray1
3 3 3 grey1
5 5 5 gray2
5 5 5 grey2
8 8 8 gray3
8 8 8 grey3
10 10 10 gray4
10 10 10 grey4
13 13 13 gray5
13 13 13 grey5
15 15 15 gray6
15 15 15 grey6
18 18 18 gray7
18 18 18 grey7
20 20 20 gray8
20 20 20 grey8
23 23 23 gray9
23 23 23 grey9
26 26 26 gray10
26 26 26 grey10
28 28 28 gray11
28 28 28 grey11
31 31 31 gray12
31 31 31 grey12
33 33 33 gray13
33 33 33 grey13
36 36 36 gray14
36 36 36 grey14
38 38 38 gray15
38 38 38 grey15
41 41 41 gray16
41 41 41 grey16
43 43 43 gray17
43 43 43 grey17
46 46 46 gray18
46 46 46 grey18
48 48 48 gray19
48 48 48 grey19
51 51 51 gray20
51 51 51 grey20
54 54 54 gray21
54 54 54 grey21
56 56 56 gray22
56 56 56 grey22
59 59 59 gray23
59 59 59 grey23
61 61 61 gray24
61 61 61 grey24
64 64 64 gray25
64 64 64 grey25
66 66 66 gray26
66 66 66 grey26
69 69 69 gray27
69 69 69 grey27
71 71 71 gray28
71 71 71 grey28
74 74 74 gray29
74 74 74 grey29
77 77 77 gray30
77 77 77 grey30
79 79 79 gray31
79 79 79 grey31
82 82 82 gray32
82 82 82 grey32
84 84 84 gray33
84 84 84 grey33
87 87 87 gray34
87 87 87 grey34
89 89 89 gray35
89 89 89 grey35
92 92 92 gray36
92 92 92 grey36
94 94 94 gray37
94 94 94 grey37
97 97 97 gray38
97 97 97 grey38
99 99 99 gray39
99 99 99 grey39
102 102 102 gray40
102 102 102 grey40
105 105 105 gray41
105 105 105 grey41
107 107 107 gray42
107 107 107 grey42
110 110 110 gray43
110 110 110 grey43
112 112 112 gray44
112 112 112 grey44
115 115 115 gray45
115 115 115 grey45
117 117 117 gray46
117 117 117 grey46
120 120 120 gray47
120 120 120 grey47
122 122 122 gray48
122 122 122 grey48
125 125 125 gray49
125 125 125 grey49
127 127 127 gray50
127 127 127 grey50
130 130 130 gray51
130 130 130 grey51
133 133 133 gray52
133 133 133 grey52
135 135 135 gray53
135 135 135 grey53
138 138 138 gray54
138 138 138 grey54
140 140 140 gray55
140 140 140 grey55
143 143 143 gray56
143 143 143 grey56
145 145 145 gray57
145 145 145 grey57
148 148 148 gray58
148 148 148 grey58
150 150 150 gray59
150 150 150 grey59
153 153 153 gray60
153 153 153 grey60
156 156 156 gray61
156 156 156 grey61
158 158 158 gray62
158 158 158 grey62
161 161 161 gray63
161 161 161 grey63
163 163 163 gray64
163 163 163 grey64
166 166 166 gray65
166 166 166 grey65
168 168 168 gray66
168 168 168 grey66
171 171 171 gray67
171 171 171 grey67
173 173 173 gray68
173 173 173 grey68
176 176 176 gray69
176 176 176 grey69
179 179 179 gray70
179 179 179 grey70
181 181 181 gray71
181 181 181 grey71
184 184 184 gray72
184 184 184 grey72
186 186 186 gray73
186 186 186 grey73
189 189 189 gray74
189 189 189 grey74
191 191 191 gray75
191 191 191 grey75
194 194 194 gray76
194 194 194 grey76
196 196 196 gray77
196 196 196 grey77
199 199 199 gray78
199 199 199 grey78
201 201 201 gray79
201 201 201 grey79
204 204 204 gray80
204 204 204 grey80
207 207 207 gray81
207 207 207 grey81
209 209 209 gray82
209 209 209 grey82
212 212 212 gray83
212 212 212 grey83
214 214 214 gray84
214 214 214 grey84
217 217 217 gray85
217 217 217 grey85
219 219 219 gray86
219 219 219 grey86
222 222 222 gray87
222 222 222 grey87
224 224 224 gray88
224 224 224 grey88
227 227 227 gray89
227 227 227 grey89
229 229 229 gray90
229 229 229 grey90
232 232 232 gray91
232 232 232 grey91
235 235 235 gray92
235 235 235 grey92
237 237 237 gray93
237 237 237 grey93
240 240 240 gray94
240 240 240 grey94
242 242 242 gray95
242 242 242 grey95
245 245 245 gray96
245 245 245 grey96
247 247 247 gray97
247 247 247 grey97
250 250 250 gray98
250 250 250 grey98
252 252 252 gray99
252 252 252 grey99
255 255 255 gray100
255 255 255 grey100
169 169 169 dark grey
169 169 169 DarkGrey
169 169 169 dark gray
169 169 169 DarkGray
0 0 139 dark blue
0 0 139 DarkBlue
0 139 139 dark cyan
0 139 139 DarkCyan
139 0 139 dark magenta
139 0 139 DarkMagenta
139 0 0 dark red
139 0 0 DarkRed
144 238 144 light green
144 238 144 LightGreen
| 17,375 | 754 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pynche/X/xlicense.txt | X Window System License - X11R6.4
Copyright (c) 1998 The Open Group
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of The Open Group shall
not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization
from The Open Group.
X Window System is a trademark of The Open Group
| 1,352 | 30 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pybench/With.py | from pybench import Test
class WithFinally(Test):
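    # Times 20 no-op `with` blocks per round against a do-nothing context
    # manager, measuring the overhead of the `with` statement itself.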
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
pass
def test(self):
cm = self.ContextManager()
for i in range(self.rounds):
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
with cm: pass
def calibrate(self):
cm = self.ContextManager()
for i in range(self.rounds):
pass
class TryFinally(Test):
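    # Baseline for WithFinally: the same cleanup pattern written as explicit
    # __enter__() / try / finally: __exit__() calls instead of `with`.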
version = 2.0
operations = 20
rounds = 80000
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self):
# "Context manager" objects used just for their cleanup
# actions in finally blocks usually don't have parameters.
pass
def test(self):
cm = self.ContextManager()
for i in range(self.rounds):
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
cm.__enter__()
try: pass
finally: cm.__exit__()
def calibrate(self):
cm = self.ContextManager()
for i in range(self.rounds):
pass
class WithRaiseExcept(Test):
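    # Times `with` blocks whose bodies raise; __exit__ returns True, so the
    # context manager suppresses every exception.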
version = 2.0
operations = 2 + 3 + 3
rounds = 100000
class BlockExceptions(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
return True
def test(self):
error = ValueError
be = self.BlockExceptions()
for i in range(self.rounds):
with be: raise error
with be: raise error
with be: raise error("something")
with be: raise error("something")
with be: raise error("something")
with be: raise error("something")
with be: raise error("something")
with be: raise error("something")
def calibrate(self):
error = ValueError
be = self.BlockExceptions()
for i in range(self.rounds):
pass
| 4,096 | 190 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Tools/pybench/LICENSE | pybench License
---------------
This copyright notice and license applies to all files in the pybench
directory of the pybench distribution.
Copyright (c), 1997-2006, Marc-Andre Lemburg ([email protected])
Copyright (c), 2000-2006, eGenix.com Software GmbH ([email protected])
All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.
THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
| 1,146 | 26 | jart/cosmopolitan | false |