cosmopolitan/third_party/python/Lib/xdrlib.py

"""Implements (a subset of) Sun XDR -- eXternal Data Representation.

See: RFC 1014

"""
import struct
from io import BytesIO
from functools import wraps
__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
# exceptions
class Error(Exception):
    """Exception class for this module. Use:

    except xdrlib.Error as var:
        # var has the Error instance for the exception

    Public ivars:
        msg -- contains the message

    """
    def __init__(self, msg):
        self.msg = msg
    def __repr__(self):
        return repr(self.msg)
    def __str__(self):
        return str(self.msg)


class ConversionError(Error):
    pass

def raise_conversion_error(function):
    """ Wrap any raised struct.errors in a ConversionError. """

    @wraps(function)
    def result(self, value):
        try:
            return function(self, value)
        except struct.error as e:
            raise ConversionError(e.args[0]) from None
    return result


class Packer:
    """Pack various data representations into a buffer."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.__buf = BytesIO()

    def get_buffer(self):
        return self.__buf.getvalue()
    # backwards compatibility
    get_buf = get_buffer

    @raise_conversion_error
    def pack_uint(self, x):
        self.__buf.write(struct.pack('>L', x))

    @raise_conversion_error
    def pack_int(self, x):
        self.__buf.write(struct.pack('>l', x))

    pack_enum = pack_int

    def pack_bool(self, x):
        if x: self.__buf.write(b'\0\0\0\1')
        else: self.__buf.write(b'\0\0\0\0')

    def pack_uhyper(self, x):
        try:
            self.pack_uint(x>>32 & 0xffffffff)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0]) from None
        try:
            self.pack_uint(x & 0xffffffff)
        except (TypeError, struct.error) as e:
            raise ConversionError(e.args[0]) from None

    pack_hyper = pack_uhyper

    @raise_conversion_error
    def pack_float(self, x):
        self.__buf.write(struct.pack('>f', x))

    @raise_conversion_error
    def pack_double(self, x):
        self.__buf.write(struct.pack('>d', x))

    def pack_fstring(self, n, s):
        if n < 0:
            raise ValueError('fstring size must be nonnegative')
        data = s[:n]
        n = ((n+3)//4)*4
        data = data + (n - len(data)) * b'\0'
        self.__buf.write(data)

    pack_fopaque = pack_fstring

    def pack_string(self, s):
        n = len(s)
        self.pack_uint(n)
        self.pack_fstring(n, s)

    pack_opaque = pack_string
    pack_bytes = pack_string

    def pack_list(self, list, pack_item):
        for item in list:
            self.pack_uint(1)
            pack_item(item)
        self.pack_uint(0)

    def pack_farray(self, n, list, pack_item):
        if len(list) != n:
            raise ValueError('wrong array size')
        for item in list:
            pack_item(item)

    def pack_array(self, list, pack_item):
        n = len(list)
        self.pack_uint(n)
        self.pack_farray(n, list, pack_item)

class Unpacker:
    """Unpacks various data representations from the given buffer."""

    def __init__(self, data):
        self.reset(data)

    def reset(self, data):
        self.__buf = data
        self.__pos = 0

    def get_position(self):
        return self.__pos

    def set_position(self, position):
        self.__pos = position

    def get_buffer(self):
        return self.__buf

    def done(self):
        if self.__pos < len(self.__buf):
            raise Error('unextracted data remains')

    def unpack_uint(self):
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>L', data)[0]

    def unpack_int(self):
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>l', data)[0]

    unpack_enum = unpack_int

    def unpack_bool(self):
        return bool(self.unpack_int())

    def unpack_uhyper(self):
        hi = self.unpack_uint()
        lo = self.unpack_uint()
        return int(hi)<<32 | lo

    def unpack_hyper(self):
        x = self.unpack_uhyper()
        if x >= 0x8000000000000000:
            x = x - 0x10000000000000000
        return x

    def unpack_float(self):
        i = self.__pos
        self.__pos = j = i+4
        data = self.__buf[i:j]
        if len(data) < 4:
            raise EOFError
        return struct.unpack('>f', data)[0]

    def unpack_double(self):
        i = self.__pos
        self.__pos = j = i+8
        data = self.__buf[i:j]
        if len(data) < 8:
            raise EOFError
        return struct.unpack('>d', data)[0]

    def unpack_fstring(self, n):
        if n < 0:
            raise ValueError('fstring size must be nonnegative')
        i = self.__pos
        j = i + (n+3)//4*4
        if j > len(self.__buf):
            raise EOFError
        self.__pos = j
        return self.__buf[i:i+n]

    unpack_fopaque = unpack_fstring

    def unpack_string(self):
        n = self.unpack_uint()
        return self.unpack_fstring(n)

    unpack_opaque = unpack_string
    unpack_bytes = unpack_string

    def unpack_list(self, unpack_item):
        list = []
        while 1:
            x = self.unpack_uint()
            if x == 0: break
            if x != 1:
                raise ConversionError('0 or 1 expected, got %r' % (x,))
            item = unpack_item()
            list.append(item)
        return list

    def unpack_farray(self, n, unpack_item):
        list = []
        for i in range(n):
            list.append(unpack_item())
        return list

    def unpack_array(self, unpack_item):
        n = self.unpack_uint()
        return self.unpack_farray(n, unpack_item)
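
# A minimal round-trip sketch (not part of the original module); the guard,
# the values, and the variable names below are illustrative only.
if __name__ == '__main__':
    p = Packer()
    p.pack_uint(7)
    p.pack_string(b'hello')          # XDR strings are bytes on the wire
    u = Unpacker(p.get_buffer())
    assert u.unpack_uint() == 7
    assert u.unpack_string() == b'hello'
    u.done()                         # raises Error if data remains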

cosmopolitan/third_party/python/Lib/dummy_threading.py

"""Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.

The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``_thread`` was available which is not desired.

"""
from sys import modules as sys_modules

import _dummy_thread

# Declaring now so as to not have to nest ``try``s to get proper clean-up.
holding_thread = False
holding_threading = False
holding__threading_local = False

try:
    # Could have checked if ``_thread`` was not in sys.modules and gone
    # a different route, but decided to mirror technique used with
    # ``threading`` below.
    if '_thread' in sys_modules:
        held_thread = sys_modules['_thread']
        holding_thread = True
    # Must have some module named ``_thread`` that implements its API
    # in order to initially import ``threading``.
    sys_modules['_thread'] = sys_modules['_dummy_thread']

    if 'threading' in sys_modules:
        # If ``threading`` is already imported, might as well prevent
        # trying to import it more than needed by saving it if it is
        # already imported before deleting it.
        held_threading = sys_modules['threading']
        holding_threading = True
        del sys_modules['threading']

    if '_threading_local' in sys_modules:
        # If ``_threading_local`` is already imported, might as well prevent
        # trying to import it more than needed by saving it if it is
        # already imported before deleting it.
        held__threading_local = sys_modules['_threading_local']
        holding__threading_local = True
        del sys_modules['_threading_local']

    import threading
    # Need a copy of the code kept somewhere...
    sys_modules['_dummy_threading'] = sys_modules['threading']
    del sys_modules['threading']
    sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
    del sys_modules['_threading_local']
    from _dummy_threading import *
    from _dummy_threading import __all__

finally:
    # Put back ``threading`` if we overwrote earlier
    if holding_threading:
        sys_modules['threading'] = held_threading
        del held_threading
    del holding_threading

    # Put back ``_threading_local`` if we overwrote earlier
    if holding__threading_local:
        sys_modules['_threading_local'] = held__threading_local
        del held__threading_local
    del holding__threading_local

    # Put back ``thread`` if we overwrote, else del the entry we made
    if holding_thread:
        sys_modules['_thread'] = held_thread
        del held_thread
    else:
        del sys_modules['_thread']
    del holding_thread

    del _dummy_thread
    del sys_modules
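
# Typical use (a sketch, not part of this module): fall back to this module
# when the interpreter was built without thread support, e.g.
#
#   try:
#       import threading
#   except ImportError:
#       import dummy_threading as threading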

if __name__ == 'PYOBJ.COM':
    Barrier = 0
    BoundedSemaphore = 0
    BrokenBarrierError = 0
    Condition = 0
    Event = 0
    Lock = 0
    RLock = 0
    Semaphore = 0
    TIMEOUT_MAX = 0
    Thread = 0
    ThreadError = 0
    Timer = 0
    WeakSet = 0
    activeCount = 0
    active_count = 0
    currentThread = 0
    current_thread = 0
    enumerate = 0
    get_ident = 0
    local = 0
    main_thread = 0
    setprofile = 0
    settrace = 0
    stack_size = 0

cosmopolitan/third_party/python/Lib/this.py

s = """Gur Mra bs Clguba, ol Gvz Crgref

Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
d = {}
for c in (65, 97):
    for i in range(26):
        d[chr(i+c)] = chr((i+13) % 26 + c)
print("".join([d.get(c, c) for c in s]))
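
# Equivalent sketch (not in the original): the table built above is ROT13,
# which the stdlib codec can apply directly:
#
#   import codecs
#   print(codecs.decode(s, 'rot_13'))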

cosmopolitan/third_party/python/Lib/pipes.py

"""Conversion pipeline templates.

The problem:
------------

Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.

The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.

The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.

Directions:
-----------

To create a template:
    t = Template()

To add a conversion step to a template:
    t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.

To add a conversion step at the beginning:
    t.prepend(command, kind)

To convert a file to another file using a template:
    sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.

To open a file for reading or writing through a conversion pipeline:
    fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().

To create a new template object initialized to a given one:
    t2 = t.clone()
"""  # '
import re
import os
import tempfile
# we import the quote function rather than the module for backward compat
# (quote used to be an undocumented but used function in pipes)
from shlex import quote
__all__ = ["Template"]
# Conversion step kinds
FILEIN_FILEOUT = 'ff' # Must read & write real files
STDIN_FILEOUT = '-f' # Must write a real file
FILEIN_STDOUT = 'f-' # Must read a real file
STDIN_STDOUT = '--' # Normal pipeline element
SOURCE = '.-' # Must be first, writes stdout
SINK = '-.' # Must be last, reads stdin
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT,
             SOURCE, SINK]

class Template:
    """Class representing a pipeline template."""

    def __init__(self):
        """Template() returns a fresh pipeline template."""
        self.debugging = 0
        self.reset()

    def __repr__(self):
        """t.__repr__() implements repr(t)."""
        return '<Template instance, steps=%r>' % (self.steps,)

    def reset(self):
        """t.reset() restores a pipeline template to its initial state."""
        self.steps = []

    def clone(self):
        """t.clone() returns a new pipeline template with identical
        initial state as the current one."""
        t = Template()
        t.steps = self.steps[:]
        t.debugging = self.debugging
        return t

    def debug(self, flag):
        """t.debug(flag) turns debugging on or off."""
        self.debugging = flag

    def append(self, cmd, kind):
        """t.append(cmd, kind) adds a new step at the end."""
        if type(cmd) is not type(''):
            raise TypeError('Template.append: cmd must be a string')
        if kind not in stepkinds:
            raise ValueError('Template.append: bad kind %r' % (kind,))
        if kind == SOURCE:
            raise ValueError('Template.append: SOURCE can only be prepended')
        if self.steps and self.steps[-1][1] == SINK:
            raise ValueError('Template.append: already ends with SINK')
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError('Template.append: missing $IN in cmd')
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError('Template.append: missing $OUT in cmd')
        self.steps.append((cmd, kind))

    def prepend(self, cmd, kind):
        """t.prepend(cmd, kind) adds a new step at the front."""
        if type(cmd) is not type(''):
            raise TypeError('Template.prepend: cmd must be a string')
        if kind not in stepkinds:
            raise ValueError('Template.prepend: bad kind %r' % (kind,))
        if kind == SINK:
            raise ValueError('Template.prepend: SINK can only be appended')
        if self.steps and self.steps[0][1] == SOURCE:
            raise ValueError('Template.prepend: already begins with SOURCE')
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError('Template.prepend: missing $IN in cmd')
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError('Template.prepend: missing $OUT in cmd')
        self.steps.insert(0, (cmd, kind))

    def open(self, file, rw):
        """t.open(file, rw) returns a pipe or file object open for
        reading or writing; the file is the other end of the pipeline."""
        if rw == 'r':
            return self.open_r(file)
        if rw == 'w':
            return self.open_w(file)
        raise ValueError('Template.open: rw must be \'r\' or \'w\', not %r'
                         % (rw,))

    def open_r(self, file):
        """t.open_r(file) and t.open_w(file) implement
        t.open(file, 'r') and t.open(file, 'w') respectively."""
        if not self.steps:
            return open(file, 'r')
        if self.steps[-1][1] == SINK:
            raise ValueError('Template.open_r: pipeline ends with SINK')
        cmd = self.makepipeline(file, '')
        return os.popen(cmd, 'r')

    def open_w(self, file):
        if not self.steps:
            return open(file, 'w')
        if self.steps[0][1] == SOURCE:
            raise ValueError('Template.open_w: pipeline begins with SOURCE')
        cmd = self.makepipeline('', file)
        return os.popen(cmd, 'w')

    def copy(self, infile, outfile):
        return os.system(self.makepipeline(infile, outfile))

    def makepipeline(self, infile, outfile):
        cmd = makepipeline(infile, self.steps, outfile)
        if self.debugging:
            print(cmd)
            cmd = 'set -x; ' + cmd
        return cmd

def makepipeline(infile, steps, outfile):
    # Build a list with for each command:
    # [input filename or '', command string, kind, output filename or '']
    list = []
    for cmd, kind in steps:
        list.append(['', cmd, kind, ''])
    #
    # Make sure there is at least one step
    #
    if not list:
        list.append(['', 'cat', '--', ''])
    #
    # Take care of the input and output ends
    #
    [cmd, kind] = list[0][1:3]
    if kind[0] == 'f' and not infile:
        list.insert(0, ['', 'cat', '--', ''])
    list[0][0] = infile
    #
    [cmd, kind] = list[-1][1:3]
    if kind[1] == 'f' and not outfile:
        list.append(['', 'cat', '--', ''])
    list[-1][-1] = outfile
    #
    # Invent temporary files to connect stages that need files
    #
    garbage = []
    for i in range(1, len(list)):
        lkind = list[i-1][2]
        rkind = list[i][2]
        if lkind[1] == 'f' or rkind[0] == 'f':
            (fd, temp) = tempfile.mkstemp()
            os.close(fd)
            garbage.append(temp)
            list[i-1][-1] = list[i][0] = temp
    #
    for item in list:
        [inf, cmd, kind, outf] = item
        if kind[1] == 'f':
            cmd = 'OUT=' + quote(outf) + '; ' + cmd
        if kind[0] == 'f':
            cmd = 'IN=' + quote(inf) + '; ' + cmd
        if kind[0] == '-' and inf:
            cmd = cmd + ' <' + quote(inf)
        if kind[1] == '-' and outf:
            cmd = cmd + ' >' + quote(outf)
        item[1] = cmd
    #
    cmdlist = list[0][1]
    for item in list[1:]:
        [cmd, kind] = item[1:3]
        if item[0] == '':
            if 'f' in kind:
                cmd = '{ ' + cmd + '; }'
            cmdlist = cmdlist + ' |\n' + cmd
        else:
            cmdlist = cmdlist + '\n' + cmd
    #
    if garbage:
        rmcmd = 'rm -f'
        for file in garbage:
            rmcmd = rmcmd + ' ' + quote(file)
        trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
        cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
    #
    return cmdlist
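
# A minimal usage sketch (not part of the original module); the guard, the
# 'tr' command, and the output filename are illustrative assumptions.
if __name__ == '__main__':
    t = Template()
    t.append('tr a-z A-Z', '--')     # one stdin-to-stdout step
    with t.open('pipes-demo.txt', 'w') as f:
        f.write('hello world\n')
    # pipes-demo.txt now contains 'HELLO WORLD' (assuming 'tr' is on PATH)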

cosmopolitan/third_party/python/Lib/mime.types

# This is a comment. I love comments. -*- indent-tabs-mode: t -*-
# This file controls what Internet media types are sent to the client for
# given file extension(s). Sending the correct media type to the client
# is important so they know how to handle the content of the file.
# Extra types can either be added here or by using an AddType directive
# in your config files. For more information about Internet media types,
# please read RFC 2045, 2046, 2047, 2048, and 2077. The Internet media type
# registry is at <http://www.iana.org/assignments/media-types/>.
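# Usage note (not part of the original file, a sketch assuming Python's
# stdlib): a file in this format can be loaded with
# mimetypes.read_mime_types('mime.types'), which returns a dict mapping
# extensions such as '.png' to types such as 'image/png'.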
# IANA types
# MIME type Extensions
application/1d-interleaved-parityfec
application/3gpp-ims+xml
application/activemessage
application/andrew-inset ez
application/applefile
application/atom+xml atom
application/atomcat+xml atomcat
application/atomicmail
application/atomsvc+xml atomsvc
application/auth-policy+xml apxml
application/batch-SMTP
application/beep+xml
application/cals-1840
application/ccxml+xml ccxml
application/cdmi-capability cdmia
application/cdmi-container cdmic
application/cdmi-domain cdmid
application/cdmi-object cdmio
application/cdmi-queue cdmiq
application/cea-2018+xml
application/cellml+xml cellml cml
application/cfw
application/cnrp+xml
application/commonground
application/conference-info+xml
application/cpl+xml cpl
application/csta+xml
application/CSTAdata+xml
application/cybercash
application/davmount+xml davmount
application/dca-rft
application/dec-dx
application/dialog-info+xml
application/dicom dcm
application/dns
application/dskpp+xml xmls
application/dssc+der dssc
application/dssc+xml xdssc
application/dvcs dvc
application/ecmascript
application/EDI-Consent
application/EDI-X12
application/EDIFACT
application/emma+xml emma
application/epp+xml
application/eshop
application/exi exi
application/fastinfoset finf
application/fastsoap
# fits, fit, fts: image/fits
application/fits
application/font-tdpfr pfr
application/framework-attributes+xml
application/H224
application/hal+xml hal
application/held+xml
application/http
application/hyperstudio stk
application/ibe-key-request+xml
application/ibe-pkg-reply+xml
application/ibe-pp-data
application/iges
application/im-iscomposing+xml
application/index
application/index.cmd
application/index.obj
application/index.response
application/index.vnd
application/iotp
application/ipfix ipfix
application/ipp
application/isup
application/javascript js
application/json json
application/kpml-request+xml
application/kpml-response+xml
application/lost+xml lostxml
application/mac-binhex40 hqx
application/macwriteii
application/mads+xml mads
application/marc mrc
application/marcxml+xml mrcx
application/mathematica nb ma mb
application/mathml-content+xml
application/mathml-presentation+xml
application/mathml+xml mml
application/mbms-associated-procedure-description+xml
application/mbms-deregister+xml
application/mbms-envelope+xml
application/mbms-msk-response+xml
application/mbms-msk+xml
application/mbms-protection-description+xml
application/mbms-reception-report+xml
application/mbms-register-response+xml
application/mbms-register+xml
application/mbms-user-service-description+xml
application/mbox mbox
application/media_control+xml
application/mediaservercontrol+xml
application/metalink4+xml meta4
application/mets+xml mets
application/mikey
application/mods+xml mods
application/moss-keys
application/moss-signature
application/mosskey-data
application/mosskey-request
application/mp21 m21 mp21
# mp4, mpg4: video/mp4, see RFC 4337
application/mp4
application/mpeg4-generic
application/mpeg4-iod
application/mpeg4-iod-xmt
application/msc-ivr+xml
application/msc-mixer+xml
application/msword doc
application/mxf mxf
application/nasdata
application/news-checkgroups
application/news-groupinfo
application/news-transmission
application/nss
application/ocsp-request orq
application/ocsp-response ors
application/octet-stream bin lha lzh exe class so dll img iso
application/oda oda
application/oebps-package+xml opf
application/ogg ogx
application/parityfec
# xer: application/xcap-error+xml
application/patch-ops-error+xml
application/pdf pdf
application/pgp-encrypted
application/pgp-keys
application/pgp-signature sig
application/pidf-diff+xml
application/pidf+xml
application/pkcs10 p10
application/pkcs7-mime p7m p7c
application/pkcs7-signature p7s
application/pkcs8 p8
# ac: application/vnd.nokia.n-gage.ac+xml
application/pkix-attr-cert
application/pkix-cert cer
application/pkix-crl crl
application/pkix-pkipath pkipath
application/pkixcmp
application/pls+xml pls
application/poc-settings+xml
application/postscript ps eps ai
application/prs.alvestrand.titrax-sheet
application/prs.cww cw cww
application/prs.nprend rnd rct
application/prs.plucker
application/prs.rdf-xml-crypt rdf-crypt
application/prs.xsf+xml xsf
application/pskc+xml pskcxml
application/qsig
application/rdf+xml rdf
application/reginfo+xml rif
application/relax-ng-compact-syntax rnc
application/remote-printing
application/resource-lists-diff+xml rld
application/resource-lists+xml rl
application/riscos
application/rlmi+xml
application/rls-services+xml rs
application/rtf rtf
application/rtx
application/samlassertion+xml
application/samlmetadata+xml
application/sbml+xml
application/scvp-cv-request scq
application/scvp-cv-response scs
application/scvp-vp-request spq
application/scvp-vp-response spp
application/sdp sdp
application/set-payment
application/set-payment-initiation
application/set-registration
application/set-registration-initiation
application/sgml
application/sgml-open-catalog soc
application/shf+xml shf
application/sieve siv sieve
application/simple-filter+xml cl
application/simple-message-summary
application/simpleSymbolContainer
application/slate
# obsoleted by application/smil+xml
application/smil smil smi sml
# smil, smi: application/smil for now
application/smil+xml
application/soap+fastinfoset
application/soap+xml
application/sparql-query rq
application/sparql-results+xml srx
application/spirits-event+xml
application/srgs gram
application/srgs+xml grxml
application/sru+xml sru
application/ssml+xml ssml
application/tamp-apex-update tau
application/tamp-apex-update-confirm auc
application/tamp-community-update tcu
application/tamp-community-update-confirm cuc
application/tamp-error ter
application/tamp-sequence-adjust tsa
application/tamp-sequence-adjust-confirm sac
# tsq: application/timestamp-query
application/tamp-status-query
# tsr: application/timestamp-reply
application/tamp-status-response
application/tamp-update tur
application/tamp-update-confirm tuc
application/tei+xml tei teiCorpus odd
application/thraud+xml tfi
application/timestamp-query tsq
application/timestamp-reply tsr
application/timestamped-data tsd
application/tve-trigger
application/ulpfec
application/vemmi
application/vnd.3gpp.bsf+xml
application/vnd.3gpp.pic-bw-large plb
application/vnd.3gpp.pic-bw-small psb
application/vnd.3gpp.pic-bw-var pvb
# sms: application/vnd.3gpp2.sms
application/vnd.3gpp.sms
application/vnd.3gpp2.bcmcsinfo+xml
application/vnd.3gpp2.sms sms
application/vnd.3gpp2.tcap tcap
application/vnd.3M.Post-it-Notes pwn
application/vnd.accpac.simply.aso aso
application/vnd.accpac.simply.imp imp
application/vnd.acucobol acu
application/vnd.acucorp atc acutc
application/vnd.adobe.fxp fxp fxpl
application/vnd.adobe.partial-upload
application/vnd.adobe.xdp+xml xdp
application/vnd.adobe.xfdf xfdf
application/vnd.aether.imp
application/vnd.ah-barcode
application/vnd.ahead.space ahead
application/vnd.airzip.filesecure.azf azf
application/vnd.airzip.filesecure.azs azs
application/vnd.americandynamics.acc acc
application/vnd.amiga.ami ami
application/vnd.amundsen.maze+xml
application/vnd.anser-web-certificate-issue-initiation cii
# Not in IANA listing, but is on FTP site?
application/vnd.anser-web-funds-transfer-initiation fti
# atx: audio/ATRAC-X
application/vnd.antix.game-component
application/vnd.apple.installer+xml dist distz pkg mpkg
# m3u: application/x-mpegurl for now
application/vnd.apple.mpegurl m3u8
application/vnd.aristanetworks.swi swi
application/vnd.audiograph aep
application/vnd.autopackage package
application/vnd.avistar+xml
application/vnd.blueice.multipass mpm
application/vnd.bluetooth.ep.oob ep
application/vnd.bmi bmi
application/vnd.businessobjects rep
application/vnd.cab-jscript
application/vnd.canon-cpdl
application/vnd.canon-lips
application/vnd.cendio.thinlinc.clientconf tlclient
application/vnd.chemdraw+xml cdxml
application/vnd.chipnuts.karaoke-mmd mmd
application/vnd.cinderella cdy
application/vnd.cirpack.isdn-ext
application/vnd.claymore cla
application/vnd.cloanto.rp9 rp9
application/vnd.clonk.c4group c4g c4d c4f c4p c4u
application/vnd.cluetrust.cartomobile-config c11amc
application/vnd.cluetrust.cartomobile-config-pkg c11amz
# icc: application/vnd.iccprofile
application/vnd.commerce-battelle ica icf icd ic0 ic1 ic2 ic3 ic4 ic5 ic6 ic7 ic8
application/vnd.commonspace csp cst
application/vnd.contact.cmsg cdbcmsg
application/vnd.cosmocaller cmc
application/vnd.crick.clicker clkx
application/vnd.crick.clicker.keyboard clkk
application/vnd.crick.clicker.palette clkp
application/vnd.crick.clicker.template clkt
application/vnd.crick.clicker.wordbank clkw
application/vnd.criticaltools.wbs+xml wbs
application/vnd.ctc-posml pml
application/vnd.ctct.ws+xml
application/vnd.cups-pdf
application/vnd.cups-postscript
application/vnd.cups-ppd ppd
application/vnd.cups-raster
application/vnd.cups-raw
application/vnd.curl curl
application/vnd.cybank
application/vnd.data-vision.rdz rdz
application/vnd.dece.data uvf uvvf uvd uvvd
application/vnd.dece.ttml+xml uvt uvvt
application/vnd.dece.unspecified uvx uvvx
application/vnd.denovo.fcselayout-link fe_launch
application/vnd.dir-bi.plate-dl-nosuffix
application/vnd.dna dna
application/vnd.dolby.mobile.1
application/vnd.dolby.mobile.2
application/vnd.dpgraph dpg mwc dpgraph
application/vnd.dreamfactory dfac
application/vnd.dvb.ait ait
# class: application/octet-stream
application/vnd.dvb.dvbj
application/vnd.dvb.esgcontainer
application/vnd.dvb.ipdcdftnotifaccess
application/vnd.dvb.ipdcesgaccess
application/vnd.dvb.ipdcesgaccess2
application/vnd.dvb.ipdcesgpdd
application/vnd.dvb.ipdcroaming
application/vnd.dvb.iptv.alfec-base
application/vnd.dvb.iptv.alfec-enhancement
application/vnd.dvb.notif-aggregate-root+xml
application/vnd.dvb.notif-container+xml
application/vnd.dvb.notif-generic+xml
application/vnd.dvb.notif-ia-msglist+xml
application/vnd.dvb.notif-ia-registration-request+xml
application/vnd.dvb.notif-ia-registration-response+xml
application/vnd.dvb.notif-init+xml
# pfr: application/font-tdpfr
application/vnd.dvb.pfr
application/vnd.dvb.service svc
# dxr: application/x-director
application/vnd.dxr
application/vnd.dynageo geo
application/vnd.easykaraoke.cdgdownload
application/vnd.ecdis-update
application/vnd.ecowin.chart mag
application/vnd.ecowin.filerequest
application/vnd.ecowin.fileupdate
application/vnd.ecowin.series
application/vnd.ecowin.seriesrequest
application/vnd.ecowin.seriesupdate
application/vnd.enliven nml
application/vnd.epson.esf esf
application/vnd.epson.msf msf
application/vnd.epson.quickanime qam
application/vnd.epson.salt slt
application/vnd.epson.ssf ssf
application/vnd.ericsson.quickcall qcall qca
application/vnd.eszigno3+xml es3 et3
application/vnd.etsi.aoc+xml
application/vnd.etsi.cug+xml
application/vnd.etsi.iptvcommand+xml
application/vnd.etsi.iptvdiscovery+xml
application/vnd.etsi.iptvprofile+xml
application/vnd.etsi.iptvsad-bc+xml
application/vnd.etsi.iptvsad-cod+xml
application/vnd.etsi.iptvsad-npvr+xml
application/vnd.etsi.iptvservice+xml
application/vnd.etsi.iptvsync+xml
application/vnd.etsi.iptvueprofile+xml
application/vnd.etsi.mcid+xml
application/vnd.etsi.overload-control-policy-dataset+xml
application/vnd.etsi.sci+xml
application/vnd.etsi.simservs+xml
application/vnd.etsi.tsl.der
application/vnd.etsi.tsl+xml
application/vnd.eudora.data
application/vnd.ezpix-album ez2
application/vnd.ezpix-package ez3
application/vnd.f-secure.mobile
application/vnd.fdf fdf
application/vnd.fdsn.mseed msd mseed
application/vnd.fdsn.seed seed dataless
application/vnd.ffsns
# all extensions: application/vnd.hbci
application/vnd.fints
application/vnd.FloGraphIt gph
application/vnd.fluxtime.clip ftc
application/vnd.font-fontforge-sfd sfd
application/vnd.framemaker fm
application/vnd.frogans.fnc fnc
application/vnd.frogans.ltf ltf
application/vnd.fsc.weblaunch fsc
application/vnd.fujitsu.oasys oas
application/vnd.fujitsu.oasys2 oa2
application/vnd.fujitsu.oasys3 oa3
application/vnd.fujitsu.oasysgp fg5
application/vnd.fujitsu.oasysprs bh2
application/vnd.fujixerox.ART-EX
application/vnd.fujixerox.ART4
application/vnd.fujixerox.ddd ddd
application/vnd.fujixerox.docuworks xdw
application/vnd.fujixerox.docuworks.binder xbd
application/vnd.fujixerox.HBPL
application/vnd.fut-misnet
application/vnd.fuzzysheet fzs
application/vnd.genomatix.tuxedo txd
application/vnd.geocube+xml g3 g³
application/vnd.geogebra.file ggb
application/vnd.geogebra.tool ggt
application/vnd.geometry-explorer gex gre
application/vnd.geonext gxt
application/vnd.geoplan g2w
application/vnd.geospace g3w
application/vnd.globalplatform.card-content-mgt
application/vnd.globalplatform.card-content-mgt-response
# application/vnd.gmx deprecated 2009-03-04
application/vnd.google-earth.kml+xml kml
application/vnd.google-earth.kmz kmz
application/vnd.grafeq gqf gqs
application/vnd.gridmp
application/vnd.groove-account gac
application/vnd.groove-help ghf
application/vnd.groove-identity-message gim
application/vnd.groove-injector grv
application/vnd.groove-tool-message gtm
application/vnd.groove-tool-template tpl
application/vnd.groove-vcard vcg
application/vnd.HandHeld-Entertainment+xml zmm
application/vnd.hbci hbci hbc kom upa pkd bpd
# rep: application/vnd.businessobjects
application/vnd.hcl-bireports
application/vnd.hhe.lesson-player les
application/vnd.hp-HPGL hpgl
application/vnd.hp-hpid hpi hpid
application/vnd.hp-hps hps
application/vnd.hp-jlyt jlt
application/vnd.hp-PCL pcl
application/vnd.hp-PCLXL
application/vnd.httphone
application/vnd.hydrostatix.sof-data sfd-hdstx
application/vnd.hzn-3d-crossword x3d
application/vnd.ibm.afplinedata
application/vnd.ibm.electronic-media emm
application/vnd.ibm.MiniPay mpy
application/vnd.ibm.modcap list3820 listafp afp pseg3820
application/vnd.ibm.rights-management irm
application/vnd.ibm.secure-container sc
application/vnd.iccprofile icc icm
application/vnd.igloader igl
application/vnd.immervision-ivp ivp
application/vnd.immervision-ivu ivu
application/vnd.informedcontrol.rms+xml
# application/vnd.informix-visionary obsoleted by application/vnd.visionary
application/vnd.infotech.project
application/vnd.infotech.project+xml
application/vnd.insors.igm igm
application/vnd.intercon.formnet xpw xpx
application/vnd.intergeo i2g
application/vnd.intertrust.digibox
application/vnd.intertrust.nncp
application/vnd.intu.qbo qbo
application/vnd.intu.qfx qfx
application/vnd.iptc.g2.conceptitem+xml
application/vnd.iptc.g2.knowledgeitem+xml
application/vnd.iptc.g2.newsitem+xml
application/vnd.iptc.g2.packageitem+xml
application/vnd.ipunplugged.rcprofile rcprofile
application/vnd.irepository.package+xml irp
application/vnd.is-xpr xpr
application/vnd.isac.fcs fcs
application/vnd.jam jam
application/vnd.japannet-directory-service
application/vnd.japannet-jpnstore-wakeup
application/vnd.japannet-payment-wakeup
application/vnd.japannet-registration
application/vnd.japannet-registration-wakeup
application/vnd.japannet-setstore-wakeup
application/vnd.japannet-verification
application/vnd.japannet-verification-wakeup
application/vnd.jcp.javame.midlet-rms rms
application/vnd.jisp jisp
application/vnd.joost.joda-archive joda
application/vnd.kahootz ktz ktr
application/vnd.kde.karbon karbon
application/vnd.kde.kchart chrt
application/vnd.kde.kformula kfo
application/vnd.kde.kivio flw
application/vnd.kde.kontour kon
application/vnd.kde.kpresenter kpr kpt
application/vnd.kde.kspread ksp
application/vnd.kde.kword kwd kwt
application/vnd.kenameaapp htke
application/vnd.kidspiration kia
application/vnd.Kinar kne knp sdf
application/vnd.koan skp skd skm skt
application/vnd.kodak-descriptor sse
application/vnd.las.las+xml lasxml
application/vnd.liberty-request+xml
application/vnd.llamagraphics.life-balance.desktop lbd
application/vnd.llamagraphics.life-balance.exchange+xml lbe
application/vnd.lotus-1-2-3 123 wk4 wk3 wk1
application/vnd.lotus-approach apr vew
application/vnd.lotus-freelance prz pre
application/vnd.lotus-notes nsf ntf ndl ns4 ns3 ns2 nsh nsg
application/vnd.lotus-organizer or3 or2 org
application/vnd.lotus-screencam scm
application/vnd.lotus-wordpro lwp sam
application/vnd.macports.portpkg portpkg
application/vnd.marlin.drm.actiontoken+xml
application/vnd.marlin.drm.conftoken+xml
application/vnd.marlin.drm.license+xml
application/vnd.marlin.drm.mdcf mdc
application/vnd.mcd mcd
application/vnd.medcalcdata mc1
application/vnd.mediastation.cdkey cdkey
application/vnd.meridian-slingshot
application/vnd.MFER mwf
application/vnd.mfmp mfm
application/vnd.micrografx.flo flo
application/vnd.micrografx.igx igx
application/vnd.mif mif
application/vnd.minisoft-hp3000-save
application/vnd.mitsubishi.misty-guard.trustweb
application/vnd.Mobius.DAF daf
application/vnd.Mobius.DIS dis
application/vnd.Mobius.MBK mbk
application/vnd.Mobius.MQY mqy
application/vnd.Mobius.MSL msl
application/vnd.Mobius.PLC plc
application/vnd.Mobius.TXF txf
application/vnd.mophun.application mpn
application/vnd.mophun.certificate mpc
application/vnd.motorola.flexsuite
application/vnd.motorola.flexsuite.adsi
application/vnd.motorola.flexsuite.fis
application/vnd.motorola.flexsuite.gotap
application/vnd.motorola.flexsuite.kmr
application/vnd.motorola.flexsuite.ttc
application/vnd.motorola.flexsuite.wem
application/vnd.motorola.iprm
application/vnd.mozilla.xul+xml xul
application/vnd.ms-artgalry cil
application/vnd.ms-asf asf
application/vnd.ms-cab-compressed cab
application/vnd.ms-excel xls
application/vnd.ms-excel.template.macroEnabled.12 xltm
application/vnd.ms-excel.addin.macroEnabled.12 xlam
application/vnd.ms-excel.sheet.binary.macroEnabled.12 xlsb
application/vnd.ms-excel.sheet.macroEnabled.12 xlsm
application/vnd.ms-fontobject eot
application/vnd.ms-htmlhelp chm
application/vnd.ms-ims ims
application/vnd.ms-lrm lrm
application/vnd.ms-office.activeX+xml
application/vnd.ms-officetheme thmx
application/vnd.ms-playready.initiator+xml
application/vnd.ms-powerpoint ppt
application/vnd.ms-powerpoint.addin.macroEnabled.12 ppam
application/vnd.ms-powerpoint.presentation.macroEnabled.12 pptm
application/vnd.ms-powerpoint.slide.macroEnabled.12 sldm
application/vnd.ms-powerpoint.slideshow.macroEnabled.12 ppsm
application/vnd.ms-powerpoint.template.macroEnabled.12 potm
application/vnd.ms-project mpp
application/vnd.ms-tnef tnef tnf
application/vnd.ms-wmdrm.lic-chlg-req
application/vnd.ms-wmdrm.lic-resp
application/vnd.ms-wmdrm.meter-chlg-req
application/vnd.ms-wmdrm.meter-resp
application/vnd.ms-word.document.macroEnabled.12 docm
application/vnd.ms-word.template.macroEnabled.12 dotm
application/vnd.ms-works wcm wdb wks wps
application/vnd.ms-wpl wpl
application/vnd.ms-xpsdocument xps
application/vnd.mseq mseq
application/vnd.msign
application/vnd.multiad.creator crtr
application/vnd.multiad.creator.cif cif
application/vnd.music-niff
application/vnd.musician mus
application/vnd.muvee.style msty
application/vnd.ncd.control
application/vnd.ncd.reference
application/vnd.nervana entity request bkm kcm
application/vnd.netfpx
application/vnd.neurolanguage.nlu nlu
application/vnd.noblenet-directory nnd
application/vnd.noblenet-sealer nns
application/vnd.noblenet-web nnw
application/vnd.nokia.catalogs
application/vnd.nokia.conml+wbxml
application/vnd.nokia.conml+xml
application/vnd.nokia.iptv.config+xml
application/vnd.nokia.iSDS-radio-presets
application/vnd.nokia.landmark+wbxml
application/vnd.nokia.landmark+xml
application/vnd.nokia.landmarkcollection+xml
application/vnd.nokia.n-gage.ac+xml ac
application/vnd.nokia.n-gage.data ngdat
application/vnd.nokia.n-gage.symbian.install n-gage
application/vnd.nokia.ncd
application/vnd.nokia.pcd+wbxml
application/vnd.nokia.pcd+xml
application/vnd.nokia.radio-preset rpst
application/vnd.nokia.radio-presets rpss
application/vnd.novadigm.EDM edm
application/vnd.novadigm.EDX edx
application/vnd.novadigm.EXT ext
application/vnd.ntt-local.file-transfer
application/vnd.ntt-local.sip-ta_remote
application/vnd.ntt-local.sip-ta_tcp_stream
application/vnd.oasis.opendocument.chart odc
application/vnd.oasis.opendocument.chart-template otc
application/vnd.oasis.opendocument.database odb
application/vnd.oasis.opendocument.formula odf
application/vnd.oasis.opendocument.formula-template otf
application/vnd.oasis.opendocument.graphics odg
application/vnd.oasis.opendocument.graphics-template otg
application/vnd.oasis.opendocument.image odi
application/vnd.oasis.opendocument.image-template oti
application/vnd.oasis.opendocument.presentation odp
application/vnd.oasis.opendocument.presentation-template otp
application/vnd.oasis.opendocument.spreadsheet ods
application/vnd.oasis.opendocument.spreadsheet-template ots
application/vnd.oasis.opendocument.text odt
application/vnd.oasis.opendocument.text-master odm
application/vnd.oasis.opendocument.text-template ott
application/vnd.oasis.opendocument.text-web oth
application/vnd.obn
application/vnd.oipf.contentaccessdownload+xml
application/vnd.oipf.contentaccessstreaming+xml
application/vnd.oipf.cspg-hexbinary
application/vnd.oipf.dae.svg+xml
application/vnd.oipf.dae.xhtml+xml
application/vnd.oipf.mippvcontrolmessage+xml
application/vnd.oipf.pae.gem
application/vnd.oipf.spdiscovery+xml
application/vnd.oipf.spdlist+xml
application/vnd.oipf.ueprofile+xml
application/vnd.olpc-sugar xo
application/vnd.oma.bcast.associated-procedure-parameter+xml
application/vnd.oma.bcast.drm-trigger+xml
application/vnd.oma.bcast.imd+xml
application/vnd.oma.bcast.ltkm
application/vnd.oma.bcast.notification+xml
application/vnd.oma.bcast.provisioningtrigger
application/vnd.oma.bcast.sgboot
application/vnd.oma.bcast.sgdd+xml
application/vnd.oma.bcast.sgdu
application/vnd.oma.bcast.simple-symbol-container
application/vnd.oma.bcast.smartcard-trigger+xml
application/vnd.oma.bcast.sprov+xml
application/vnd.oma.bcast.stkm
application/vnd.oma.cab-address-book+xml
application/vnd.oma.cab-feature-handler+xml
application/vnd.oma.cab-pcc+xml
application/vnd.oma.cab-user-prefs+xml
application/vnd.oma.dcd
application/vnd.oma.dcdc
application/vnd.oma.dd2+xml dd2
application/vnd.oma.drm.risd+xml
application/vnd.oma.group-usage-list+xml
application/vnd.oma.poc.detailed-progress-report+xml
application/vnd.oma.poc.final-report+xml
application/vnd.oma.poc.groups+xml
application/vnd.oma.poc.invocation-descriptor+xml
application/vnd.oma.poc.optimized-progress-report+xml
application/vnd.oma.push
application/vnd.oma.scidm.messages+xml
application/vnd.oma.xcap-directory+xml
application/vnd.oma-scws-config
application/vnd.oma-scws-http-request
application/vnd.oma-scws-http-response
application/vnd.omads-email+xml
application/vnd.omads-file+xml
application/vnd.omads-folder+xml
application/vnd.omaloc-supl-init
application/vnd.openofficeorg.extension oxt
application/vnd.openxmlformats-officedocument.custom-properties+xml
application/vnd.openxmlformats-officedocument.customXmlProperties+xml
application/vnd.openxmlformats-officedocument.drawing+xml
application/vnd.openxmlformats-officedocument.drawingml.chart+xml
application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml
application/vnd.openxmlformats-officedocument.drawingml.diagramColors+xml
application/vnd.openxmlformats-officedocument.drawingml.diagramData+xml
application/vnd.openxmlformats-officedocument.drawingml.diagramLayout+xml
application/vnd.openxmlformats-officedocument.drawingml.diagramStyle+xml
application/vnd.openxmlformats-officedocument.extended-properties+xml
application/vnd.openxmlformats-officedocument.presentationml.commentAuthors+xml
application/vnd.openxmlformats-officedocument.presentationml.comments+xml
application/vnd.openxmlformats-officedocument.presentationml.handoutMaster+xml
application/vnd.openxmlformats-officedocument.presentationml.notesMaster+xml
application/vnd.openxmlformats-officedocument.presentationml.notesSlide+xml
application/vnd.openxmlformats-officedocument.presentationml.presProps+xml
application/vnd.openxmlformats-officedocument.presentationml.presentation pptx
application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml
application/vnd.openxmlformats-officedocument.presentationml.slide sldx
application/vnd.openxmlformats-officedocument.presentationml.slide+xml
application/vnd.openxmlformats-officedocument.presentationml.slideLayout+xml
application/vnd.openxmlformats-officedocument.presentationml.slideMaster+xml
application/vnd.openxmlformats-officedocument.presentationml.slideUpdateInfo+xml
application/vnd.openxmlformats-officedocument.presentationml.slideshow ppsx
application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml
application/vnd.openxmlformats-officedocument.presentationml.tableStyles+xml
application/vnd.openxmlformats-officedocument.presentationml.tags+xml
application/vnd.openxmlformats-officedocument.presentationml.template potx
application/vnd.openxmlformats-officedocument.presentationml.template.main+xml
application/vnd.openxmlformats-officedocument.presentationml.viewProps+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.externalLink+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheDefinition+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.pivotCacheRecords+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.pivotTable+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.queryTable+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.revisionHeaders+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.revisionLog+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet xlsx
application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.sheetMetadata+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.tableSingleCells+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.template xltx
application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.userNames+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.volatileDependencies+xml
application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml
application/vnd.openxmlformats-officedocument.theme+xml
application/vnd.openxmlformats-officedocument.themeOverride+xml
application/vnd.openxmlformats-officedocument.vmlDrawing
application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.document docx
application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.template dotx
application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml
application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml
application/vnd.openxmlformats-package.core-properties+xml
application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml
application/vnd.openxmlformats-package.relationships+xml
application/vnd.osa.netdeploy ndc
application/vnd.osgeo.mapguide.package mgp
# jar: application/x-java-archive
application/vnd.osgi.bundle
application/vnd.osgi.dp dp
application/vnd.otps.ct-kip+xml
application/vnd.palm prc pdb pqa oprc
application/vnd.paos+xml
application/vnd.pawaafile paw
application/vnd.pg.format str
application/vnd.pg.osasli ei6
application/vnd.piaccess.application-license pil
application/vnd.picsel efif
application/vnd.pmi.widget wg
application/vnd.poc.group-advertisement+xml
application/vnd.pocketlearn plf
application/vnd.powerbuilder6 pbd
application/vnd.powerbuilder6-s
application/vnd.powerbuilder7
application/vnd.powerbuilder7-s
application/vnd.powerbuilder75
application/vnd.powerbuilder75-s
application/vnd.preminet preminet
application/vnd.previewsystems.box box vbox
application/vnd.proteus.magazine mgz
application/vnd.publishare-delta-tree qps
# pti: image/prs.pti
application/vnd.pvi.ptid1 ptid
application/vnd.pwg-multiplexed
application/vnd.pwg-xhtml-print+xml
application/vnd.qualcomm.brew-app-res bar
application/vnd.Quark.QuarkXPress qxd qxt qwd qwt qxl qxb
application/vnd.quobject-quoxdocument quox quiz
application/vnd.radisys.moml+xml
application/vnd.radisys.msml-audit-conf+xml
application/vnd.radisys.msml-audit-conn+xml
application/vnd.radisys.msml-audit-dialog+xml
application/vnd.radisys.msml-audit-stream+xml
application/vnd.radisys.msml-audit+xml
application/vnd.radisys.msml-conf+xml
application/vnd.radisys.msml-dialog-base+xml
application/vnd.radisys.msml-dialog-fax-detect+xml
application/vnd.radisys.msml-dialog-fax-sendrecv+xml
application/vnd.radisys.msml-dialog-group+xml
application/vnd.radisys.msml-dialog-speech+xml
application/vnd.radisys.msml-dialog-transform+xml
application/vnd.radisys.msml-dialog+xml
application/vnd.radisys.msml+xml
application/vnd.rainstor.data tree
application/vnd.rapid
application/vnd.realvnc.bed bed
application/vnd.recordare.musicxml mxl
application/vnd.recordare.musicxml+xml
application/vnd.RenLearn.rlprint
application/vnd.rig.cryptonote cryptonote
application/vnd.route66.link66+xml link66
application/vnd.ruckus.download
application/vnd.s3sms
application/vnd.sailingtracker.track st
application/vnd.sbm.cid
application/vnd.sbm.mid2
application/vnd.scribus scd sla slaz
application/vnd.sealed.3df s3df
application/vnd.sealed.csf scsf
application/vnd.sealed.doc sdoc sdo s1w
application/vnd.sealed.eml seml sem
application/vnd.sealed.mht smht smh
application/vnd.sealed.net
# spp: application/scvp-vp-response
application/vnd.sealed.ppt sppt s1p
application/vnd.sealed.tiff stif
application/vnd.sealed.xls sxls sxl s1e
# stm: audio/x-stm
application/vnd.sealedmedia.softseal.html stml s1h
application/vnd.sealedmedia.softseal.pdf spdf spd s1a
application/vnd.seemail see
application/vnd.sema sema
application/vnd.semd semd
application/vnd.semf semf
application/vnd.shana.informed.formdata ifm
application/vnd.shana.informed.formtemplate itp
application/vnd.shana.informed.interchange iif
application/vnd.shana.informed.package ipk
application/vnd.SimTech-MindMapper twd twds
application/vnd.smaf mmf
application/vnd.smart.notebook notebook
application/vnd.smart.teacher teacher
application/vnd.software602.filler.form+xml fo
application/vnd.software602.filler.form-xml-zip zfo
application/vnd.solent.sdkm+xml sdkm sdkd
application/vnd.spotfire.dxp dxp
application/vnd.spotfire.sfs sfs
application/vnd.sss-cod
application/vnd.sss-dtf
application/vnd.sss-ntf
application/vnd.stepmania.stepchart sm
application/vnd.street-stream
application/vnd.sun.wadl+xml wadl
application/vnd.sus-calendar sus susp
application/vnd.svd
application/vnd.swiftview-ics
application/vnd.syncml.dm.notification
application/vnd.syncml.ds.notification
application/vnd.syncml.dm+wbxml bdm
application/vnd.syncml.dm+xml xdm
application/vnd.syncml+xml xsm
application/vnd.tao.intent-module-archive tao
application/vnd.tmobile-livetv tmo
application/vnd.trid.tpt tpt
application/vnd.triscape.mxs mxs
application/vnd.trueapp tra
application/vnd.truedoc
# cab: application/vnd.ms-cab-compressed
application/vnd.ubisoft.webplayer
application/vnd.ufdl ufdl ufd frm
application/vnd.uiq.theme utz
application/vnd.umajin umj
application/vnd.unity unityweb
application/vnd.uoml+xml uoml uo
application/vnd.uplanet.alert
application/vnd.uplanet.alert-wbxml
application/vnd.uplanet.bearer-choice
application/vnd.uplanet.bearer-choice-wbxml
application/vnd.uplanet.cacheop
application/vnd.uplanet.cacheop-wbxml
application/vnd.uplanet.channel
application/vnd.uplanet.channel-wbxml
application/vnd.uplanet.list
application/vnd.uplanet.list-wbxml
application/vnd.uplanet.listcmd
application/vnd.uplanet.listcmd-wbxml
application/vnd.uplanet.signal
application/vnd.vcx vcx
# sxi: application/vnd.sun.xml.impress
application/vnd.vd-study mxi study-inter model-inter
# mcd: application/vnd.mcd
application/vnd.vectorworks vwx
application/vnd.verimatrix.vcas
application/vnd.vidsoft.vidconference vsc
application/vnd.visio vsd vst vsw vss
application/vnd.visionary vis
# vsc: application/vnd.vidsoft.vidconference
application/vnd.vividence.scriptfile
application/vnd.vsf vsf
application/vnd.wap.sic sic
application/vnd.wap.slc slc
application/vnd.wap.wbxml wbxml
application/vnd.wap.wmlc wmlc
application/vnd.wap.wmlscriptc wmlsc
application/vnd.webturbo wtb
application/vnd.wfa.wsc wsc
application/vnd.wmc wmc
application/vnd.wmf.bootstrap
# nb: application/mathematica for now
application/vnd.wolfram.mathematica
application/vnd.wolfram.mathematica.package m
application/vnd.wolfram.player nbp
application/vnd.wordperfect wpd
application/vnd.wqd wqd
application/vnd.wrq-hp3000-labelled
application/vnd.wt.stf stf
application/vnd.wv.csp+xml
application/vnd.wv.csp+wbxml wv
application/vnd.wv.ssp+xml
application/vnd.xara xar
application/vnd.xfdl xfdl xfd
application/vnd.xfdl.webform
application/vnd.xmi+xml
application/vnd.xmpie.cpkg cpkg
application/vnd.xmpie.dpkg dpkg
# dpkg: application/vnd.xmpie.dpkg
application/vnd.xmpie.plan
application/vnd.xmpie.ppkg ppkg
application/vnd.xmpie.xlim xlim
application/vnd.yamaha.hv-dic hvd
application/vnd.yamaha.hv-script hvs
application/vnd.yamaha.hv-voice hvp
application/vnd.yamaha.openscoreformat osf
application/vnd.yamaha.openscoreformat.osfpvg+xml
application/vnd.yamaha.remote-setup
application/vnd.yamaha.smaf-audio saf
application/vnd.yamaha.smaf-phrase spf
application/vnd.yamaha.tunnel-udpencap
application/vnd.yellowriver-custom-menu cmp
application/vnd.zul zir zirz
application/vnd.zzazz.deck+xml zaz
application/voicexml+xml vxml
application/vq-rtcp-xr
application/watcherinfo+xml wif
application/whoispp-query
application/whoispp-response
application/widget wgt
application/wita
application/wordperfect5.1
application/wsdl+xml wsdl
application/wspolicy+xml wspolicy
application/x400-bp
application/xcap-att+xml xav
application/xcap-caps+xml xca
application/xcap-diff+xml xdf
application/xcap-el+xml xel
application/xcap-error+xml xer
application/xcap-ns+xml xns
application/xcon-conference-info-diff+xml
application/xcon-conference-info+xml
application/xenc+xml
application/xhtml+xml xhtml xhtm xht
# application/xhtml-voice+xml obsoleted by application/xv+xml
# xml, xsd, rng: text/xml
application/xml
# mod: audio/x-mod
application/xml-dtd dtd
# ent: text/xml-external-parsed-entity
application/xml-external-parsed-entity
application/xmpp+xml
application/xop+xml xop
application/xslt+xml xsl xslt
application/xv+xml mxml xhvml xvml xvm
application/yang yang
application/yin+xml yin
application/zip zip
audio/1d-interleaved-parityfec
audio/32kadpcm 726
# 3gp, 3gpp: video/3gpp
audio/3gpp
# 3g2, 3gpp2: video/3gpp2
audio/3gpp2
audio/ac3 ac3
audio/AMR amr
audio/AMR-WB awb
audio/amr-wb+
audio/asc acn
# aa3, omg: audio/ATRAC3
audio/ATRAC-ADVANCED-LOSSLESS aal
# aa3, omg: audio/ATRAC3
audio/ATRAC-X atx
audio/ATRAC3 at3 aa3 omg
audio/basic au snd
audio/BV16
audio/BV32
audio/clearmode
audio/CN
audio/DAT12
audio/dls dls
audio/dsr-es201108
audio/dsr-es202050
audio/dsr-es202211
audio/dsr-es202212
audio/DVI4
audio/eac3
audio/EVRC evc
# qcp: audio/qcelp
audio/EVRC-QCP
audio/EVRC0
audio/EVRC1
audio/EVRCB evb
audio/EVRCB0
audio/EVRCWB evw
audio/EVRCWB0
audio/EVRCWB1
audio/G719
audio/G722
audio/G7221
audio/G723
audio/G726-16
audio/G726-24
audio/G726-32
audio/G726-40
audio/G728
audio/G729
audio/G7291
audio/G729D
audio/G729E
audio/GSM
audio/GSM-EFR
audio/GSM-HR-08
audio/iLBC lbc
audio/ip-mr_v2.5
# wav: audio/wav
audio/L16 l16
audio/L20
audio/L24
audio/L8
audio/LPC
audio/mobile-xmf mxmf
# mp4, mpg4: video/mp4, see RFC 4337
audio/mp4
audio/MP4A-LATM
audio/MPA
audio/mpa-robust
audio/mpeg mp3 mpga mp1 mp2
audio/mpeg4-generic
audio/ogg oga ogg spx
audio/parityfec
audio/PCMA
audio/PCMA-WB
audio/PCMU
audio/PCMU-WB
audio/prs.sid sid psid
audio/qcelp qcp
audio/RED
audio/rtp-enc-aescm128
audio/rtp-midi
audio/rtx
audio/SMV smv
# qcp: audio/qcelp, see RFC 3625
audio/SMV-QCP
audio/SMV0
# mid: audio/midi
audio/sp-midi
audio/speex
audio/t140c
audio/t38
audio/telephone-event
audio/tone
audio/UEMCLIP
audio/ulpfec
audio/VDVI
audio/VMR-WB
audio/vnd.3gpp.iufp
audio/vnd.4SB
audio/vnd.audikoz koz
audio/vnd.CELP
audio/vnd.cisco.nse
audio/vnd.cmles.radio-events
audio/vnd.cns.anp1
audio/vnd.cns.inf1
audio/vnd.dece.audio uva uvva
audio/vnd.digital-winds eol
audio/vnd.dlna.adts
audio/vnd.dolby.heaac.1
audio/vnd.dolby.heaac.2
audio/vnd.dolby.mlp mlp
audio/vnd.dolby.mps
audio/vnd.dolby.pl2
audio/vnd.dolby.pl2x
audio/vnd.dolby.pl2z
audio/vnd.dolby.pulse.1
audio/vnd.dra
# wav: audio/wav, cpt: application/mac-compactpro
audio/vnd.dts dts
audio/vnd.dts.hd dtshd
audio/vnd.dvb.file dvb
audio/vnd.everad.plj plj
# rm: audio/x-pn-realaudio
audio/vnd.hns.audio
audio/vnd.lucent.voice lvp
audio/vnd.ms-playready.media.pya pya
# mxmf: audio/mobile-xmf
audio/vnd.nokia.mobile-xmf
audio/vnd.nortel.vbk vbk
audio/vnd.nuera.ecelp4800 ecelp4800
audio/vnd.nuera.ecelp7470 ecelp7470
audio/vnd.nuera.ecelp9600 ecelp9600
audio/vnd.octel.sbc
# audio/vnd.qcelp deprecated in favour of audio/qcelp
audio/vnd.rhetorex.32kadpcm
audio/vnd.rip rip
audio/vnd.sealedmedia.softseal.mpeg smp3 smp s1m
audio/vnd.vmx.cvsd
audio/vorbis
audio/vorbis-config
image/cgm
image/fits fits fit fts
image/g3fax
image/gif gif
image/ief ief
image/jp2 jp2 jpg2
image/jpeg jpg jpeg jpe jfif
image/jpm jpm jpgm
image/jpx jpx jpf
image/ktx ktx
image/naplps
image/png png
image/prs.btif btif btf
image/prs.pti pti
image/svg+xml svg svgz
image/t38 t38
image/tiff tiff tif
image/tiff-fx tfx
image/vnd.adobe.photoshop psd
image/vnd.cns.inf2
image/vnd.dece.graphic uvi uvvi uvg uvvg
image/vnd.djvu djvu djv
image/vnd.dvb.subtitle sub
image/vnd.dwg
image/vnd.dxf dxf
image/vnd.fastbidsheet fbs
image/vnd.fpx fpx
image/vnd.fst fst
image/vnd.fujixerox.edmics-mmr mmr
image/vnd.fujixerox.edmics-rlc rlc
image/vnd.globalgraphics.pgb pgb
image/vnd.microsoft.icon ico
image/vnd.mix
image/vnd.ms-modi mdi
image/vnd.net-fpx
image/vnd.radiance hdr rgbe xyze
image/vnd.sealed.png spng spn s1n
image/vnd.sealedmedia.softseal.gif sgif sgi s1g
image/vnd.sealedmedia.softseal.jpg sjpg sjp s1j
image/vnd.svf
image/vnd.wap.wbmp wbmp
image/vnd.xiff xif
message/CPIM
message/delivery-status
message/disposition-notification
message/external-body
message/feedback-report
message/global u8msg
message/global-delivery-status u8dsn
message/global-disposition-notification u8mdn
message/global-headers u8hdr
message/http
# cl: application/simple-filter+xml
message/imdn+xml
# message/news obsoleted by message/rfc822
message/partial
message/rfc822 eml mail art
message/s-http
message/sip
message/sipfrag
message/tracking-status
message/vnd.si.simp
model/iges igs iges
model/mesh msh mesh silo
model/vnd.collada+xml dae
model/vnd.dwf dwf
# 3dml, 3dm: text/vnd.in3d.3dml
model/vnd.flatland.3dml
model/vnd.gdl gdl gsm win dor lmp rsm msm ism
model/vnd.gs-gdl
model/vnd.gtw gtw
model/vnd.moml+xml moml
model/vnd.mts mts
model/vnd.parasolid.transmit.binary x_b xmt_bin
model/vnd.parasolid.transmit.text x_t xmt_txt
model/vnd.vtu vtu
model/vrml wrl vrml
multipart/alternative
multipart/appledouble
multipart/byteranges
multipart/digest
multipart/encrypted
multipart/form-data
multipart/header-set
multipart/mixed
multipart/parallel
multipart/related
multipart/report
multipart/signed
multipart/voice-message vpm
text/1d-interleaved-parityfec
text/calendar ics ifb
text/css css
text/csv csv
text/directory
text/dns soa zone
# text/ecmascript obsoleted by application/ecmascript
text/enriched
text/html html htm
# text/javascript obsoleted by application/javascript
text/n3 n3
text/parityfec
text/plain txt asc text pm el c h cc hh cxx hxx f90
text/prs.fallenstein.rst rst
text/prs.lines.tag tag dsc
text/RED
text/rfc822-headers
text/richtext rtx
# rtf: application/rtf
text/rtf
text/rtp-enc-aescm128
text/rtx
text/sgml sgml sgm
text/t140
text/tab-separated-values tsv
text/troff
text/turtle ttl
text/ulpfec
text/uri-list uris uri
text/vnd.abc abc
# curl: application/vnd.curl
text/vnd.curl
text/vnd.DMClientScript dms
text/vnd.esmertec.theme-descriptor jtd
text/vnd.fly fly
text/vnd.fmi.flexstor flx
text/vnd.graphviz gv dot
text/vnd.in3d.3dml 3dml 3dm
text/vnd.in3d.spot spot spo
text/vnd.IPTC.NewsML
text/vnd.IPTC.NITF
text/vnd.latex-z
text/vnd.motorola.reflex
text/vnd.ms-mediapackage mpf
text/vnd.net2phone.commcenter.command ccc
text/vnd.radisys.msml-basic-layout
text/vnd.si.uricatalogue uric
text/vnd.sun.j2me.app-descriptor jad
text/vnd.trolltech.linguist ts
text/vnd.wap.si si
text/vnd.wap.sl sl
text/vnd.wap.wml wml
text/vnd.wap.wmlscript wmls
text/xml xml xsd rng
text/xml-external-parsed-entity ent
video/1d-interleaved-parityfec
video/3gpp 3gp 3gpp
video/3gpp2 3g2 3gpp2
video/3gpp-tt
video/BMPEG
video/BT656
video/CelB
video/DV
video/H261
video/H263
video/H263-1998
video/H263-2000
video/H264
video/H264-RCDO
video/H264-SVC
video/JPEG
video/jpeg2000
video/mj2 mj2 mjp2
video/MP1S
video/MP2P
video/MP2T
video/mp4 mp4 mpg4
video/MP4V-ES
video/mpeg mpeg mpg mpe
video/mpeg4-generic
video/MPV
video/nv
video/ogg ogv
video/parityfec
video/pointer
video/quicktime mov qt
video/raw
video/rtp-enc-aescm128
video/rtx
video/SMPTE292M
video/ulpfec
video/vc1
video/vnd.CCTV
video/vnd.dece.hd uvh uvvh
video/vnd.dece.mobile uvm uvvm
video/vnd.dece.mp4 uvu uvvu
video/vnd.dece.pd uvp uvvp
video/vnd.dece.sd uvs uvvs
video/vnd.dece.video uvv uvvv
video/vnd.directv.mpeg
video/vnd.directv.mpeg-tts
video/vnd.dlna.mpeg-tts
video/vnd.fvt fvt
# rm: audio/x-pn-realaudio
video/vnd.hns.video
video/vnd.iptvforum.1dparityfec-1010
video/vnd.iptvforum.1dparityfec-2005
video/vnd.iptvforum.2dparityfec-1010
video/vnd.iptvforum.2dparityfec-2005
video/vnd.iptvforum.ttsavc
video/vnd.iptvforum.ttsmpeg2
video/vnd.motorola.video
video/vnd.motorola.videop
video/vnd.mpegurl mxu m4u
video/vnd.ms-playready.media.pyv pyv
video/vnd.nokia.interleaved-multimedia nim
video/vnd.nokia.videovoip
# mp4: video/mp4
video/vnd.objectvideo
video/vnd.sealed.mpeg1 smpg s11
# smpg: video/vnd.sealed.mpeg1
video/vnd.sealed.mpeg4 s14
video/vnd.sealed.swf sswf ssw
video/vnd.sealedmedia.softseal.mov smov smo s1q
# uvu, uvvu: video/vnd.dece.mp4
video/vnd.uvvu.mp4
video/vnd.vivo
# Non-IANA types
application/epub+zip epub
application/mac-compactpro cpt
application/metalink+xml metalink
application/rss+xml rss
application/vnd.android.package-archive apk
application/vnd.oma.dd+xml dd
application/vnd.oma.drm.content dcf
# odf: application/vnd.oasis.opendocument.formula
application/vnd.oma.drm.dcf o4a o4v
application/vnd.oma.drm.message dm
application/vnd.oma.drm.rights+wbxml drc
application/vnd.oma.drm.rights+xml dr
application/vnd.sun.xml.calc sxc
application/vnd.sun.xml.calc.template stc
application/vnd.sun.xml.draw sxd
application/vnd.sun.xml.draw.template std
application/vnd.sun.xml.impress sxi
application/vnd.sun.xml.impress.template sti
application/vnd.sun.xml.math sxm
application/vnd.sun.xml.writer sxw
application/vnd.sun.xml.writer.global sxg
application/vnd.sun.xml.writer.template stw
application/vnd.symbian.install sis
application/vnd.wap.mms-message mms
application/x-annodex anx
application/x-bcpio bcpio
application/x-bittorrent torrent
application/x-bzip2 bz2
application/x-cdlink vcd
application/x-chess-pgn pgn
application/x-cpio cpio
application/x-csh csh
application/x-director dcr dir dxr
application/x-dvi dvi
application/x-futuresplash spl
application/x-gtar gtar
application/x-gzip gz tgz
application/x-hdf hdf
application/x-java-archive jar
application/x-java-jnlp-file jnlp
application/x-java-pack200 pack
application/x-killustrator kil
application/x-latex latex
application/x-netcdf nc cdf
application/x-perl pl
application/x-rpm rpm
application/x-sh sh
application/x-shar shar
application/x-shockwave-flash swf
application/x-stuffit sit
application/x-sv4cpio sv4cpio
application/x-sv4crc sv4crc
application/x-tar tar
application/x-tcl tcl
application/x-tex tex
application/x-texinfo texinfo texi
application/x-troff t tr roff
application/x-troff-man man 1 2 3 4 5 6 7 8
application/x-troff-me me
application/x-troff-ms ms
application/x-ustar ustar
application/x-wais-source src
application/x-xpinstall xpi
application/x-xspf+xml xspf
application/x-xz xz
audio/midi mid midi kar
audio/x-aiff aif aiff aifc
audio/x-annodex axa
audio/x-flac flac
audio/x-mod mod ult uni m15 mtm 669 med
audio/x-mpegurl m3u
audio/x-ms-wax wax
audio/x-ms-wma wma
audio/x-pn-realaudio ram rm
audio/x-realaudio ra
audio/x-s3m s3m
audio/x-stm stm
audio/x-wav wav
chemical/x-xyz xyz
image/bmp bmp
image/x-cmu-raster ras
image/x-portable-anymap pnm
image/x-portable-bitmap pbm
image/x-portable-graymap pgm
image/x-portable-pixmap ppm
image/x-rgb rgb
image/x-targa tga
image/x-xbitmap xbm
image/x-xpixmap xpm
image/x-xwindowdump xwd
text/cache-manifest manifest
text/html-sandboxed sandboxed
text/x-pod pod
text/x-setext etx
text/x-vcard vcf
video/webm webm
video/x-annodex axv
video/x-flv flv
video/x-javafx fxm
video/x-ms-asf asx
video/x-ms-wm wm
video/x-ms-wmv wmv
video/x-ms-wmx wmx
video/x-ms-wvx wvx
video/x-msvideo avi
video/x-sgi-movie movie
x-conference/x-cooltalk ice
x-epoc/x-sisx-app sisx
cosmopolitan/third_party/python/Lib/tempfile.py
"""Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. All of the interfaces
provided by this module can be used without fear of race conditions
except for 'mktemp'. 'mktemp' is subject to race conditions and
should not be used; it is provided for backward compatibility only.
The default path names are returned as str. If you supply bytes as
input, all return values will be in bytes. Ex:
>>> tempfile.mkstemp()
(4, '/tmp/tmptpu9nin8')
>>> tempfile.mkdtemp(suffix=b'')
b'/tmp/tmppbi8f0hy'
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir",
"gettempprefixb", "gettempdirb",
]
# Imports.
import cosmo
import functools as _functools
import warnings as _warnings
import io as _io
import os as _os
import shutil as _shutil
import errno as _errno
from random import Random as _Random
import weakref as _weakref
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# This variable _was_ unused for legacy reasons, see issue 10354.
# But as of 3.5 we actually use it at runtime so changing it would
# have a possibly desirable side effect... But we do not want to support
# that as an API. It is undocumented on purpose. Do not depend on this.
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
fd = _os.open(fn, _os.O_RDONLY)
_os.close(fd)
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
def _infer_return_type(*args):
"""Look at the type of all args and divine their implied return type."""
return_type = None
for arg in args:
if arg is None:
continue
if isinstance(arg, bytes):
if return_type is str:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = bytes
else:
if return_type is bytes:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = str
if return_type is None:
return str # tempfile APIs return a str by default.
return return_type
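# A minimal sketch (an illustrative addition, not part of the original
# module) of how _infer_return_type() drives the str/bytes duality of this
# module's APIs: the return type follows the argument types, and mixing
# str with bytes is rejected.
def _example_infer_return_type():
    assert _infer_return_type('pre', None, None) is str
    assert _infer_return_type(b'pre', b'.suf', None) is bytes
    try:
        _infer_return_type('pre', b'.suf', None)
    except TypeError:
        pass  # mixing str and bytes path components raises TypeError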
def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = template
else:
prefix = _os.fsencode(template)
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = gettempdirb()
return prefix, suffix, dir, output_type
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is eight characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in range(8)]
return ''.join(letters)
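# Illustrative sketch (not part of the original module): every draw from a
# _RandomNameSequence is an independent eight-character name built from the
# class's alphabet, which is what makes candidate file names unpredictable.
def _example_random_name_sequence():
    namer = _RandomNameSequence()
    name = next(namer)
    assert len(name) == 8
    assert set(name) <= set(_RandomNameSequence.characters)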
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt' or cosmo.kernel == 'nt':
dirlist.extend([ _os.path.expanduser(r'~\AppData\Local\Temp'),
_os.path.expandvars(r'%SYSTEMROOT%\Temp'),
r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.abspath(dir)
# Try only a few names per directory.
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if ((_os.name == 'nt' or cosmo.kernel == 'nt') and
_os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
continue
break # no point trying more names in this directory
except OSError:
break # no point trying more names in this directory
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags, output_type):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if ((_os.name == 'nt' or cosmo.kernel == 'nt') and
_os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
continue
else:
raise
return (fd, _os.path.abspath(file))
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""The default prefix for temporary directories."""
return template
def gettempprefixb():
"""The default prefix for temporary directories as bytes."""
return _os.fsencode(gettempprefix())
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def gettempdirb():
"""A bytes version of tempfile.gettempdir()."""
return _os.fsencode(gettempdir())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is not None, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is not None, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is not None, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
If any of 'suffix', 'prefix' and 'dir' are not None, they must be the
same type. If they are bytes, the returned name will be bytes; str
otherwise.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
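# Illustrative sketch (not part of the original module): mkstemp() hands
# ownership of both the descriptor and the file to the caller, so both must
# be released explicitly.
def _example_mkstemp_usage():
    fd, name = mkstemp(suffix='.txt')
    try:
        _os.write(fd, b'hello')   # write through the raw descriptor
    finally:
        _os.close(fd)             # close the descriptor ...
        _os.unlink(name)          # ... and delete the file ourselves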
def mkdtemp(suffix=None, prefix=None, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if ((_os.name == 'nt' or cosmo.kernel == 'nt') and
_os.path.isdir(dir) and _os.access(dir, _os.W_OK)):
continue
else:
raise
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are similar to mkstemp, except that the 'text' argument is
not accepted, and suffix=None, prefix=None and bytes file names are not
supported.
THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may
refer to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileCloser:
"""A separate object allowing proper closing of a temporary file's
underlying file object, without adding a __del__ method to the
temporary file."""
file = None # Set here since __del__ checks it
close_called = False
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
def close(self, unlink=_os.unlink):
if not self.close_called and self.file is not None:
self.close_called = True
try:
self.file.close()
finally:
if self.delete:
unlink(self.name)
# Need to ensure the file is deleted on __del__
def __del__(self):
self.close()
else:
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
self._closer = _TemporaryFileCloser(file, name, delete)
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if hasattr(a, '__call__'):
func = a
@_functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
# Avoid closing the file as long as the wrapper is alive,
# see issue #18879.
func_wrapper._closer = self._closer
a = func_wrapper
if not isinstance(a, int):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
def close(self):
"""
Close the temporary file, possibly deleting it.
"""
self._closer.close()
# iter() doesn't use __getattr__ to find the __iter__ method
def __iter__(self):
# Don't return iter(self.file), but yield from it to avoid closing
# file as long as it's being used as iterator (see issue #23700). We
# can't use 'yield from' here because iter(file) returns the file
# object itself, which has a close method, and thus the file would get
# closed when the generator is finalized, due to PEP380 semantics.
for line in self.file:
yield line
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as its 'name' attribute. The file will be automatically
deleted when it is closed unless the 'delete' argument is set to False.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if delete and hasattr(_os, 'O_TEMPORARY'):
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
except BaseException:
_os.unlink(name)
_os.close(fd)
raise
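# Illustrative sketch (not part of the original module): NamedTemporaryFile
# wraps the mkstemp() result in a file object whose .name can be handed to
# other code; with the default delete=True the file vanishes on close.
def _example_named_temporary_file():
    with NamedTemporaryFile(mode='w+', suffix='.log') as f:
        f.write('one line\n')
        f.seek(0)
        assert f.read() == 'one line\n'
        path = f.name             # valid while the file is still open
    # at this point the file behind `path` has been unlinked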
if _os.name != 'posix' or _os.sys.platform == 'cygwin' or cosmo.kernel == 'nt':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
# Is the O_TMPFILE flag available and does it work?
# The flag is set to False if os.open(dir, os.O_TMPFILE) raises an
# IsADirectoryError exception
_O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE')
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
global _O_TMPFILE_WORKS
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
if _O_TMPFILE_WORKS:
try:
flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
fd = _os.open(dir, flags2, 0o600)
except IsADirectoryError:
# Linux kernel older than 3.11 ignores the O_TMPFILE flag:
# O_TMPFILE is read as O_DIRECTORY. Trying to open a directory
# with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a
# directory cannot be open to write. Set flag to False to not
# try again.
_O_TMPFILE_WORKS = False
except OSError:
# The filesystem of the directory does not support O_TMPFILE.
# For example, OSError(95, 'Operation not supported').
#
# On Linux kernel older than 3.11, trying to open a regular
# file (or a symbolic link to a regular file) with O_TMPFILE
# fails with NotADirectoryError, because O_TMPFILE is read as
# O_DIRECTORY.
pass
else:
try:
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
# Fallback to _mkstemp_inner().
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix=None, prefix=None, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
# get double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self, size=None):
if size is None:
self._file.truncate()
else:
if size > self._max_size:
self.rollover()
self._file.truncate(size)
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
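# Illustrative sketch (not part of the original module): a
# SpooledTemporaryFile buffers in memory until max_size is exceeded (or
# fileno() is requested), then rolls over to a real temporary file; the
# check below peeks at the private _rolled flag purely for demonstration.
def _example_spooled_temporary_file():
    with SpooledTemporaryFile(max_size=16) as f:
        f.write(b'tiny')
        assert not f._rolled      # still an in-memory BytesIO
        f.write(b'x' * 32)        # crossing max_size triggers rollover()
        assert f._rolled          # now backed by a real file on disk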
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix=None, prefix=None, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._finalizer = _weakref.finalize(
self, self._cleanup, self.name,
warn_message="Implicitly cleaning up {!r}".format(self))
@classmethod
def _cleanup(cls, name, warn_message):
_shutil.rmtree(name)
_warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
_shutil.rmtree(self.name)
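# Illustrative sketch (not part of the original module): TemporaryDirectory
# yields a directory path on entry and removes the whole tree on exit.
def _example_temporary_directory():
    with TemporaryDirectory(prefix='demo-') as tmpdir:
        target = _os.path.join(tmpdir, 'scratch.txt')
        with open(target, 'w') as f:
            f.write('scratch data')
        assert _os.path.exists(target)
    assert not _os.path.exists(tmpdir)  # the tree is gone after the block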
cosmopolitan/third_party/python/Lib/locale.py
"""Locale support module.
The module provides low-level access to the C lib's locale APIs and adds high
level number formatting APIs as well as a locale aliasing engine to complement
these.
The aliasing engine includes support for many commonly used locale names and
maps them to values suitable for passing to the C lib's setlocale() function. It
also includes default encodings for all supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import collections
from builtins import str as _builtin_str
import functools
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
def _strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return (a > b) - (a < b)
def _strxfrm(s):
""" strxfrm(string) -> string.
        Returns a string that can be used as a key for locale-aware comparisons.
"""
return s
try:
from _locale import *
except ImportError:
# Locale emulation
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error('_locale emulation only supports "C" locale')
return 'C'
# These may or may not exist in _locale, so be sure to set them.
if 'strxfrm' not in globals():
strxfrm = _strxfrm
if 'strcoll' not in globals():
strcoll = _strcoll
_localeconv = localeconv
# With this dict, you can override some items of localeconv's return value.
# This is useful for testing purposes.
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
d = _localeconv()
if _override_localeconv:
d.update(_override_localeconv)
return d
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
# Iterate over grouping intervals
def _grouping_intervals(grouping):
last_interval = None
for interval in grouping:
        # an interval of CHAR_MAX means: no further grouping
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
if last_interval is None:
raise ValueError("invalid grouping")
while True:
yield last_interval
yield interval
last_interval = interval
#perform the grouping from right to left
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format(percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
# this is only for one-percent-specifier strings and this should be checked
match = _percent_re.match(percent)
    if not match or len(match.group()) != len(percent):
raise ValueError(("format() must be given exactly one %%char "
"format specifier, %s not valid") % repr(percent))
return _format(percent, value, grouping, monetary, *additional)
def _format(percent, value, grouping=False, monetary=False, *additional):
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif percent[-1] in 'diu':
seps = 0
if grouping:
formatted, seps = _group(formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if isinstance(val, collections.Mapping):
new_val = []
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
new_val.append(format(perc.group(), val, grouping))
else:
if not isinstance(val, tuple):
val = (val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(perc.group(),
val[i],
grouping,
False,
*val[i+1:i+1+starcount]))
i += (1 + starcount)
val = tuple(new_val)
return new_f % val
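# Illustrative sketch (not part of the original module): with grouping=True
# the locale's thousands separator is inserted into numeric conversions.
# The expected strings assume an installed en_US.UTF-8 locale.
def _example_format_string():
    setlocale(LC_ALL, 'en_US.UTF-8')  # assumption: locale is available
    assert format_string('%d items', 1234567, grouping=True) == '1,234,567 items'
    assert format('%.2f', 1234.5, grouping=True) == '1,234.50'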
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
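# Illustrative sketch (not part of the original module): currency() pulls
# the symbol, digit count, grouping, and sign position from localeconv().
# The expected strings assume an installed en_US.UTF-8 locale.
def _example_currency():
    setlocale(LC_ALL, 'en_US.UTF-8')  # assumption: locale is available
    assert currency(1234.5, grouping=True) == '$1,234.50'
    assert currency(-1234.5, grouping=True) == '-$1,234.50'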
def str(val):
"""Convert float to string, taking the locale into account."""
return format("%.12g", val)
def delocalize(string):
"Parses a string as a normalized number according to the locale settings."
conv = localeconv()
#First, get rid of the grouping
ts = conv['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = conv['decimal_point']
if dd:
string = string.replace(dd, '.')
return string
def atof(string, func=float):
"Parses a string as a float according to the locale settings."
return func(delocalize(string))
def atoi(string):
"Converts a string to an integer according to the locale settings."
return int(delocalize(string))
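# Illustrative sketch (not part of the original module): delocalize() strips
# the locale's thousands separator and normalizes its decimal point, which
# is all atof() and atoi() need. Assumes an installed de_DE.UTF-8 locale.
def _example_delocalize():
    setlocale(LC_ALL, 'de_DE.UTF-8')  # assumption: locale is available
    assert delocalize('1.234,56') == '1234.56'
    assert atof('1.234,56') == 1234.56
    assert atoi('1.234') == 1234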
def _test():
setlocale(LC_ALL, "")
#do grouping
    s1 = format("%d", 123456789, 1)
print(s1, "is", atoi(s1))
#standard formatting
s1 = str(3.14)
print(s1, "is", atof(s1))
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, [email protected]
# Various tweaks by Fredrik Lundh <[email protected]>
# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale
def _replace_encoding(code, encoding):
if '.' in code:
langname = code[:code.index('.')]
else:
langname = code
# Convert the encoding to a C lib compatible encoding string
norm_encoding = encodings.normalize_encoding(encoding)
#print('norm encoding: %r' % norm_encoding)
norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(),
norm_encoding)
#print('aliased encoding: %r' % norm_encoding)
encoding = norm_encoding
norm_encoding = norm_encoding.lower()
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
else:
norm_encoding = norm_encoding.replace('_', '')
norm_encoding = norm_encoding.replace('-', '')
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
#print('found encoding %r' % encoding)
return langname + '.' + encoding
def _append_modifier(code, modifier):
if modifier == 'euro':
if '.' not in code:
return code + '.ISO8859-15'
_, _, encoding = code.partition('.')
if encoding in ('ISO8859-15', 'UTF-8'):
return code
if encoding == 'ISO8859-1':
return _replace_encoding(code, 'ISO8859-15')
return code + '@' + modifier
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
# Normalize the locale name and extract the encoding and modifier
code = localename.lower()
if ':' in code:
# ':' is sometimes used as encoding delimiter.
code = code.replace(':', '.')
if '@' in code:
code, modifier = code.split('@', 1)
else:
modifier = ''
if '.' in code:
langname, encoding = code.split('.')[:2]
else:
langname = code
encoding = ''
# First lookup: fullname (possibly with encoding and modifier)
lang_enc = langname
if encoding:
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lang_enc += '.' + norm_encoding
lookup_name = lang_enc
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
#print('first lookup failed')
if modifier:
# Second try: fullname without modifier (possibly with encoding)
code = locale_alias.get(lang_enc, None)
if code is not None:
#print('lookup without modifier succeeded')
if '@' not in code:
return _append_modifier(code, modifier)
if code.split('@', 1)[1].lower() == modifier:
return code
#print('second lookup failed')
if encoding:
# Third try: langname (without encoding, possibly with modifier)
lookup_name = langname
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
#print('lookup without encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding)
code, modifier = code.split('@', 1)
return _replace_encoding(code, encoding) + '@' + modifier
if modifier:
# Fourth try: langname (without encoding and modifier)
code = locale_alias.get(langname, None)
if code is not None:
#print('lookup without modifier and encoding succeeded')
if '@' not in code:
code = _replace_encoding(code, encoding)
return _append_modifier(code, modifier)
code, defmod = code.split('@', 1)
if defmod.lower() == modifier:
return _replace_encoding(code, encoding) + '@' + defmod
return localename
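# Illustrative sketch (not part of the original module): normalize() runs a
# name through the alias table and fills in the default encoding, so all of
# these spellings converge on canonical setlocale() codes.
def _example_normalize():
    assert normalize('de_de') == 'de_DE.ISO8859-1'
    assert normalize('german') == 'de_DE.ISO8859-1'
    assert normalize('en_US.utf8') == 'en_US.UTF-8'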
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
raise ValueError('unknown locale: %s' % localename)
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
try:
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
except (TypeError, ValueError):
raise TypeError('Locale must be None, a string, or an iterable of two strings -- language code, encoding.')
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
# check if it's supported by the _locale module
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
# make sure the code/encoding values are valid
if sys.platform == "win32" and code and code[:2] == "0x":
# map windows language identifier to language name
code = windows_locale.get(int(code, 0))
# ...add other platform-specific processing here, if
# necessary...
return code, encoding
# fall back on POSIX behaviour
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable,None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError('category LC_ALL is not supported')
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, an iterable of two strings (language code and encoding),
or None.
Iterables are converted to strings using the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and not isinstance(locale, _builtin_str):
# convert to string
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
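# Illustrative sketch (not part of the original module): setlocale() accepts
# either a raw locale string or a (language, encoding) pair; the pair is
# joined and normalized before being passed to the C library.
def _example_setlocale():
    setlocale(LC_ALL, 'C')                 # the portable locale, always valid
    setlocale(LC_ALL, ('en_US', 'UTF-8'))  # assumption: locale is installed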
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform.startswith("win"):
# On Win32, this will return the ANSI code page
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
import _bootlocale
return _bootlocale.getpreferredencoding(False)
else:
# On Unix, if CODESET is available, use that.
try:
CODESET
except NameError:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
res = getdefaultlocale()[1]
if res is None:
                # LANG not set, default conservatively to UTF-8
res = 'utf-8'
return res
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
import _bootlocale
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
try:
setlocale(LC_CTYPE, "")
except Error:
pass
result = _bootlocale.getpreferredencoding(False)
if do_setlocale:
setlocale(LC_CTYPE, oldloc)
return result
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
locale_encoding_alias = {
# Mappings for non-standard encoding names used in locale names
'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
'koi8c': 'KOI8-C',
'microsoftcp1251': 'CP1251',
'microsoftcp1255': 'CP1255',
'microsoftcp1256': 'CP1256',
'88591': 'ISO8859-1',
'88592': 'ISO8859-2',
'88595': 'ISO8859-5',
'885915': 'ISO8859-15',
# Mappings from Python codec names to C lib encoding names
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_16': 'ISO8859-16',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF-8',
'koi8_r': 'KOI8-R',
'koi8_t': 'KOI8-T',
'koi8_u': 'KOI8-U',
'kz1048': 'RK1048',
'cp1251': 'CP1251',
'cp1255': 'CP1255',
'cp1256': 'CP1256',
# XXX This list is still incomplete. If you know more
# mappings, please file a bug report. Thanks.
}
for k, v in sorted(locale_encoding_alias.items()):
k = k.replace('_', '')
locale_encoding_alias.setdefault(k, v)
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this tables
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
# MAL 2008-05-30:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.5
# and older):
#
# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
#
# AP 2010-04-12:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.6.5
# and older):
#
# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
#
# SS 2013-12-20:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 3.3.3
# and older):
#
# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8'
# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
#
# SS 2014-10-01:
# Updated alias mapping with glibc 2.19 supported locales.
#
# SS 2018-05-05:
# Updated alias mapping with glibc 2.27 supported locales.
#
# These are the differences compared to the old mapping (Python 3.6.5
# and older):
#
# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia'
# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154'
# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R'
locale_alias = {
'a3': 'az_AZ.KOI8-C',
'a3_az': 'az_AZ.KOI8-C',
'a3_az.koic': 'az_AZ.KOI8-C',
'aa_dj': 'aa_DJ.ISO8859-1',
'aa_er': 'aa_ER.UTF-8',
'aa_et': 'aa_ET.UTF-8',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'agr_pe': 'agr_PE.UTF-8',
'ak_gh': 'ak_GH.UTF-8',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'an_es': 'an_ES.ISO8859-15',
'anp_in': 'anp_IN.UTF-8',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_in': 'ar_IN.UTF-8',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_ss': 'ar_SS.UTF-8',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
'as_in': 'as_IN.UTF-8',
'ast_es': 'ast_ES.ISO8859-15',
'ayc_pe': 'ayc_PE.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'az_ir': 'az_IR.UTF-8',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_bg.utf8': 'bg_BG.UTF-8',
'be_by': 'be_BY.CP1251',
'be_by@latin': 'be_BY.UTF-8@latin',
'bem_zm': 'bem_ZM.UTF-8',
'ber_dz': 'ber_DZ.UTF-8',
'ber_ma': 'ber_MA.UTF-8',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bhb_in.utf8': 'bhb_IN.UTF-8',
'bho_in': 'bho_IN.UTF-8',
'bho_np': 'bho_NP.UTF-8',
'bi_vu': 'bi_VU.UTF-8',
'bn_bd': 'bn_BD.UTF-8',
'bn_in': 'bn_IN.UTF-8',
'bo_cn': 'bo_CN.UTF-8',
'bo_in': 'bo_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'brx_in': 'brx_IN.UTF-8',
'bs': 'bs_BA.ISO8859-2',
'bs_ba': 'bs_BA.ISO8859-2',
'bulgarian': 'bg_BG.CP1251',
'byn_er': 'byn_ER.UTF-8',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c.utf8': 'en_US.UTF-8',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es@valencia': 'ca_ES.UTF-8@valencia',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_it': 'ca_IT.ISO8859-1',
'catalan': 'ca_ES.ISO8859-1',
'ce_ru': 'ce_RU.UTF-8',
'cextend': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'chr_us': 'chr_US.UTF-8',
'ckb_iq': 'ckb_IQ.UTF-8',
'cmn_tw': 'cmn_TW.UTF-8',
'crh_ua': 'crh_UA.UTF-8',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'csb_pl': 'csb_PL.UTF-8',
'cv_ru': 'cv_RU.UTF-8',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da_dk': 'da_DK.ISO8859-1',
'danish': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de_at': 'de_AT.ISO8859-1',
'de_be': 'de_BE.ISO8859-1',
'de_ch': 'de_CH.ISO8859-1',
'de_de': 'de_DE.ISO8859-1',
'de_it': 'de_IT.ISO8859-1',
'de_li.utf8': 'de_LI.UTF-8',
'de_lu': 'de_LU.ISO8859-1',
'deutsch': 'de_DE.ISO8859-1',
'doi_in': 'doi_IN.UTF-8',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'dv_mv': 'dv_MV.UTF-8',
'dz_bt': 'dz_BT.UTF-8',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_cy': 'el_CY.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en_ag': 'en_AG.UTF-8',
'en_au': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_bw': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_dk': 'en_DK.ISO8859-1',
'en_dl.utf8': 'en_DL.UTF-8',
'en_gb': 'en_GB.ISO8859-1',
'en_hk': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_il': 'en_IL.UTF-8',
'en_in': 'en_IN.ISO8859-1',
'en_ng': 'en_NG.UTF-8',
'en_nz': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_sc.utf8': 'en_SC.UTF-8',
'en_sg': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_zm': 'en_ZM.UTF-8',
'en_zw': 'en_ZW.ISO8859-1',
'en_zw.utf8': 'en_ZS.UTF-8',
'eng_gb': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english.iso88591': 'en_US.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo.utf8': 'eo.UTF-8',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_us.utf8': 'eo_US.UTF-8',
'eo_xx': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cu': 'es_CU.UTF-8',
'es_do': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_gt': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pe': 'es_PE.ISO8859-1',
'es_pr': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_sv': 'es_SV.ISO8859-1',
'es_us': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_ve': 'es_VE.ISO8859-1',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_fr': 'eu_FR.ISO8859-1',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'ff_sn': 'ff_SN.UTF-8',
'fi': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fil_ph': 'fil_PH.UTF-8',
'finnish': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fr': 'fr_FR.ISO8859-1',
'fr_be': 'fr_BE.ISO8859-1',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_lu': 'fr_LU.ISO8859-1',
'fran\xe7ais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'fur_it': 'fur_IT.UTF-8',
'fy_de': 'fy_DE.UTF-8',
'fy_nl': 'fy_NL.UTF-8',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'ger_de': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'gez_er': 'gez_ER.UTF-8',
'gez_et': 'gez_ET.UTF-8',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'greek': 'el_GR.ISO8859-7',
'gu_in': 'gu_IN.UTF-8',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'ha_ng': 'ha_NG.UTF-8',
'hak_tw': 'hak_TW.UTF-8',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'hebrew': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hif_fj': 'hif_FJ.UTF-8',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hsb_de': 'hsb_DE.ISO8859-2',
'ht_ht': 'ht_HT.UTF-8',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'hy_am': 'hy_AM.UTF-8',
'hy_am.armscii8': 'hy_AM.ARMSCII_8',
'ia': 'ia.UTF-8',
'ia_fr': 'ia_FR.UTF-8',
'icelandic': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'ig_ng': 'ig_NG.UTF-8',
'ik_ca': 'ik_CA.UTF-8',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it_ch': 'it_CH.ISO8859-1',
'it_it': 'it_IT.ISO8859-1',
'italian': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.utf8': 'iw_IL.UTF-8',
'ja': 'ja_JP.eucJP',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.pck': 'ja_JP.SJIS',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kab_dz': 'kab_DZ.UTF-8',
'kk_kz': 'kk_KZ.ptcp154',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'km_kh': 'km_KH.UTF-8',
'kn': 'kn_IN.UTF-8',
'kn_in': 'kn_IN.UTF-8',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'kok_in': 'kok_IN.UTF-8',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in': 'ks_IN.UTF-8',
'[email protected]': 'ks_IN.UTF-8@devanagari',
'ku_tr': 'ku_TR.ISO8859-9',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'ky': 'ky_KG.UTF-8',
'ky_kg': 'ky_KG.UTF-8',
'lb_lu': 'lb_LU.UTF-8',
'lg_ug': 'lg_UG.ISO8859-10',
'li_be': 'li_BE.UTF-8',
'li_nl': 'li_NL.UTF-8',
'lij_it': 'lij_IT.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
'ln_cd': 'ln_CD.UTF-8',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'lzh_tw': 'lzh_TW.UTF-8',
'mag_in': 'mag_IN.UTF-8',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
'mai_np': 'mai_NP.UTF-8',
'mfe_mu': 'mfe_MU.UTF-8',
'mg_mg': 'mg_MG.ISO8859-15',
'mhr_ru': 'mhr_RU.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'miq_ni': 'miq_NI.UTF-8',
'mjw_in': 'mjw_IN.UTF-8',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'ml': 'ml_IN.UTF-8',
'ml_in': 'ml_IN.UTF-8',
'mn_mn': 'mn_MN.UTF-8',
'mni_in': 'mni_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'my_mm': 'my_MM.UTF-8',
'nan_tw': 'nan_TW.UTF-8',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nds_de': 'nds_DE.UTF-8',
'nds_nl': 'nds_NL.UTF-8',
'ne_np': 'ne_NP.UTF-8',
'nhn_mx': 'nhn_MX.UTF-8',
'niu_nu': 'niu_NU.UTF-8',
'niu_nz': 'niu_NZ.UTF-8',
'nl': 'nl_NL.ISO8859-1',
'nl_aw': 'nl_AW.UTF-8',
'nl_be': 'nl_BE.ISO8859-1',
'nl_nl': 'nl_NL.ISO8859-1',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
'norwegian': 'no_NO.ISO8859-1',
'nr': 'nr_ZA.ISO8859-1',
'nr_za': 'nr_ZA.ISO8859-1',
'nso': 'nso_ZA.ISO8859-15',
'nso_za': 'nso_ZA.ISO8859-15',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'om_et': 'om_ET.UTF-8',
'om_ke': 'om_KE.ISO8859-1',
'or': 'or_IN.UTF-8',
'or_in': 'or_IN.UTF-8',
'os_ru': 'os_RU.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pa_pk': 'pa_PK.UTF-8',
'pap_an': 'pap_AN.UTF-8',
'pap_aw': 'pap_AW.UTF-8',
'pap_cw': 'pap_CW.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_us': 'pd_US.ISO8859-1',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'ps_af': 'ps_AF.UTF-8',
'pt': 'pt_PT.ISO8859-1',
'pt_br': 'pt_BR.ISO8859-1',
'pt_pt': 'pt_PT.ISO8859-1',
'quz_pe': 'quz_PE.UTF-8',
'raj_in': 'raj_IN.UTF-8',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.UTF-8',
'ru_ru': 'ru_RU.UTF-8',
'ru_ua': 'ru_UA.KOI8-U',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.KOI8-R',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'sa_in': 'sa_IN.UTF-8',
'sat_in': 'sat_IN.UTF-8',
'sc_it': 'sc_IT.UTF-8',
'sd': 'sd_IN.UTF-8',
'sd_in': 'sd_IN.UTF-8',
'[email protected]': 'sd_IN.UTF-8@devanagari',
'sd_pk': 'sd_PK.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sgs_lt': 'sgs_LT.UTF-8',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
'shn_mm': 'shn_MM.UTF-8',
'shs_ca': 'shs_CA.UTF-8',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'sid_et': 'sid_ET.UTF-8',
'sinhala': 'si_LK.UTF-8',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'sm_ws': 'sm_WS.UTF-8',
'so_dj': 'so_DJ.ISO8859-1',
'so_et': 'so_ET.UTF-8',
'so_ke': 'so_KE.ISO8859-1',
'so_so': 'so_SO.ISO8859-1',
'sp': 'sr_CS.ISO8859-5',
'sp_yu': 'sr_CS.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_mk': 'sq_MK.UTF-8',
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latn': 'sr_CS.UTF-8@latin',
'sr_cs': 'sr_CS.UTF-8',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
'sr_yu': 'sr_RS.UTF-8@latin',
'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.iso88592': 'sr_CS.ISO8859-2',
'sr_yu.iso88595': 'sr_CS.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.utf8': 'sr_RS.UTF-8',
'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
'sr_yu@cyrillic': 'sr_RS.UTF-8',
'ss': 'ss_ZA.ISO8859-1',
'ss_za': 'ss_ZA.ISO8859-1',
'st': 'st_ZA.ISO8859-1',
'st_za': 'st_ZA.ISO8859-1',
'sv': 'sv_SE.ISO8859-1',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_se': 'sv_SE.ISO8859-1',
'sw_ke': 'sw_KE.UTF-8',
'sw_tz': 'sw_TZ.UTF-8',
'swedish': 'sv_SE.ISO8859-1',
'szl_pl': 'szl_PL.UTF-8',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'ta_lk': 'ta_LK.UTF-8',
'tcy_in.utf8': 'tcy_IN.UTF-8',
'te': 'te_IN.UTF-8',
'te_in': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'the_np': 'the_NP.UTF-8',
'ti_er': 'ti_ER.UTF-8',
'ti_et': 'ti_ET.UTF-8',
'tig_er': 'tig_ER.UTF-8',
'tk_tm': 'tk_TM.UTF-8',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'to_to': 'to_TO.UTF-8',
'tpi_pg': 'tpi_PG.UTF-8',
'tr': 'tr_TR.ISO8859-9',
'tr_cy': 'tr_CY.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'ts': 'ts_ZA.ISO8859-1',
'ts_za': 'ts_ZA.ISO8859-1',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif',
'turkish': 'tr_TR.ISO8859-9',
'ug_cn': 'ug_CN.UTF-8',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'univ': 'en_US.utf',
'universal': 'en_US.utf',
'universal.utf8@ucs4': 'en_US.UTF-8',
'unm_us': 'unm_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_in': 'ur_IN.UTF-8',
'ur_pk': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'uz_uz@cyrillic': 'uz_UZ.UTF-8',
've': 've_ZA.UTF-8',
've_za': 've_ZA.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wae_ch': 'wae_CH.UTF-8',
'wal_et': 'wal_ET.UTF-8',
'wo_sn': 'wo_SN.UTF-8',
'xh': 'xh_ZA.ISO8859-1',
'xh_za': 'xh_ZA.ISO8859-1',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yo_ng': 'yo_NG.UTF-8',
'yue_hk': 'yue_HK.UTF-8',
'yuw_pg': 'yuw_PG.UTF-8',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5hk': 'zh_HK.big5hkscs',
'zh_sg': 'zh_SG.GB2312',
'zh_sg.gbk': 'zh_SG.GBK',
'zh_tw': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
'zh_tw.euctw': 'zh_TW.eucTW',
'zu': 'zu_ZA.ISO8859-1',
'zu_za': 'zu_ZA.ISO8859-1',
}
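# Editor's note (illustrative, not part of the upstream module): keys are
# lowercased alias names; the normalize() helper defined earlier in this
# module consults this table, e.g.:
#
#     locale_alias['en_gb']    == 'en_GB.ISO8859-1'
#     locale_alias['japanese'] == 'ja_JP.eucJP'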
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to the Python bug tracker at http://bugs.python.org/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0484: "gsw_FR",# Alsatian - France
0x045e: "am_ET", # Amharic - Ethiopia
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x044d: "as_IN", # Assamese - India
0x042c: "az_AZ", # Azeri - Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x046d: "ba_RU", # Bashkir
0x042d: "eu_ES", # Basque - Russia
0x0423: "be_BY", # Belarusian
0x0445: "bn_IN", # Begali
0x201a: "bs_BA", # Bosnian - Cyrillic
0x141a: "bs_BA", # Bosnian - Latin
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
# 0x0455: "my_MM", # Burmese - Not supported
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x0483: "co_FR", # Corsican - France
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x4009: "en_IN", # English - India
0x4409: "en_MY", # English - Malaysia
0x4809: "en_IN", # English - Singapore
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x046f: "kl_GL", # Greenlandic - Greenland
0x0447: "gu_IN", # Gujarati
0x0468: "ha_NG", # Hausa - Latin
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut - Syllabics
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0453: "kh_KH", # Khmer - Cambodia
0x0486: "qut_GT",# K'iche - Guatemala
0x0487: "rw_RW", # Kinyarwanda - Rwanda
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0454: "lo_LA", # Lao - Lao PDR
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x082e: "dsb_DE",# Lower Sorbian - Germany
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYROM Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei Darussalam
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian - Cyrillic
0x0850: "mn_CN", # Mongolian - PRC
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x045b: "si_LK", # Sinhala - Sri Lanka
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
0x380a: "es_UR", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x540a: "es_US", # Spanish - United States
# 0x0430: "", # Sutu - Not supported
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0428: "tg_TJ", # Tajik - Cyrillic
0x085f: "tmz_DZ",# Tamazight - Latin
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x0851: "bo_BT", # Tibetan - Bhutan
0x0451: "bo_CN", # Tibetan - PRC
0x041f: "tr_TR", # Turkish
0x0442: "tk_TM", # Turkmen - Cyrillic
0x0480: "ug_CN", # Uighur - Arabic
0x0422: "uk_UA", # Ukrainian
0x042e: "wen_DE",# Upper Sorbian - Germany
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
0x0488: "wo_SN", # Wolof - Senegal
0x0434: "xh_ZA", # Xhosa - South Africa
0x0485: "sah_RU",# Yakut - Cyrillic
0x0478: "ii_CN", # Yi - PRC
0x046a: "yo_NG", # Yoruba - Nigeria
0x0435: "zu_ZA", # Zulu
}
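# Editor's note (illustrative, not part of the upstream module): keys are
# Windows LCID values, such as those reported by the Win32 API, e.g.:
#
#     windows_locale[0x0409] == 'en_US'   # English - United States
#     windows_locale[0x0411] == 'ja_JP'   # Japanese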
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print('Locale defaults as determined by getdefaultlocale():')
print('-'*72)
lang, enc = getdefaultlocale()
print('Language: ', lang or '(undefined)')
print('Encoding: ', enc or '(undefined)')
print()
print('Locale settings on startup:')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
print()
print('Locale settings after calling resetlocale():')
print('-'*72)
resetlocale()
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
try:
setlocale(LC_ALL, "")
except:
print('NOTE:')
print('setlocale(LC_ALL, "") does not support the default locale')
print('given in the OS environment variables.')
else:
print()
print('Locale settings after calling setlocale(LC_ALL, ""):')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
###
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append("LC_MESSAGES")
if __name__=='__main__':
print('Locale aliasing:')
print()
_print_locale()
print()
print('Number formatting:')
print()
_test()
cosmopolitan/third_party/python/Lib/inspect.py
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python 3 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <[email protected]>',
'Yury Selivanov <[email protected]>')
import abc
import ast
import collections.abc
import enum
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import token
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# dis.COMPILER_FLAG_NAMES
CO_OPTIMIZED = 1
CO_NEWLOCALS = 2
CO_VARARGS = 4
CO_VARKEYWORDS = 8
CO_NESTED = 16
CO_GENERATOR = 32
CO_NOFREE = 64
CO_COROUTINE = 128
CO_ITERABLE_COROUTINE = 256
CO_ASYNC_GENERATOR = 512
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__cached__ pathname to byte compiled file
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, type)
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
__func__ function object containing implementation of method
__self__ instance to which this method is bound"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
__func__ attribute (etc) when an object passes ismethod()."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
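# Editor's example (illustrative, not upstream code): slot wrappers on
# built-in types pass this test, while plain functions are excluded by the
# mutual-exclusion check above:
#
#     ismethoddescriptor(int.__add__)   # -> True
#     def f(): pass
#     ismethoddescriptor(f)             # -> False (isfunction() is true)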
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__set__") and hasattr(tp, "__get__")
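# Editor's example (illustrative, not upstream code): property objects define
# both __get__ and __set__, staticmethod objects only __get__:
#
#     isdatadescriptor(property(lambda self: None))   # -> True
#     isdatadescriptor(staticmethod(len))             # -> False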
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
__code__ code object containing compiled function bytecode
__defaults__ tuple of any default values for arguments
__globals__ global namespace in which this function was defined
__annotations__ dict of parameter annotations
__kwdefaults__ dict of keyword only parameters with defaults"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
Generator function objects provide the same attributes as functions.
See help(isfunction) for a list of attributes."""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_GENERATOR)
def iscoroutinefunction(object):
"""Return true if the object is a coroutine function.
Coroutine functions are defined with "async def" syntax.
"""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_COROUTINE)
def isasyncgenfunction(object):
"""Return true if the object is an asynchronous generator function.
Asynchronous generator functions are defined with "async def"
syntax and have "yield" expressions in their body.
"""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_ASYNC_GENERATOR)
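# Editor's example (illustrative, not upstream code): the three co_flags
# predicates above distinguish the three flavors of "yielding" callables:
#
#     def g(): yield 1
#     async def c(): pass
#     async def ag(): yield 1   # async generators require Python 3.6+
#
#     isgeneratorfunction(g)    # -> True
#     iscoroutinefunction(c)    # -> True
#     isasyncgenfunction(ag)    # -> True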
def isasyncgen(object):
"""Return true if the object is an asynchronous generator."""
return isinstance(object, types.AsyncGeneratorType)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
__iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def iscoroutine(object):
"""Return true if the object is a coroutine."""
return isinstance(object, types.CoroutineType)
def isawaitable(object):
"""Return true if object can be passed to an ``await`` expression."""
return (isinstance(object, types.CoroutineType) or
isinstance(object, types.GeneratorType) and
bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or
isinstance(object, collections.abc.Awaitable))
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including *, ** args
or keyword only arguments)
co_code string of raw compiled bytecode
co_cellvars tuple of names of cell variables
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
| 16=nested | 32=generator | 64=nofree | 128=coroutine
| 256=iterable_coroutine | 512=async_generator
co_freevars tuple of names of free variables
co_kwonlyargcount number of keyword only arguments (not including ** arg)
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
if not isinstance(object, type):
return False
if object.__flags__ & TPFLAGS_IS_ABSTRACT:
return True
if not issubclass(type(object), abc.ABCMeta):
return False
if hasattr(object, '__abstractmethods__'):
# It looks like ABCMeta.__new__ has finished running;
# TPFLAGS_IS_ABSTRACT should have been accurate.
return False
# It looks like ABCMeta.__new__ has not finished running yet; we're
# probably in __init_subclass__. We'll look for abstractmethods manually.
for name, value in object.__dict__.items():
if getattr(value, "__isabstractmethod__", False):
return True
for base in object.__bases__:
for name in getattr(base, "__abstractmethods__", ()):
value = getattr(object, name, None)
if getattr(value, "__isabstractmethod__", False):
return True
return False
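# Editor's example (illustrative, not upstream code): ABCMeta sets
# TPFLAGS_IS_ABSTRACT on classes with unoverridden abstract methods:
#
#     class Base(abc.ABC):
#         @abc.abstractmethod
#         def run(self): ...
#
#     isabstract(Base)   # -> True
#     isabstract(int)    # -> False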
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
if isclass(object):
mro = (object,) + getmro(object)
else:
mro = ()
results = []
processed = set()
names = dir(object)
    # add any DynamicClassAttributes to the list of names if object is a class;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists
try:
for base in object.__bases__:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
except AttributeError:
pass
for key in names:
# First try to get the value via getattr. Some descriptors don't
# like calling their __get__ (see bug #1785), so fall back to
# looking in the __dict__.
try:
value = getattr(object, key)
# handle the duplicate key
if key in processed:
raise AttributeError
except AttributeError:
for base in mro:
if key in base.__dict__:
value = base.__dict__[key]
break
else:
# could be a (currently) missing slot member, or a buggy
# __dir__; discard and move on
continue
if not predicate or predicate(value):
results.append((key, value))
processed.add(key)
results.sort(key=lambda pair: pair[0])
return results
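# Editor's example (illustrative, not upstream code): results come back
# sorted by name and can be filtered with the is*() predicates above:
#
#     class C:
#         x = 1
#         def m(self): pass
#
#     [name for name, _ in getmembers(C, isfunction)]   # -> ['m']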
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method or descriptor
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained by calling getattr; if this fails, or if the
resulting object does not live anywhere in the class' mro (including
metaclasses) then the object is looked up in the defining class's
dict (found by walking the mro).
If one of the items in dir(cls) is stored in the metaclass it will now
be discovered and not have None be listed as the class in which it was
defined. Any items whose home class cannot be discovered are skipped.
"""
mro = getmro(cls)
metamro = getmro(type(cls)) # for attributes stored in the metaclass
metamro = tuple([cls for cls in metamro if cls not in (type, object)])
class_bases = (cls,) + mro
all_bases = class_bases + metamro
names = dir(cls)
    # add any DynamicClassAttributes to the list of names;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists.
for base in mro:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
result = []
processed = set()
for name in names:
# Get the object associated with the name, and where it was defined.
# Normal objects will be looked up with both getattr and directly in
# its class' dict (in case getattr fails [bug #1785], and also to look
# for a docstring).
# For DynamicClassAttributes on the second pass we only look in the
# class's dict.
#
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
homecls = None
get_obj = None
dict_obj = None
if name not in processed:
try:
if name == '__dict__':
raise Exception("__dict__ is special, don't want the proxy")
get_obj = getattr(cls, name)
except Exception as exc:
pass
else:
homecls = getattr(get_obj, "__objclass__", homecls)
if homecls not in class_bases:
# if the resulting object does not live somewhere in the
# mro, drop it and search the mro manually
homecls = None
last_cls = None
# first look in the classes
for srch_cls in class_bases:
srch_obj = getattr(srch_cls, name, None)
if srch_obj is get_obj:
last_cls = srch_cls
# then check the metaclasses
for srch_cls in metamro:
try:
srch_obj = srch_cls.__getattr__(cls, name)
except AttributeError:
continue
if srch_obj is get_obj:
last_cls = srch_cls
if last_cls is not None:
homecls = last_cls
for base in all_bases:
if name in base.__dict__:
dict_obj = base.__dict__[name]
if homecls not in metamro:
homecls = base
break
if homecls is None:
# unable to locate the attribute anywhere, most likely due to
# buggy custom __dir__; discard and move on
continue
obj = get_obj if get_obj is not None else dict_obj
# Classify the object or its descriptor.
if isinstance(dict_obj, staticmethod):
kind = "static method"
obj = dict_obj
elif isinstance(dict_obj, classmethod):
kind = "class method"
obj = dict_obj
elif isinstance(dict_obj, property):
kind = "property"
obj = dict_obj
elif isroutine(obj):
kind = "method"
else:
kind = "data"
result.append(Attribute(name, kind, homecls, obj))
processed.add(name)
return result
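# Editor's example (illustrative, not upstream code): the kind is taken from
# the descriptor found in the defining class's __dict__:
#
#     class C:
#         @staticmethod
#         def s(): pass
#
#     [a.kind for a in classify_class_attrs(C) if a.name == 's']
#     # -> ['static method']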
# ----------------------------------------------------------- class helpers
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
return cls.__mro__
# -------------------------------------------------------- function helpers
def unwrap(func, *, stop=None):
"""Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
*stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example,
:func:`signature` uses this to stop unwrapping if any object in the
chain has a ``__signature__`` attribute defined.
:exc:`ValueError` is raised if a cycle is encountered.
"""
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func # remember the original func for error reporting
# Memoise by id to tolerate non-hashable objects, but store objects to
# ensure they aren't destroyed, which would allow their IDs to be reused.
memo = {id(f): f}
recursion_limit = sys.getrecursionlimit()
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if (id_func in memo) or (len(memo) >= recursion_limit):
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo[id_func] = func
return func
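# Editor's example (illustrative, not upstream code): functools.wraps() sets
# __wrapped__, which unwrap() follows back to the original callable:
#
#     def plain(): pass
#
#     @functools.wraps(plain)
#     def wrapper(*args, **kwargs):
#         return plain(*args, **kwargs)
#
#     unwrap(wrapper) is plain   # -> True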
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = line.expandtabs()
return len(expline) - len(expline.lstrip())
def _findclass(func):
cls = sys.modules.get(func.__module__)
if cls is None:
return None
for name in func.__qualname__.split('.')[:-1]:
cls = getattr(cls, name)
if not isclass(cls):
return None
return cls
def _finddoc(obj):
if isclass(obj):
for base in obj.__mro__:
if base is not object:
try:
doc = base.__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
if ismethod(obj):
name = obj.__func__.__name__
self = obj.__self__
if (isclass(self) and
getattr(getattr(self, name, None), '__func__') is obj.__func__):
# classmethod
cls = self
else:
cls = self.__class__
elif isfunction(obj):
name = obj.__name__
cls = _findclass(obj)
if cls is None or getattr(cls, name) is not obj:
return None
elif isbuiltin(obj):
name = obj.__name__
self = obj.__self__
if (isclass(self) and
self.__qualname__ + '.' + name == obj.__qualname__):
# classmethod
cls = self
else:
cls = self.__class__
# Should be tested before isdatadescriptor().
elif isinstance(obj, property):
func = obj.fget
name = func.__name__
cls = _findclass(func)
if cls is None or getattr(cls, name) is not obj:
return None
elif ismethoddescriptor(obj) or isdatadescriptor(obj):
name = obj.__name__
cls = obj.__objclass__
if getattr(cls, name) is not obj:
return None
else:
return None
for base in cls.__mro__:
try:
doc = getattr(base, name).__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if doc is None:
try:
doc = _finddoc(object)
except (AttributeError, TypeError):
return None
if not isinstance(doc, str):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = doc.expandtabs().split('\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxsize
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxsize:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return '\n'.join(lines)
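# Editor's example (illustrative, not upstream code): the first line keeps
# its text while the common margin of the later lines is stripped:
#
#     cleandoc('Title\n    indented body')   # -> 'Title\nindented body'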
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in module'.format(object))
if isclass(object):
if hasattr(object, '__module__'):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in class'.format(object))
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('{!r} is not a module, class, method, '
'function, traceback, frame, or code object'.format(object))
def getmodulename(path):
"""Return the module name for a given file, or None."""
fname = os.path.basename(path)
# Check for paths that look like an actual module file
suffixes = [(-len(suffix), suffix)
for suffix in importlib.machinery.all_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix in suffixes:
if fname.endswith(suffix):
return fname[:neglen]
return None
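# Editor's example (illustrative, not upstream code): only suffixes reported
# by importlib.machinery.all_suffixes() qualify:
#
#     getmodulename('/tmp/spam.py')    # -> 'spam'
#     getmodulename('/tmp/spam.txt')   # -> None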
def getsourcefile(object):
"""Return the filename that can be used to locate an object's source.
Return None if no way can be identified to get the source.
"""
filename = getfile(object)
all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
if any(filename.endswith(s) for s in all_bytecode_suffixes):
filename = (os.path.splitext(filename)[0] +
importlib.machinery.SOURCE_SUFFIXES[0])
elif any(filename.endswith(s) for s in
importlib.machinery.EXTENSION_SUFFIXES):
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if getattr(getmodule(object, filename), '__loader__', None) is not None:
return filename
# or it is in the linecache
if filename in linecache.cache:
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in list(sys.modules.items()):
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['builtins']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An OSError
is raised if the source code cannot be retrieved."""
file = getsourcefile(object)
if file:
# Invalidate cache if needed.
linecache.checkcache(file)
else:
file = getfile(object)
# Allow filenames in form of "<something>" to pass through.
        # `doctest` monkeypatches the `linecache` module to enable
        # inspection, so allow `linecache.getlines` to be called.
if not (file.startswith('<') and file.endswith('>')):
raise OSError('source code not available')
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise OSError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise OSError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise OSError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise OSError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (OSError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and lines[start].strip() in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(lines[end].expandtabs())
end = end + 1
return ''.join(comments)
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and lines[end].lstrip()[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [lines[end].expandtabs().lstrip()]
if end > 0:
end = end - 1
comment = lines[end].expandtabs().lstrip()
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = lines[end].expandtabs().lstrip()
while comments and comments[0].strip() == '#':
comments[:1] = []
while comments and comments[-1].strip() == '#':
comments[-1:] = []
return ''.join(comments)
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.indecorator = False
self.decoratorhasargs = False
self.last = 1
def tokeneater(self, type, token, srowcol, erowcol, line):
if not self.started and not self.indecorator:
# skip any decorators
if token == "@":
self.indecorator = True
# look for the first "def", "class" or "lambda"
elif token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif token == "(":
if self.indecorator:
self.decoratorhasargs = True
elif token == ")":
if self.indecorator:
self.indecorator = False
self.decoratorhasargs = False
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srowcol[0]
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
# hitting a NEWLINE when in a decorator without args
# ends the decorator
if self.indecorator and not self.decoratorhasargs:
self.indecorator = False
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
            # the end of matching indent/dedent pairs ends a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level ends the previous
# block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokens = tokenize.generate_tokens(iter(lines).__next__)
for _token in tokens:
blockfinder.tokeneater(*_token)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An OSError is
raised if the source code cannot be retrieved."""
object = unwrap(object)
lines, lnum = findsource(object)
if istraceback(object):
object = object.tb_frame
# for module or frame that corresponds to module, return all source lines
if (ismodule(object) or
(isframe(object) and object.f_code.co_name == "<module>")):
return lines, 0
else:
return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
OSError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return ''.join(lines)
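# Editor's example (illustrative, not upstream code; assumes this module's
# own source file is reachable on disk or via the linecache):
#
#     getsource(indentsize).startswith('def indentsize')   # -> True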
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=False):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
if c not in children[parent]:
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
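# Editor's example (illustrative, not upstream code): parents outside the
# given list (here, object) are added as roots:
#
#     class A: pass
#     class B(A): pass
#
#     getclasstree([A, B])
#     # -> [(object, ()), [(A, (object,)), [(B, (A,))]]]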
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where
'args' is the list of argument names. Keyword-only arguments are
appended. 'varargs' and 'varkw' are the names of the * and **
arguments or None."""
args, varargs, kwonlyargs, varkw = _getfullargs(co)
return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
"""Get information about the arguments accepted by a code object.
Four things are returned: (args, varargs, kwonlyargs, varkw), where
'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('{!r} is not a code object'.format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount
args = list(names[:nargs])
kwonlyargs = list(names[nargs:nargs+nkwargs])
step = 0
nargs += nkwargs
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, kwonlyargs, varkw
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's parameters.
A tuple of four things is returned: (args, varargs, keywords, defaults).
'args' is a list of the argument names, including keyword-only argument names.
'varargs' and 'keywords' are the names of the * and ** parameters or None.
'defaults' is an n-tuple of the default values of the last n parameters.
This function is deprecated, as it does not support annotations or
keyword-only parameters and will raise ValueError if either is present
on the supplied callable.
For a more structured introspection API, use inspect.signature() instead.
Alternatively, use getfullargspec() for an API with a similar namedtuple
based interface, but full support for annotations and keyword-only
parameters.
Deprecated since Python 3.5, use `inspect.getfullargspec()`.
"""
warnings.warn("inspect.getargspec() is deprecated since Python 3.0, "
"use inspect.signature() or inspect.getfullargspec()",
DeprecationWarning, stacklevel=2)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
getfullargspec(func)
if kwonlyargs or ann:
raise ValueError("Function has keyword-only parameters or annotations"
", use getfullargspec() API which can support them")
return ArgSpec(args, varargs, varkw, defaults)
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
"""Get the names and default values of a callable object's parameters.
A tuple of seven things is returned:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the parameter names.
'varargs' and 'varkw' are the names of the * and ** parameters or None.
'defaults' is an n-tuple of the default values of the last n parameters.
'kwonlyargs' is a list of keyword-only parameter names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping parameter names to annotations.
Notable differences from inspect.signature():
- the "self" parameter is always reported, even for bound methods
    - wrapper chains defined by __wrapped__ are *not* unwrapped automatically
"""
try:
# Re: `skip_bound_arg=False`
#
        # There is a notable difference in behaviour between getfullargspec
        # and Signature: the former always returns the 'self' parameter for
        # bound methods, whereas Signature always shows the actual calling
        # signature of the passed object.
        #
        # To simulate this behaviour, we "unbind" bound methods, to trick
        # inspect.signature into always returning their first parameter
        # ("self", usually)
# Re: `follow_wrapper_chains=False`
#
# getfullargspec() historically ignored __wrapped__ attributes,
# so we ensure that remains the case in 3.3+
sig = _signature_from_callable(func,
follow_wrapper_chains=False,
skip_bound_arg=False,
sigcls=Signature)
except Exception as ex:
        # Most of the time 'signature' will raise ValueError.
        # But it can also raise AttributeError, and perhaps something
        # else. So to be fully backwards compatible, we catch all
        # possible exceptions here, and reraise a TypeError.
raise TypeError('unsupported callable') from ex
args = []
varargs = None
varkw = None
kwonlyargs = []
    defaults = ()
    annotations = {}
    kwdefaults = {}
if sig.return_annotation is not sig.empty:
annotations['return'] = sig.return_annotation
for param in sig.parameters.values():
kind = param.kind
name = param.name
if kind is _POSITIONAL_ONLY:
args.append(name)
elif kind is _POSITIONAL_OR_KEYWORD:
args.append(name)
if param.default is not param.empty:
defaults += (param.default,)
elif kind is _VAR_POSITIONAL:
varargs = name
elif kind is _KEYWORD_ONLY:
kwonlyargs.append(name)
if param.default is not param.empty:
kwdefaults[name] = param.default
elif kind is _VAR_KEYWORD:
varkw = name
if param.annotation is not param.empty:
annotations[name] = param.annotation
if not kwdefaults:
# compatibility with 'func.__kwdefaults__'
kwdefaults = None
if not defaults:
# compatibility with 'func.__defaults__'
defaults = None
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwdefaults, annotations)
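# A hedged usage sketch of getfullargspec() on a hypothetical function
# (the namedtuple repr is wrapped here for readability):
#
#   >>> def f(a, b=1, *args, c, d=2, **kw): pass
#   >>> getfullargspec(f)
#   FullArgSpec(args=['a', 'b'], varargs='args', varkw='kw', defaults=(1,),
#               kwonlyargs=['c', 'd'], kwonlydefaults={'d': 2}, annotations={})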
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
def formatannotation(annotation, base_module=None):
if getattr(annotation, '__module__', None) == 'typing':
return repr(annotation).replace('typing.', '')
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', base_module):
return annotation.__qualname__
return annotation.__module__+'.'+annotation.__qualname__
return repr(annotation)
def formatannotationrelativeto(object):
module = getattr(object, '__module__', None)
def _formatannotation(annotation):
return formatannotation(annotation, module)
return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
kwonlyargs=(), kwonlydefaults={}, annotations={},
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
formatreturns=lambda text: ' -> ' + text,
formatannotation=formatannotation):
"""Format an argument spec from the values returned by getfullargspec.
The first seven arguments are (args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations). The other five arguments
are the corresponding optional formatting functions that are called to
turn names and values into strings. The last argument is an optional
function to format the sequence of arguments."""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ': ' + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append('*')
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = '(' + ', '.join(specs) + ')'
if 'return' in annotations:
result += formatreturns(formatannotation(annotations['return']))
return result
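# Sketch: feeding getfullargspec() output straight into formatargspec()
# round-trips a readable signature ('f' as in the hypothetical example
# above):
#
#   >>> def f(a, b=1, *args, c, d=2, **kw): pass
#   >>> formatargspec(*getfullargspec(f))
#   '(a, b=1, *args, c, d=2, **kw)'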
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value)):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(convert(args[i]))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(*names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(*func_and_positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
func = func_and_positional[0]
positional = func_and_positional[1:]
spec = getfullargspec(func)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
f_name = func.__name__
arg2value = {}
if ismethod(func) and func.__self__ is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.__self__,) + positional
num_pos = len(positional)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
n = min(num_pos, num_args)
for i in range(n):
arg2value[args[i]] = positional[i]
if varargs:
arg2value[varargs] = tuple(positional[n:])
possible_kwargs = set(args + kwonlyargs)
if varkw:
arg2value[varkw] = {}
for kw, value in named.items():
if kw not in possible_kwargs:
if not varkw:
raise TypeError("%s() got an unexpected keyword argument %r" %
(f_name, kw))
arg2value[varkw][kw] = value
continue
if kw in arg2value:
raise TypeError("%s() got multiple values for argument %r" %
(f_name, kw))
arg2value[kw] = value
if num_pos > num_args and not varargs:
_too_many(f_name, args, kwonlyargs, varargs, num_defaults,
num_pos, arg2value)
if num_pos < num_args:
req = args[:num_args - num_defaults]
for arg in req:
if arg not in arg2value:
_missing_arguments(f_name, req, True, arg2value)
for i, arg in enumerate(args[num_args - num_defaults:]):
if arg not in arg2value:
arg2value[arg] = defaults[i]
missing = 0
for kwarg in kwonlyargs:
if kwarg not in arg2value:
if kwonlydefaults and kwarg in kwonlydefaults:
arg2value[kwarg] = kwonlydefaults[kwarg]
else:
missing += 1
if missing:
_missing_arguments(f_name, kwonlyargs, False, arg2value)
return arg2value
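# A hedged example of getcallargs() on a hypothetical function; items are
# shown sorted because the key order of the returned dict is an
# implementation detail:
#
#   >>> def f(a, b=1, *args, c, **kw): pass
#   >>> sorted(getcallargs(f, 10, 20, 30, c=40, x=50).items())
#   [('a', 10), ('args', (30,)), ('b', 20), ('c', 40), ('kw', {'x': 50})]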
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError("'{!r}' is not a Python function".format(func))
code = func.__code__
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if func.__closure__ is None:
nonlocal_vars = {}
else:
nonlocal_vars = {
var : cell.cell_contents
for var, cell in zip(code.co_freevars, func.__closure__)
}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = func.__globals__
builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
if ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in code.co_names:
if name in ("None", "True", "False"):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue
try:
global_vars[name] = global_ns[name]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name]
except KeyError:
unbound_names.add(name)
return ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
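# Illustrative sketch with a hypothetical closure: 'x' resolves as a
# nonlocal, 'len' as a builtin, and nothing is left unbound (the repr is
# wrapped here for readability):
#
#   >>> def outer():
#   ...     x = 1
#   ...     def inner():
#   ...         return len([x])
#   ...     return inner
#   >>> getclosurevars(outer())
#   ClosureVars(nonlocals={'x': 1}, globals={},
#               builtins={'len': <built-in function len>}, unbound=set())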
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('{!r} is not a frame or traceback object'.format(frame))
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except OSError:
lines = index = None
else:
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
FrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields)
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
frameinfo = (frame,) + getframeinfo(frame, context)
framelist.append(FrameInfo(*frameinfo))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)
framelist.append(FrameInfo(*frameinfo))
tb = tb.tb_next
return framelist
def currentframe():
"""Return the frame of the caller or None if this is not possible."""
return sys._getframe(1) if hasattr(sys, "_getframe") else None
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
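# A small sketch of stack(): the first record describes the caller's own
# frame, so at the top level of an interactive session the function name
# is '<module>':
#
#   >>> stack()[0].function
#   '<module>'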
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
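    # dict.get is called unbound so that an instance __dict__ that happens
    # to be a dict subclass cannot run overridden lookup logic here.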
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType and
class_dict.__name__ == "__dict__" and
class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def getattr_static(obj, attr, default=_sentinel):
"""Retrieve attributes without triggering dynamic lookup via the
descriptor protocol, __getattr__ or __getattribute__.
Note: this function may not be able to retrieve all attributes
that getattr can fetch (like dynamically created attributes)
and may find attributes that getattr can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases. See the
documentation for details.
"""
instance_result = _sentinel
if not _is_type(obj):
klass = type(obj)
dict_attr = _shadowed_dict(klass)
if (dict_attr is _sentinel or
type(dict_attr) is types.MemberDescriptorType):
instance_result = _check_instance(obj, attr)
else:
klass = obj
klass_result = _check_class(klass, attr)
if instance_result is not _sentinel and klass_result is not _sentinel:
if (_check_class(type(klass_result), '__get__') is not _sentinel and
_check_class(type(klass_result), '__set__') is not _sentinel):
return klass_result
if instance_result is not _sentinel:
return instance_result
if klass_result is not _sentinel:
return klass_result
if obj is klass:
# for types we check the metaclass too
for entry in _static_getmro(type(klass)):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
if default is not _sentinel:
return default
raise AttributeError(attr)
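# A hedged illustration of how getattr_static() differs from getattr():
# a property on a hypothetical class is returned as the descriptor object
# itself rather than being invoked (memory address elided):
#
#   >>> class C:
#   ...     @property
#   ...     def p(self):
#   ...         return 1
#   >>> getattr(C(), 'p')
#   1
#   >>> getattr_static(C(), 'p')
#   <property object at 0x...>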
# ------------------------------------------------ generator introspection
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
"""Get current state of a generator-iterator.
Possible states are:
GEN_CREATED: Waiting to start execution.
GEN_RUNNING: Currently being executed by the interpreter.
GEN_SUSPENDED: Currently suspended at a yield expression.
GEN_CLOSED: Execution has completed.
"""
if generator.gi_running:
return GEN_RUNNING
if generator.gi_frame is None:
return GEN_CLOSED
if generator.gi_frame.f_lasti == -1:
return GEN_CREATED
return GEN_SUSPENDED
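# The generator state transitions, sketched with a trivial generator:
#
#   >>> def gen(): yield 1
#   >>> g = gen()
#   >>> getgeneratorstate(g)
#   'GEN_CREATED'
#   >>> next(g)
#   1
#   >>> getgeneratorstate(g)
#   'GEN_SUSPENDED'
#   >>> g.close()
#   >>> getgeneratorstate(g)
#   'GEN_CLOSED'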
def getgeneratorlocals(generator):
"""
    Get the mapping of generator local variables to their current values.
    A dict is returned, with the local variable names as keys and their
    currently bound values as values."""
if not isgenerator(generator):
raise TypeError("'{!r}' is not a Python generator".format(generator))
frame = getattr(generator, "gi_frame", None)
if frame is not None:
        return frame.f_locals
else:
return {}
# ------------------------------------------------ coroutine introspection
CORO_CREATED = 'CORO_CREATED'
CORO_RUNNING = 'CORO_RUNNING'
CORO_SUSPENDED = 'CORO_SUSPENDED'
CORO_CLOSED = 'CORO_CLOSED'
def getcoroutinestate(coroutine):
"""Get current state of a coroutine object.
Possible states are:
CORO_CREATED: Waiting to start execution.
CORO_RUNNING: Currently being executed by the interpreter.
CORO_SUSPENDED: Currently suspended at an await expression.
CORO_CLOSED: Execution has completed.
"""
if coroutine.cr_running:
return CORO_RUNNING
if coroutine.cr_frame is None:
return CORO_CLOSED
if coroutine.cr_frame.f_lasti == -1:
return CORO_CREATED
return CORO_SUSPENDED
def getcoroutinelocals(coroutine):
"""
    Get the mapping of coroutine local variables to their current values.
    A dict is returned, with the local variable names as keys and their
    currently bound values as values."""
frame = getattr(coroutine, "cr_frame", None)
if frame is not None:
return frame.f_locals
else:
return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_ClassMethodWrapper = type(int.__dict__['from_bytes'])
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
_ClassMethodWrapper,
types.BuiltinFunctionType)
def _signature_get_user_defined_method(cls, method_name):
"""Private helper. Checks if ``cls`` has an attribute
named ``method_name`` and returns it only if it is a
    pure Python function.
"""
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
            # Once '__signature__' is added to 'C'-level
            # callables, this check won't be necessary
return meth
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
"""Private helper to calculate how 'wrapped_sig' signature will
look like after applying a 'functools.partial' object (or alike)
on it.
"""
old_params = wrapped_sig.parameters
new_params = OrderedDict(old_params.items())
partial_args = partial.args or ()
partial_keywords = partial.keywords or {}
if extra_args:
partial_args = extra_args + partial_args
try:
ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {!r} has incorrect arguments'.format(partial)
raise ValueError(msg) from ex
transform_to_kwonly = False
for param_name, param in old_params.items():
try:
arg_value = ba.arguments[param_name]
except KeyError:
pass
else:
if param.kind is _POSITIONAL_ONLY:
# If positional-only parameter is bound by partial,
# it effectively disappears from the signature
new_params.pop(param_name)
continue
if param.kind is _POSITIONAL_OR_KEYWORD:
if param_name in partial_keywords:
# This means that this parameter, and all parameters
# after it should be keyword-only (and var-positional
# should be removed). Here's why. Consider the following
# function:
# foo(a, b, *args, c):
# pass
#
# "partial(foo, a='spam')" will have the following
# signature: "(*, a='spam', b, c)". Because attempting
# to call that partial with "(10, 20)" arguments will
# raise a TypeError, saying that "a" argument received
# multiple values.
transform_to_kwonly = True
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
else:
# was passed as a positional argument
new_params.pop(param.name)
continue
if param.kind is _KEYWORD_ONLY:
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
if transform_to_kwonly:
assert param.kind is not _POSITIONAL_ONLY
if param.kind is _POSITIONAL_OR_KEYWORD:
new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
new_params[param_name] = new_param
new_params.move_to_end(param_name)
elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
new_params.move_to_end(param_name)
elif param.kind is _VAR_POSITIONAL:
new_params.pop(param.name)
return wrapped_sig.replace(parameters=new_params.values())
def _signature_bound_method(sig):
"""Private helper to transform signatures for unbound
functions to bound methods.
"""
params = tuple(sig.parameters.values())
if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
raise ValueError('invalid method signature')
kind = params[0].kind
if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
# Drop first parameter:
# '(p1, p2[, ...])' -> '(p2[, ...])'
params = params[1:]
else:
if kind is not _VAR_POSITIONAL:
# Unless we add a new parameter type we never
# get here
raise ValueError('invalid argument type')
# It's a var-positional parameter.
# Do nothing. '(*args[, ...])' -> '(*args[, ...])'
return sig.replace(parameters=params)
def _signature_is_builtin(obj):
"""Private helper to test if `obj` is a callable that might
support Argument Clinic's __text_signature__ protocol.
"""
return (isbuiltin(obj) or
ismethoddescriptor(obj) or
isinstance(obj, _NonUserDefinedCallables) or
# Can't test 'isinstance(type)' here, as it would
# also be True for regular python classes
obj in (type, object))
def _signature_is_functionlike(obj):
"""Private helper to test if `obj` is a duck type of FunctionType.
    A good example of such objects is functions compiled with
    Cython: they have all the attributes that a pure Python function
    would have, but their code is statically compiled.
"""
if not callable(obj) or isclass(obj):
# All function-like objects are obviously callables,
# and not classes.
return False
name = getattr(obj, '__name__', None)
code = getattr(obj, '__code__', None)
defaults = getattr(obj, '__defaults__', _void) # Important to use _void ...
kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here
annotations = getattr(obj, '__annotations__', None)
return (isinstance(code, types.CodeType) and
isinstance(name, str) and
(defaults is None or isinstance(defaults, tuple)) and
(kwdefaults is None or isinstance(kwdefaults, dict)) and
isinstance(annotations, dict))
def _signature_get_bound_param(spec):
""" Private helper to get first parameter name from a
__text_signature__ of a builtin method, which should
be in the following format: '($param1, ...)'.
Assumptions are that the first argument won't have
a default value or an annotation.
"""
assert spec.startswith('($')
pos = spec.find(',')
if pos == -1:
pos = spec.find(')')
cpos = spec.find(':')
assert cpos == -1 or cpos > pos
cpos = spec.find('=')
assert cpos == -1 or cpos > pos
return spec[2:pos]
def _signature_strip_non_python_syntax(signature):
"""
Private helper function. Takes a signature in Argument Clinic's
extended signature format.
Returns a tuple of three things:
* that signature re-rendered in standard Python syntax,
* the index of the "self" parameter (generally 0), or None if
the function does not have a "self" parameter, and
* the index of the last "positional only" parameter,
or None if the signature has no positional-only parameters.
"""
if not signature:
return signature, None, None
self_parameter = None
last_positional_only = None
lines = [l.encode('ascii') for l in signature.split('\n')]
generator = iter(lines).__next__
token_stream = tokenize.tokenize(generator)
delayed_comma = False
skip_next_comma = False
text = []
add = text.append
current_parameter = 0
OP = token.OP
ERRORTOKEN = token.ERRORTOKEN
# token stream always starts with ENCODING token, skip it
t = next(token_stream)
assert t.type == tokenize.ENCODING
for t in token_stream:
type, string = t.type, t.string
if type == OP:
if string == ',':
if skip_next_comma:
skip_next_comma = False
else:
assert not delayed_comma
delayed_comma = True
current_parameter += 1
continue
if string == '/':
assert not skip_next_comma
assert last_positional_only is None
skip_next_comma = True
last_positional_only = current_parameter - 1
continue
if (type == ERRORTOKEN) and (string == '$'):
assert self_parameter is None
self_parameter = current_parameter
continue
if delayed_comma:
delayed_comma = False
if not ((type == OP) and (string == ')')):
add(', ')
add(string)
if (string == ','):
add(' ')
clean_signature = ''.join(text)
return clean_signature, self_parameter, last_positional_only
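# A worked example of the transform on a hypothetical Argument Clinic
# string: the '$' marker yields a "self" index of 0, and the '/' marks
# parameter index 1 as the last positional-only one:
#
#   >>> _signature_strip_non_python_syntax('($self, obj, /, name)')
#   ('(self, obj, name)', 0, 1)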
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
"""Private helper to parse content of '__text_signature__'
and return a Signature based on it.
"""
Parameter = cls._parameter_cls
clean_signature, self_parameter, last_positional_only = \
_signature_strip_non_python_syntax(s)
program = "def foo" + clean_signature + ": pass"
try:
module = ast.parse(program)
except SyntaxError:
module = None
if not isinstance(module, ast.Module):
raise ValueError("{!r} builtin has invalid signature".format(obj))
f = module.body[0]
parameters = []
empty = Parameter.empty
invalid = object()
module = None
module_dict = {}
module_name = getattr(obj, '__module__', None)
if module_name:
module = sys.modules.get(module_name, None)
if module:
module_dict = module.__dict__
sys_module_dict = sys.modules.copy()
def parse_name(node):
assert isinstance(node, ast.arg)
        if node.annotation is not None:
raise ValueError("Annotations are not currently supported")
return node.arg
def wrap_value(s):
try:
value = eval(s, module_dict)
except NameError:
try:
value = eval(s, sys_module_dict)
except NameError:
raise RuntimeError()
if isinstance(value, str):
return ast.Str(value)
if isinstance(value, (int, float)):
return ast.Num(value)
if isinstance(value, bytes):
return ast.Bytes(value)
if value in (True, False, None):
return ast.NameConstant(value)
raise RuntimeError()
class RewriteSymbolics(ast.NodeTransformer):
def visit_Attribute(self, node):
a = []
n = node
while isinstance(n, ast.Attribute):
a.append(n.attr)
n = n.value
if not isinstance(n, ast.Name):
raise RuntimeError()
a.append(n.id)
value = ".".join(reversed(a))
return wrap_value(value)
def visit_Name(self, node):
if not isinstance(node.ctx, ast.Load):
raise ValueError()
return wrap_value(node.id)
def p(name_node, default_node, default=empty):
name = parse_name(name_node)
if name is invalid:
return None
if default_node and default_node is not _empty:
try:
default_node = RewriteSymbolics().visit(default_node)
o = ast.literal_eval(default_node)
except ValueError:
o = invalid
if o is invalid:
return None
default = o if o is not invalid else default
parameters.append(Parameter(name, kind, default=default, annotation=empty))
# non-keyword-only parameters
args = reversed(f.args.args)
defaults = reversed(f.args.defaults)
    args_and_defaults = itertools.zip_longest(args, defaults, fillvalue=None)
if last_positional_only is not None:
kind = Parameter.POSITIONAL_ONLY
else:
kind = Parameter.POSITIONAL_OR_KEYWORD
    for i, (name, default) in enumerate(reversed(list(args_and_defaults))):
p(name, default)
if i == last_positional_only:
kind = Parameter.POSITIONAL_OR_KEYWORD
# *args
if f.args.vararg:
kind = Parameter.VAR_POSITIONAL
p(f.args.vararg, empty)
# keyword-only arguments
kind = Parameter.KEYWORD_ONLY
for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
p(name, default)
# **kwargs
if f.args.kwarg:
kind = Parameter.VAR_KEYWORD
p(f.args.kwarg, empty)
if self_parameter is not None:
# Possibly strip the bound argument:
# - We *always* strip first bound argument if
# it is a module.
# - We don't strip first bound argument if
# skip_bound_arg is False.
assert parameters
_self = getattr(obj, '__self__', None)
self_isbound = _self is not None
self_ismodule = ismodule(_self)
if self_isbound and (self_ismodule or skip_bound_arg):
parameters.pop(0)
else:
# for builtins, self parameter is always positional-only!
p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
parameters[0] = p
return cls(parameters, return_annotation=cls.empty)
def _signature_from_builtin(cls, func, skip_bound_arg=True):
"""Private helper function to get signature for
builtin callables.
"""
if not _signature_is_builtin(func):
raise TypeError("{!r} is not a Python builtin "
"function".format(func))
s = getattr(func, "__text_signature__", None)
if not s:
raise ValueError("no signature found for builtin {!r}".format(func))
return _signature_fromstr(cls, func, s, skip_bound_arg)
def _signature_from_function(cls, func):
"""Private helper: constructs Signature for the given python function."""
is_duck_function = False
if not isfunction(func):
if _signature_is_functionlike(func):
is_duck_function = True
else:
# If it's not a pure Python function, and not a duck type
# of pure function:
raise TypeError('{!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = func_code.co_kwonlyargcount
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = func.__annotations__
defaults = func.__defaults__
kwdefaults = func.__kwdefaults__
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & CO_VARARGS:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & CO_VARKEYWORDS:
index = pos_count + keyword_only_count
if func_code.co_flags & CO_VARARGS:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
    # If 'func' is a pure Python function, don't validate the
    # parameters list (for correct order and defaults); it should be OK.
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=is_duck_function)
def _signature_from_callable(obj, *,
follow_wrapper_chains=True,
skip_bound_arg=True,
sigcls):
"""Private helper function to get signature for arbitrary
callable objects.
"""
if not callable(obj):
raise TypeError('{!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
sig = _signature_from_callable(
obj.__func__,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
# Was this function wrapped by a decorator?
if follow_wrapper_chains:
obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))
if isinstance(obj, types.MethodType):
# If the unwrapped object is a *method*, we might want to
# skip its first parameter (self).
# See test_signature_wrapped_bound_method for details.
return _signature_from_callable(
obj,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
if not isinstance(sig, Signature):
raise TypeError(
'unexpected object {!r} in __signature__ '
'attribute'.format(sig))
return sig
try:
partialmethod = obj._partialmethod
except AttributeError:
pass
else:
if isinstance(partialmethod, functools.partialmethod):
            # Unbound partialmethod (see functools.partialmethod)
            # This means that we need to calculate the signature
            # as if it were a regular partial object, but taking into
            # account that the first positional argument
            # (usually `self` or `cls`) will not be passed
            # automatically (as it is for bound methods)
wrapped_sig = _signature_from_callable(
partialmethod.func,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
if first_wrapped_param.kind is Parameter.VAR_POSITIONAL:
                # First argument of the wrapped callable is `*args`, as in
                # `partialmethod(lambda *args: None)`.
return sig
else:
sig_params = tuple(sig.parameters.values())
assert (not sig_params or
first_wrapped_param is not sig_params[0])
new_params = (first_wrapped_param,) + sig_params
return sig.replace(parameters=new_params)
if isfunction(obj) or _signature_is_functionlike(obj):
# If it's a pure Python function, or an object that is duck type
# of a Python function (Cython functions, for instance), then:
return _signature_from_function(sigcls, obj)
if _signature_is_builtin(obj):
return _signature_from_builtin(sigcls, obj,
skip_bound_arg=skip_bound_arg)
if isinstance(obj, functools.partial):
wrapped_sig = _signature_from_callable(
obj.func,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
return _signature_get_partial(wrapped_sig, obj)
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = _signature_from_callable(
call,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _signature_get_user_defined_method(obj, '__new__')
if new is not None:
sig = _signature_from_callable(
new,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
else:
# Finally, we should have at least __init__ implemented
init = _signature_get_user_defined_method(obj, '__init__')
if init is not None:
sig = _signature_from_callable(
init,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
if sig is None:
            # At this point we know that `obj` is a class with no user-
            # defined '__init__', '__new__', or class-level '__call__'
for base in obj.__mro__[:-1]:
# Since '__text_signature__' is implemented as a
# descriptor that extracts text signature from the
# class docstring, if 'obj' is derived from a builtin
# class, its own '__text_signature__' may be 'None'.
# Therefore, we go through the MRO (except the last
# class in there, which is 'object') to find the first
# class with non-empty text signature.
try:
text_sig = base.__text_signature__
except AttributeError:
pass
else:
if text_sig:
# If 'obj' class has a __text_signature__ attribute:
# return a signature based on it
return _signature_fromstr(sigcls, obj, text_sig)
# No '__text_signature__' was found for the 'obj' class.
# Last option is to check if its '__init__' is
# object.__init__ or type.__init__.
if type not in obj.__mro__:
# We have a class (not metaclass), but no user-defined
# __init__ or __new__ for it
if (obj.__init__ is object.__init__ and
obj.__new__ is object.__new__):
# Return a signature of 'object' builtin.
return signature(object)
else:
raise ValueError(
'no signature found for builtin type {!r}'.format(obj))
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
try:
sig = _signature_from_callable(
call,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
except ValueError as ex:
msg = 'no signature found for {!r}'.format(obj)
raise ValueError(msg) from ex
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {!r} is not supported by signature'.format(obj))
class _void:
"""A private marker - used in Parameter & Signature."""
class _empty:
"""Marker object for Signature.empty and Parameter.empty."""
class _ParameterKind(enum.IntEnum):
POSITIONAL_ONLY = 0
POSITIONAL_OR_KEYWORD = 1
VAR_POSITIONAL = 2
KEYWORD_ONLY = 3
VAR_KEYWORD = 4
def __str__(self):
return self._name_
_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY
_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD
_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL
_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY
_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD
_PARAM_NAME_MAPPING = {
_POSITIONAL_ONLY: 'positional-only',
_POSITIONAL_OR_KEYWORD: 'positional or keyword',
_VAR_POSITIONAL: 'variadic positional',
_KEYWORD_ONLY: 'keyword-only',
_VAR_KEYWORD: 'variadic keyword'
}
_get_paramkind_descr = _PARAM_NAME_MAPPING.__getitem__
class Parameter:
"""Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is set to
`Parameter.empty`.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is set to
`Parameter.empty`.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
"""
__slots__ = ('_name', '_kind', '_default', '_annotation')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, *, default=_empty, annotation=_empty):
try:
self._kind = _ParameterKind(kind)
except ValueError:
raise ValueError(f'value {kind!r} is not a valid Parameter.kind')
if default is not _empty:
if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{} parameters cannot have default values'
msg = msg.format(_get_paramkind_descr(self._kind))
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is _empty:
raise ValueError('name is a required attribute for Parameter')
if not isinstance(name, str):
msg = 'name must be a str, not a {}'.format(type(name).__name__)
raise TypeError(msg)
if name[0] == '.' and name[1:].isdigit():
# These are implicit arguments generated by comprehensions. In
# order to provide a friendlier interface to users, we recast
# their name as "implicitN" and treat them as positional-only.
# See issue 19611.
if self._kind != _POSITIONAL_OR_KEYWORD:
msg = (
'implicit arguments must be passed as '
'positional or keyword arguments, not {}'
)
msg = msg.format(_get_paramkind_descr(self._kind))
raise ValueError(msg)
self._kind = _POSITIONAL_ONLY
name = 'implicit{}'.format(name[1:])
if not name.isidentifier():
raise ValueError('{!r} is not a valid parameter name'.format(name))
self._name = name
def __reduce__(self):
return (type(self),
(self._name, self._kind),
{'_default': self._default,
'_annotation': self._annotation})
def __setstate__(self, state):
self._default = state['_default']
self._annotation = state['_annotation']
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, *, name=_void, kind=_void,
annotation=_void, default=_void):
"""Creates a customized copy of the Parameter."""
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
return type(self)(name, kind, default=default, annotation=annotation)
def __str__(self):
kind = self.kind
formatted = self._name
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{}:{}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{}={}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{} "{}">'.format(self.__class__.__name__, self)
def __hash__(self):
return hash((self.name, self.kind, self.annotation, self.default))
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Parameter):
return NotImplemented
return (self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
class BoundArguments:
"""Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
"""
__slots__ = ('arguments', '_signature', '__weakref__')
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def apply_defaults(self):
"""Set default values for missing arguments.
For variable-positional arguments (*args) the default is an
empty tuple.
For variable-keyword arguments (**kwargs) the default is an
empty dict.
"""
arguments = self.arguments
new_arguments = []
for name, param in self._signature.parameters.items():
try:
new_arguments.append((name, arguments[name]))
except KeyError:
if param.default is not _empty:
val = param.default
elif param.kind is _VAR_POSITIONAL:
val = ()
elif param.kind is _VAR_KEYWORD:
val = {}
else:
# This BoundArguments was likely produced by
# Signature.bind_partial().
continue
new_arguments.append((name, val))
self.arguments = OrderedDict(new_arguments)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, BoundArguments):
return NotImplemented
return (self.signature == other.signature and
self.arguments == other.arguments)
def __setstate__(self, state):
self._signature = state['_signature']
self.arguments = state['arguments']
def __getstate__(self):
return {'_signature': self._signature, 'arguments': self.arguments}
def __repr__(self):
args = []
for arg, value in self.arguments.items():
args.append('{}={!r}'.format(arg, value))
return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args))
class Signature:
"""A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is set to `Signature.empty`.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior).
"""
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, *, return_annotation=_empty,
__validate_parameters__=True):
"""Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
"""
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
kind_defaults = False
for idx, param in enumerate(parameters):
kind = param.kind
name = param.name
if kind < top_kind:
msg = (
'wrong parameter order: {} parameter before {} '
'parameter'
)
msg = msg.format(_get_paramkind_descr(top_kind),
_get_paramkind_descr(kind))
raise ValueError(msg)
elif kind > top_kind:
kind_defaults = False
top_kind = kind
if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
if param.default is _empty:
if kind_defaults:
# No default for this parameter, but the
# previous parameter of the same kind had
# a default
msg = 'non-default argument follows default ' \
'argument'
raise ValueError(msg)
else:
# There is a default for this parameter.
kind_defaults = True
if name in params:
msg = 'duplicate parameter name: {!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = types.MappingProxyType(params)
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
"""Constructs Signature for the given python function.
Deprecated since Python 3.5, use `Signature.from_callable()`.
"""
warnings.warn("inspect.Signature.from_function() is deprecated since "
"Python 3.5, use Signature.from_callable()",
DeprecationWarning, stacklevel=2)
return _signature_from_function(cls, func)
@classmethod
def from_builtin(cls, func):
"""Constructs Signature for the given builtin function.
Deprecated since Python 3.5, use `Signature.from_callable()`.
"""
warnings.warn("inspect.Signature.from_builtin() is deprecated since "
"Python 3.5, use Signature.from_callable()",
DeprecationWarning, stacklevel=2)
return _signature_from_builtin(cls, func)
@classmethod
def from_callable(cls, obj, *, follow_wrapped=True):
"""Constructs Signature for the given callable object."""
return _signature_from_callable(obj, sigcls=cls,
follow_wrapper_chains=follow_wrapped)
@property
def parameters(self):
return self._parameters
@property
def return_annotation(self):
return self._return_annotation
def replace(self, *, parameters=_void, return_annotation=_void):
"""Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
"""
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def _hash_basis(self):
params = tuple(param for param in self.parameters.values()
if param.kind != _KEYWORD_ONLY)
kwo_params = {param.name: param for param in self.parameters.values()
if param.kind == _KEYWORD_ONLY}
return params, kwo_params, self.return_annotation
def __hash__(self):
params, kwo_params, return_annotation = self._hash_basis()
kwo_params = frozenset(kwo_params.values())
return hash((params, kwo_params, return_annotation))
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Signature):
return NotImplemented
return self._hash_basis() == other._hash_basis()
def _bind(self, args, kwargs, *, partial=False):
"""Private method. Don't use directly."""
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, let's start parsing `kwargs`, starting
                        # with the current parameter
parameters_ex = (param,)
break
else:
# No default, not VAR_KEYWORD, not VAR_POSITIONAL,
# not in `kwargs`
if partial:
parameters_ex = (param,)
break
else:
msg = 'missing a required argument: {arg!r}'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments') from None
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError(
'too many positional arguments') from None
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError(
'multiple values for argument {arg!r}'.format(
arg=param.name)) from None
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
if param.kind == _VAR_POSITIONAL:
# Named arguments don't refer to '*args'-like parameters.
# We only arrive here if the positional arguments ended
# before reaching the last parameter before *args.
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('missing a required argument: {arg!r}'. \
format(arg=param_name)) from None
else:
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError(
'got an unexpected keyword argument {arg!r}'.format(
arg=next(iter(kwargs))))
return self._bound_arguments_cls(self, arguments)
def bind(*args, **kwargs):
"""Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments can not be bound.
"""
return args[0]._bind(args[1:], kwargs)
def bind_partial(*args, **kwargs):
"""Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments can not be bound.
"""
return args[0]._bind(args[1:], kwargs, partial=True)
def __reduce__(self):
return (type(self),
(tuple(self._parameters.values()),),
{'_return_annotation': self._return_annotation})
def __setstate__(self, state):
self._return_annotation = state['_return_annotation']
def __repr__(self):
return '<{} {}>'.format(self.__class__.__name__, self)
def __str__(self):
result = []
render_pos_only_separator = False
render_kw_only_separator = True
for param in self.parameters.values():
formatted = str(param)
kind = param.kind
if kind == _POSITIONAL_ONLY:
render_pos_only_separator = True
elif render_pos_only_separator:
# It's not a positional-only parameter, and the flag
# is set to 'True' (there were pos-only params before.)
result.append('/')
render_pos_only_separator = False
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
if render_pos_only_separator:
# There were only positional-only parameters, hence the
# flag was not reset to 'False'
result.append('/')
rendered = '({})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {}'.format(anno)
return rendered
def signature(obj, *, follow_wrapped=True):
"""Get a signature object for the passed callable."""
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
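# An end-to-end sketch tying signature(), Signature and BoundArguments
# together on a hypothetical function:
#
#   >>> def f(a, *, b=2): pass
#   >>> sig = signature(f)
#   >>> str(sig)
#   '(a, *, b=2)'
#   >>> ba = sig.bind(1)
#   >>> ba.apply_defaults()
#   >>> ba.args, ba.kwargs
#   ((1,), {'b': 2})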
def _main():
""" Logic for inspecting an object given at command line """
try:
import argparse
import importlib
except ImportError:
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument(
'object',
help="The object to be analysed. "
"It supports the 'module:qualname' syntax")
parser.add_argument(
'-d', '--details', action='store_true',
help='Display info about the module rather than its source code')
args = parser.parse_args()
target = args.object
mod_name, has_attrs, attrs = target.partition(":")
try:
obj = module = importlib.import_module(mod_name)
except Exception as exc:
msg = "Failed to import {} ({}: {})".format(mod_name,
type(exc).__name__,
exc)
print(msg, file=sys.stderr)
        sys.exit(2)
if has_attrs:
parts = attrs.split(".")
obj = module
for part in parts:
obj = getattr(obj, part)
if module.__name__ in sys.builtin_module_names:
print("Can't get info for builtin modules.", file=sys.stderr)
        sys.exit(1)
if args.details:
print('Target: {}'.format(target))
print('Origin: {}'.format(getsourcefile(module)))
print('Cached: {}'.format(module.__cached__))
if obj is module:
print('Loader: {}'.format(repr(module.__loader__)))
if hasattr(module, '__path__'):
print('Submodule search path: {}'.format(module.__path__))
else:
try:
__, lineno = findsource(obj)
except Exception:
pass
else:
print('Line: {}'.format(lineno))
print('\n')
else:
print(getsource(obj))
if __name__ == "__main__":
_main()
cosmopolitan/third_party/python/Lib/csv.py
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
from collections import OrderedDict
from io import StringIO
__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter",
"unix_dialect"]
class Dialect:
"""Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = next(self.reader)
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = OrderedDict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
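# Illustrative sketch (not part of the original module): DictReader takes
# field names from the first row by default and yields one dict per row.
def _demo_dict_reader():
    sample = 'name,age\r\nalice,30\r\nbob,25\r\n'
    rows = list(DictReader(StringIO(sample)))
    assert rows[0]['name'] == 'alice' and rows[0]['age'] == '30'
    assert rows[1] == {'name': 'bob', 'age': '25'}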
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
% extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = rowdict.keys() - self.fieldnames
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join([repr(x) for x in wrong_fields]))
return (rowdict.get(key, self.restval) for key in self.fieldnames)
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
return self.writer.writerows(map(self._dict_to_list, rowdicts))
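# Illustrative sketch (not part of the original module): DictWriter fills
# missing keys with `restval`; unexpected keys raise ValueError unless
# extrasaction='ignore' is passed.
def _demo_dict_writer():
    buf = StringIO()
    w = DictWriter(buf, fieldnames=['name', 'age'], restval='?')
    w.writeheader()
    w.writerow({'name': 'alice', 'age': 30})
    w.writerow({'name': 'bob'})  # 'age' is missing -> written as '?'
    assert buf.getvalue() == 'name,age\r\nalice,30\r\nbob,?\r\n'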
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error("Could not determine delimiter")
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
        The quote character that appears most often wins; likewise for the
        delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
groupindex = regexp.groupindex
for m in matches:
n = groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = max(quotes, key=quotes.get)
if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(
r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = list(filter(None, data.split('\n')))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = next(rdr) # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in list(columnTypes.keys()):
for thisType in [int, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
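# Illustrative sketch (not part of the original module): sniffing a
# semicolon-delimited sample and checking for a header row.
def _demo_sniffer():
    sample = 'name;age\r\nalice;30\r\nbob;25\r\n'
    sniffer = Sniffer()
    assert sniffer.sniff(sample).delimiter == ';'
    # the 'age' column is consistently numeric except in row one, so row
    # one is voted to be a header
    assert sniffer.has_header(sample)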
cosmopolitan/third_party/python/Lib/_pydecimal.py
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Named tuple representation
'DecimalTuple',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Exceptional conditions that trigger InvalidOperation
'DivisionImpossible', 'InvalidContext', 'ConversionSyntax', 'DivisionUndefined',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__xname__ = __name__ # sys.modules lookup (--without-threads)
__name__ = 'decimal' # For pickling
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
__libmpdec_version__ = "2.4.2" # compatible libmpdec version
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
    All decimal exceptions derive from this class.
    If an exception derives from another exception besides this one (such
    as Underflow, which derives from Inexact, Rounded and Subnormal), that
    indicates that it is only signaled if the others are present. This
    isn't actually used for anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if a string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
class FloatOperation(DecimalException, TypeError):
"""Enable stricter semantics for mixing floats and Decimals.
If the signal is not trapped (default), mixing floats and Decimals is
permitted in the Decimal() constructor, context.create_decimal() and
all comparison operators. Both conversion and comparisons are exact.
Any occurrence of a mixed operation is silently recorded by setting
FloatOperation in the context flags. Explicit conversions with
Decimal.from_float() or context.create_decimal_from_float() do not
set the flag.
Otherwise (the signal is trapped), only equality comparisons and explicit
conversions are silent. All other mixed operations raise FloatOperation.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() functions manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__xname__]
threading = MockThreading()
del MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.current_thread(), '__decimal_context__'):
del threading.current_thread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.current_thread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.current_thread().__decimal_context__
except AttributeError:
context = Context()
threading.current_thread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
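# Illustrative sketch (not part of the original module): the context manager
# returned by localcontext() restores the previous context on exit.
def _demo_localcontext():
    saved_prec = getcontext().prec
    with localcontext() as ctx:
        ctx.prec = saved_prec + 2   # temporary extra working precision
        assert getcontext().prec == saved_prec + 2
    assert getcontext().prec == saved_prec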
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
m = _parser(value.strip().replace("_", ""))
if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
if m.group('sign') == "-":
self._sign = 1
else:
self._sign = 0
intpart = m.group('int')
if intpart is not None:
# finite number
fracpart = m.group('frac') or ''
exp = int(m.group('exp') or '0')
self._int = str(int(intpart+fracpart))
self._exp = exp - len(fracpart)
self._is_special = False
else:
diag = m.group('diag')
if diag is not None:
# NaN
self._int = str(int(diag or '0')).lstrip('0')
if m.group('signal'):
self._exp = 'N'
else:
self._exp = 'n'
else:
# infinity
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
@classmethod
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
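    # For example (illustrative): bool(Decimal('0')) and bool(Decimal('-0'))
    # are False, while bool(Decimal('NaN')) and bool(Decimal('Infinity'))
    # are True.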
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compare self to other. Return a decimal value:
a or b is a NaN ==> Decimal('NaN')
a < b ==> Decimal('-1')
a == b ==> Decimal('0')
a > b ==> Decimal('1')
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# In order to make sure that the hash of a Decimal instance
# agrees with the hash of a numerically equal integer, float
# or Fraction, we follow the rules for numeric hashes outlined
# in the documentation. (See library docs, 'Built-in Types').
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
return _PyHASH_NAN
else:
if self._sign:
return -_PyHASH_INF
else:
return _PyHASH_INF
if self._exp >= 0:
exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
else:
exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
ans = hash_ if self >= 0 else -hash_
return -2 if ans == -1 else ans
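    # For example (illustrative): hash(Decimal('1.5')) == hash(1.5) and
    # hash(Decimal(100)) == hash(100), so numerically equal ints, floats
    # and Decimals collide to the same dict/set slot.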
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def as_integer_ratio(self):
"""Express a finite Decimal instance in the form n / d.
Returns a pair (n, d) of integers. When called on an infinity
or NaN, raises OverflowError or ValueError respectively.
>>> Decimal('3.14').as_integer_ratio()
(157, 50)
>>> Decimal('-123e5').as_integer_ratio()
(-12300000, 1)
>>> Decimal('0.00').as_integer_ratio()
(0, 1)
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot convert NaN to integer ratio")
else:
raise OverflowError("cannot convert Infinity to integer ratio")
if not self:
return 0, 1
# Find n, d in lowest terms such that abs(self) == n / d;
# we'll deal with the sign later.
n = int(self._int)
if self._exp >= 0:
# self is an integer.
n, d = n * 10**self._exp, 1
else:
# Find d2, d5 such that abs(self) = n / (2**d2 * 5**d5).
d5 = -self._exp
while d5 > 0 and n % 5 == 0:
n //= 5
d5 -= 1
# (n & -n).bit_length() - 1 counts trailing zeros in binary
# representation of n (provided n is nonzero).
d2 = -self._exp
shift2 = min((n & -n).bit_length() - 1, d2)
if shift2:
n >>= shift2
d2 -= shift2
d = 5**d5 << d2
if self._sign:
n = -n
return n, d
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
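    # For example (illustrative of the rules above):
    #   str(Decimal('123E+5')) == '1.23E+7'   # exponent needed
    #   str(Decimal('0.00001')) == '0.00001'  # leftdigits > -6, no exponent
    #   str(Decimal('1E-7')) == '1E-7'        # leftdigits == -6, exponent kept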
def to_eng_string(self, context=None):
"""Convert to a string, using engineering notation if an exponent is needed.
Engineering notation has an exponent which is a multiple of 3. This
can leave up to 3 digits to the left of the decimal place and may
require the addition of either one or two trailing zeros.
"""
return self.__str__(eng=True, context=context)
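    # For example (illustrative): Decimal('123E+1').to_eng_string() gives
    # '1.23E+3' and Decimal('10E+2').to_eng_string() gives '1.0E+3'; the
    # exponent is forced to a multiple of 3.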
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
        Rounds the result, if necessary.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
        Rounds the number (if it has more than precision digits).
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
        -INF + INF (or the reverse) causes an InvalidOperation error.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
        (+-)INF * 0 (or the reverse) raises InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
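    # For example (illustrative): with the default 28-digit precision,
    # Decimal(1) / Decimal(3) == Decimal('0.3333333333333333333333333333'),
    # while Decimal(1) / Decimal(4) == Decimal('0.25') exactly, reduced to
    # the ideal exponent.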
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
        Remainder nearest to 0: abs(remainder_near) <= abs(other)/2.
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
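    # For example (illustrative): Decimal(18).remainder_near(Decimal(10))
    # is Decimal('-2') since 18 = 2*10 - 2, whereas
    # Decimal(12).remainder_near(Decimal(10)) is Decimal('2').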
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if clamp==0, and between Etiny and Etop if clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context.clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if clamp == 1 and self has too few digits
if context.clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
def __round__(self, n=None):
"""Round self to the nearest integer, or to a given precision.
If only one argument is supplied, round a finite Decimal
instance self to the nearest integer. If self is infinite or
a NaN then a Python exception is raised. If self is finite
and lies exactly halfway between two integers then it is
rounded to the integer with even last digit.
>>> round(Decimal('123.456'))
123
>>> round(Decimal('-456.789'))
-457
>>> round(Decimal('-3.0'))
-3
>>> round(Decimal('2.5'))
2
>>> round(Decimal('3.5'))
4
>>> round(Decimal('Inf'))
Traceback (most recent call last):
...
OverflowError: cannot round an infinity
>>> round(Decimal('NaN'))
Traceback (most recent call last):
...
ValueError: cannot round a NaN
If a second argument n is supplied, self is rounded to n
decimal places using the rounding mode for the current
context.
For an integer n, round(self, -n) is exactly equivalent to
self.quantize(Decimal('1En')).
>>> round(Decimal('123.456'), 0)
Decimal('123')
>>> round(Decimal('123.456'), 2)
Decimal('123.46')
>>> round(Decimal('123.456'), -2)
Decimal('1E+2')
>>> round(Decimal('-Infinity'), 37)
Decimal('NaN')
>>> round(Decimal('sNaN123'), 0)
Decimal('NaN123')
"""
if n is not None:
# two-argument form: use the equivalent quantize call
if not isinstance(n, int):
raise TypeError('Second argument to round should be integral')
exp = _dec_from_triple(0, '1', -n)
return self.quantize(exp)
# one-argument form
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
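    # Illustrative usage (a sketch, not part of this module): __floor__ and
    # __ceil__ let the math module's floor and ceil accept Decimal operands,
    # while the built-in round() dispatches to __round__:
    #
    #   >>> import math
    #   >>> from decimal import Decimal
    #   >>> math.floor(Decimal('3.7')), math.ceil(Decimal('3.7'))
    #   (3, 4)
    #   >>> round(Decimal('2.5'))   # one-argument round is half-even
    #   2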
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
third = _convert_other(third, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
return product.__add__(third, context)
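    # Illustrative usage of fma (a sketch, not part of this module),
    # assuming a context with prec=3 to make the single rounding visible:
    #
    #   >>> from decimal import Decimal, getcontext
    #   >>> Decimal(2).fma(3, 5)
    #   Decimal('11')
    #   >>> getcontext().prec = 3
    #   >>> Decimal('1.11').fma(Decimal('2.22'), Decimal('0.004'))
    #   Decimal('2.47')
    #
    # The exact product 2.4642 is kept unrounded; only the final sum
    # 2.4682 is rounded to the 3-digit precision.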
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
other = _convert_other(other)
if other is NotImplemented:
return other
modulo = _convert_other(modulo)
if modulo is NotImplemented:
return modulo
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in range(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
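    # Illustrative usage of the three-argument power (a sketch, not part of
    # this module); the built-in pow(base, exp, mod) dispatches here via
    # __pow__:
    #
    #   >>> from decimal import Decimal
    #   >>> pow(Decimal(2), Decimal(10), Decimal(7))
    #   Decimal('2')
    #
    # Non-integral operands, a negative exponent, or a zero modulus all
    # signal InvalidOperation, mirroring the restrictions of Python's
    # built-in pow().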
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
        # 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
        # if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1 << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
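    # Illustrative usage of __pow__ (a sketch, not part of this module),
    # assuming a fresh default context except where prec is set explicitly:
    #
    #   >>> from decimal import Decimal, getcontext
    #   >>> Decimal(2) ** 8               # exact result, returned exactly
    #   Decimal('256')
    #   >>> getcontext().prec = 6
    #   >>> Decimal(2) ** Decimal('0.5')  # inexact; correctly rounded
    #   Decimal('1.41421')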
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
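    # Illustrative usage of normalize (a sketch, not part of this module):
    #
    #   >>> from decimal import Decimal
    #   >>> Decimal('120.00').normalize()
    #   Decimal('1.2E+2')
    #   >>> Decimal('0.00').normalize()
    #   Decimal('0')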
def quantize(self, exp, rounding=None, context=None):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
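    # Illustrative usage of quantize (a sketch, not part of this module):
    #
    #   >>> from decimal import Decimal, ROUND_UP
    #   >>> Decimal('1.41421356').quantize(Decimal('1.000'))
    #   Decimal('1.414')
    #   >>> Decimal('1.41421356').quantize(Decimal('1.000'), rounding=ROUND_UP)
    #   Decimal('1.415')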
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
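    # Illustrative usage (a sketch, not part of this module):
    # to_integral_value is quiet, while to_integral_exact raises Inexact
    # and Rounded when digits are discarded:
    #
    #   >>> from decimal import Decimal, ROUND_CEILING
    #   >>> Decimal('2.5').to_integral_value()   # half-even by default
    #   Decimal('2')
    #   >>> Decimal('2.1').to_integral_value(rounding=ROUND_CEILING)
    #   Decimal('3')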
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
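    # Illustrative usage of sqrt (a sketch, not part of this module):
    #
    #   >>> from decimal import Decimal, getcontext
    #   >>> Decimal('100').sqrt()    # exact; uses the ideal exponent
    #   Decimal('10')
    #   >>> getcontext().prec = 6
    #   >>> Decimal(2).sqrt()        # correctly rounded to 6 digits
    #   Decimal('1.41421')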
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then
            # the number is always returned.
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then
            # the number is always returned.
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
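    # Illustrative usage of max/min (a sketch, not part of this module):
    # a single quiet NaN operand is ignored, and numerically equal operands
    # are ordered by compare_total:
    #
    #   >>> from decimal import Decimal
    #   >>> Decimal('NaN').max(Decimal(2))
    #   Decimal('2')
    #   >>> Decimal('1').max(Decimal('1.0'))
    #   Decimal('1')
    #   >>> Decimal('1').min(Decimal('1.0'))
    #   Decimal('1.0')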
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
        As we do not have different encodings for the same number, the
        received object is already in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other, context=None):
"""Compares self to other using the abstract representations.
        This is not like the standard compare, which uses the numerical
        values.  Note that a total ordering is defined for all possible
        abstract representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
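    # Illustrative usage of compare_total (a sketch, not part of this
    # module); the total order distinguishes representations with equal
    # numerical values and places NaNs above all numbers:
    #
    #   >>> from decimal import Decimal
    #   >>> Decimal('12.0').compare_total(Decimal('12'))
    #   Decimal('-1')
    #   >>> Decimal('12').compare_total(Decimal('NaN'))
    #   Decimal('-1')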
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
        Like compare_total, but with the signs of the operands ignored
        and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
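    # Illustrative usage of exp (a sketch, not part of this module),
    # assuming a context with prec=9:
    #
    #   >>> from decimal import Decimal, getcontext
    #   >>> getcontext().prec = 9
    #   >>> Decimal(1).exp()
    #   Decimal('2.71828183')
    #   >>> Decimal(-1).exp()
    #   Decimal('0.367879441')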
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
        In other words, compute r such that abs(self.ln()) >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
        In other words, find r such that abs(self.log10()) >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
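    # Illustrative usage of ln/log10 (a sketch, not part of this module),
    # assuming a context with prec=9:
    #
    #   >>> from decimal import Decimal, getcontext
    #   >>> getcontext().prec = 9
    #   >>> Decimal(10).ln()
    #   Decimal('2.30258509')
    #   >>> Decimal('0.001').log10()   # exact power of ten; no rounding
    #   Decimal('-3')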
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
        To be logical, it must be a finite number with a sign of 0, an
        exponent of 0, and a coefficient whose digits are all either 0
        or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
        # perform the operation and strip leading zeros
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
        # perform the operation and strip leading zeros
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
        # perform the operation and strip leading zeros
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
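    # Illustrative usage of the logical operations (a sketch, not part of
    # this module); operands must be *logical*: finite, with sign 0,
    # exponent 0, and only 0/1 digits:
    #
    #   >>> from decimal import Decimal
    #   >>> Decimal('1100').logical_and(Decimal('1010'))
    #   Decimal('1000')
    #   >>> Decimal('1100').logical_or(Decimal('1010'))
    #   Decimal('1110')
    #   >>> Decimal('1100').logical_xor(Decimal('1010'))
    #   Decimal('110')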
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then
            # the number is always returned.
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then
            # the number is always returned.
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
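    # Illustrative usage of next_plus/next_minus/next_toward (a sketch, not
    # part of this module), assuming a context with prec=4:
    #
    #   >>> from decimal import Decimal, getcontext
    #   >>> getcontext().prec = 4
    #   >>> Decimal(1).next_plus()
    #   Decimal('1.001')
    #   >>> Decimal(1).next_minus()
    #   Decimal('0.9999')
    #   >>> Decimal(1).next_toward(Decimal(-1))
    #   Decimal('0.9999')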
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
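    # Illustrative usage of scaleb (a sketch, not part of this module):
    #
    #   >>> from decimal import Decimal
    #   >>> Decimal('7.50').scaleb(3)
    #   Decimal('7.50E+3')
    #   >>> Decimal('7.50').scaleb(-2)
    #   Decimal('0.0750')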
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
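    # Illustrative usage of rotate/shift (a sketch, not part of this
    # module); both operate on the coefficient padded to context.prec
    # digits, so the default precision of 28 is assumed here:
    #
    #   >>> from decimal import Decimal
    #   >>> Decimal(8).shift(2)
    #   Decimal('800')
    #   >>> Decimal(8).shift(-1)            # last digit falls off
    #   Decimal('0')
    #   >>> Decimal('123456789').rotate(3)
    #   Decimal('123456789000')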
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
    # PEP 3101 support.  The _localeconv keyword argument should be
    # considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
if spec['type'] == '%':
body += '%'
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
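    # Illustrative results (hand-checked against the logic above, using the
    # default ROUND_HALF_EVEN rounding):
    #   format(Decimal('1234.5678'), '.2f')  ->  '1234.57'
    #   format(Decimal('1234.5678'), '.3e')  ->  '1.235e+3'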
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
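# Typical use is via the module-level localcontext() helper:
#     with localcontext() as ctx:
#         ctx.prec = 50   # work at 50-digit precision inside the block
#         ...
# The previous context is restored on exit, even if an exception propagates.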
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
    prec - precision (for use in rounding, division, square roots...)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None, traps=None,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self.clamp = clamp if clamp is not None else dc.clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals + traps)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals + flags)
else:
self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
    def _set_signal_dict(self, name, d):
        if not isinstance(d, dict):
            raise TypeError("%s must be a signal dict" % name)
        for key in d:
            if key not in _signals:
                raise KeyError("%s is not a valid signal dict" % d)
        for key in _signals:
            if key not in d:
                raise KeyError("%s is not a valid signal dict" % d)
        return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
            if value not in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
trap_enabler is set, it reraises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
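    # For example, with Inexact left untrapped (the default), an inexact
    # division merely sets the flag:
    #     c = Context(prec=9)
    #     c.divide(Decimal(1), Decimal(3))  # Decimal('0.333333333'); c.flags[Inexact] == 1
    #     c.traps[Inexact] = 1
    #     c.divide(Decimal(1), Decimal(3))  # now raises Inexact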
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
        # Do not mutate; this way, copies of a context leave the original
        # alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
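    # E.g. under the stdlib DefaultContext defined later in this module
    # (prec=28, Emin=-999999, Emax=999999):
    #     Etiny() == -1000026 and Etop() == 999972.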
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and (num != num.strip() or '_' in num):
return self._raise_error(ConversionSyntax,
"trailing or leading whitespace and "
"underscores are not permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
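    # For example (hand-checked): Context(prec=3).create_decimal('3.14159')
    # returns Decimal('3.14'), rounded to the context's precision, whereas
    # the plain Decimal('3.14159') constructor performs no rounding.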
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
        This is not like the standard compare, which uses the operands'
        numerical values.  Note that a total ordering is defined for all
        possible abstract representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
        Like compare_total, but with the sign of each operand ignored and
        assumed to be 0.
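        For example (these follow from the compare_total doctests above,
        with the signs stripped):
        >>> ExtendedContext.compare_total_mag(Decimal('-2'), Decimal('1'))
        Decimal('1')
        >>> ExtendedContext.compare_total_mag(Decimal('-12.30'), Decimal('12.3'))
        Decimal('-1')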
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
        a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand after adding the second value to its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Convert to a string, using engineering notation if an exponent is needed.
Engineering notation has an exponent which is a multiple of 3. This
can leave up to 3 digits to the left of the decimal place and may
require the addition of either one or two trailing zeros.
The operation is not affected by the context.
>>> ExtendedContext.to_eng_string(Decimal('123E+1'))
'1.23E+3'
>>> ExtendedContext.to_eng_string(Decimal('123E+3'))
'123E+3'
>>> ExtendedContext.to_eng_string(Decimal('123E-10'))
'12.3E-9'
>>> ExtendedContext.to_eng_string(Decimal('-123E-12'))
'-123E-12'
>>> ExtendedContext.to_eng_string(Decimal('7E-7'))
'700E-9'
>>> ExtendedContext.to_eng_string(Decimal('7E+1'))
'70'
>>> ExtendedContext.to_eng_string(Decimal('0E+1'))
'0.00E+3'
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
    # The method name changed, but we also provide the old one for compatibility.
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
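# Worked example (hand-checked, prec=0): normalizing (sign=0, int=123, exp=1)
# against (sign=0, int=456, exp=-1) rescales the first operand to int=12300,
# exp=-1, so that both operands share the smaller exponent.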
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
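    For example, even a deliberately rough initial guess converges:
    >>> _sqrt_nearest(200, 10)
    14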
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
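        # Newton-Raphson step: 'a--n//a>>1' parses as (a - (-n//a)) >> 1,
        # i.e. the average of a and ceil(n/a), rounded down.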
b, a = a, a--n//a>>1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
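    For example, halfway cases resolve to the even neighbour:
    >>> _rshift_nearest(5, 1)
    2
    >>> _rshift_nearest(7, 1)
    4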
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
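    For example:
    >>> _div_nearest(5, 2)
    2
    >>> _div_nearest(7, 2)
    4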
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and abs(y) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest((M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in range(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
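# Rough illustration (inferred from the error bound above, not a verified
# output): _ilog(2 * 10**4, 10**4) should land within 22 of
# 10**4 * log(2), i.e. near 6931.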
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
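# Illustration (inferred from the stated contract, not a verified output):
# _dlog10(2, 0, 4) approximates 10**4 * log10(2) ~= 3010 to within 1.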
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
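# Illustration (inferred from the stated contract, not a verified output):
# _dlog(2, 0, 4) approximates 10**4 * log(2) ~= 6931 to within 1.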
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((x<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = M<<R
for i in range(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in range(R-1, -1, -1):
Mshift = M<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
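# Illustration: _iexp(0, 10**4) returns exactly 10**4 (every Taylor term
# vanishes), and by the bound above _iexp(10**4, 10**4) should land within
# 60 of 10**4 * e ~= 27183.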
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
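# Illustration (inferred from the stated contract, not a verified output):
# _dexp(1, 0, 4) should return d, f with d*10**f ~= e, e.g. d ~= 2718, f = -3.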
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
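# Illustration (inferred from the stated contract, not a verified output):
# _dpower(2, 0, 3, 0, 4) computes 2**3 and should give c within 1 of 8000
# with e = -3, so that c*10**e ~= 8.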
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
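# For example, _log10_lb(3) returns 100*1 - 53 = 47, a valid lower bound for
# 100*log10(3) ~= 47.7; only the length of c and its leading digit are used.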
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, int):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
def _convert_for_comparison(self, other, equality_op=False):
"""Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
if isinstance(other, Decimal):
return self, other
# Comparison with a Rational instance (also includes integers):
# self op n/d <=> self*d op n (for n and d integers, d positive).
# A NaN or infinity can be left unchanged without affecting the
# comparison result.
if isinstance(other, _numbers.Rational):
if not self._is_special:
self = _dec_from_triple(self._sign,
str(int(self._int) * other.denominator),
self._exp)
return self, Decimal(other.numerator)
# Comparisons with float and complex types. == and != comparisons
# with complex numbers should succeed, returning either True or False
# as appropriate. Other comparisons return NotImplemented.
if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
other = other.real
if isinstance(other, float):
context = getcontext()
if equality_op:
context.flags[FloatOperation] = 1
else:
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are enabled")
return self, Decimal.from_float(other)
return NotImplemented, NotImplemented
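# Illustration (hand-traced): comparing Decimal('0.5') with Fraction(1, 2)
# multiplies self's coefficient by the denominator, so the returned pair is
# effectively (Decimal('1.0'), Decimal(1)) and the comparison is exact.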
##### Setup Specific Contexts ############################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=999999,
Emin=-999999,
capitals=1,
clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
import re
_parser = re.compile(r""" # A numeric string consists of:
# \s*
(?P<sign>[-+])? # an optional sign, followed by either...
(
(?=\d|\.\d) # ...a number (with at least one digit)
(?P<int>\d*) # having a (possibly empty) integer part
(\.(?P<frac>\d*))? # followed by an optional fractional part
(E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
|
Inf(inity)? # ...an infinity, or...
|
(?P<signal>s)? # ...an (optionally signaling)
NaN # NaN
(?P<diag>\d*) # with (possibly empty) diagnostic info.
)
# \s*
\Z
""", re.VERBOSE | re.IGNORECASE).match
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
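# For example, _parser('1.23E+4') matches with sign=None, int='1', frac='23'
# and exp='+4'; _parser('sNaN123') sets signal='s' and diag='123'; a bare '.'
# fails the lookahead and yields None.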
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
_parse_format_specifier_regex = re.compile(r"""\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gGn':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
return format_dict
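# For example, _parse_format_specifier('>10.2f') yields fill=' ', align='>',
# sign='-', minimumwidth=10, precision=2, type='f', thousands_sep='' and
# decimal_point='.' (values traced through the defaults above).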
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
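# For example, with spec {'minimumwidth': 8, 'fill': '*', 'align': '^'},
# _format_align('-', '1.5', spec) returns '**-1.5**'.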
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
    # (2) a nonempty list of positive integers + [0], or
    # (3) a list of positive integers + [locale.CHAR_MAX].
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart or spec['alt']:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys
cosmopolitan/third_party/python/Lib/_compat_pickle.py
# This module is used to map the old Python 2 names to the new names used in
# Python 3 for the pickle module. This is needed to make pickle streams
# generated with Python 2 loadable by Python 3.
# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import
# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
# Thus, this could cause the module to be imported recursively.
IMPORT_MAPPING = {
'__builtin__' : 'builtins',
'copy_reg': 'copyreg',
'Queue': 'queue',
'SocketServer': 'socketserver',
'ConfigParser': 'configparser',
'repr': 'reprlib',
'tkFileDialog': 'tkinter.filedialog',
'tkSimpleDialog': 'tkinter.simpledialog',
'tkColorChooser': 'tkinter.colorchooser',
'tkCommonDialog': 'tkinter.commondialog',
'Dialog': 'tkinter.dialog',
'Tkdnd': 'tkinter.dnd',
'tkFont': 'tkinter.font',
'tkMessageBox': 'tkinter.messagebox',
'ScrolledText': 'tkinter.scrolledtext',
'Tkconstants': 'tkinter.constants',
'Tix': 'tkinter.tix',
'ttk': 'tkinter.ttk',
'Tkinter': 'tkinter',
'markupbase': '_markupbase',
'_winreg': 'winreg',
'thread': '_thread',
'dummy_thread': '_dummy_thread',
'dbhash': 'dbm.bsd',
'dumbdbm': 'dbm.dumb',
'dbm': 'dbm.ndbm',
'gdbm': 'dbm.gnu',
'xmlrpclib': 'xmlrpc.client',
'SimpleXMLRPCServer': 'xmlrpc.server',
'httplib': 'http.client',
'htmlentitydefs' : 'html.entities',
'HTMLParser' : 'html.parser',
'Cookie': 'http.cookies',
'cookielib': 'http.cookiejar',
'BaseHTTPServer': 'http.server',
'test.test_support': 'test.support',
'commands': 'subprocess',
'urlparse' : 'urllib.parse',
'robotparser' : 'urllib.robotparser',
'urllib2': 'urllib.request',
'anydbm': 'dbm',
'_abcoll' : 'collections.abc',
}
# This contains rename rules that are easy to handle. We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
# These rules should be run before import names are fixed.
NAME_MAPPING = {
('__builtin__', 'xrange'): ('builtins', 'range'),
('__builtin__', 'reduce'): ('functools', 'reduce'),
('__builtin__', 'intern'): ('sys', 'intern'),
('__builtin__', 'unichr'): ('builtins', 'chr'),
('__builtin__', 'unicode'): ('builtins', 'str'),
('__builtin__', 'long'): ('builtins', 'int'),
('itertools', 'izip'): ('builtins', 'zip'),
('itertools', 'imap'): ('builtins', 'map'),
('itertools', 'ifilter'): ('builtins', 'filter'),
('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
('UserList', 'UserList'): ('collections', 'UserList'),
('UserString', 'UserString'): ('collections', 'UserString'),
('whichdb', 'whichdb'): ('dbm', 'whichdb'),
('_socket', 'fromfd'): ('socket', 'fromfd'),
('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
('urllib', 'quote'): ('urllib.parse', 'quote'),
('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
('urllib', 'unquote'): ('urllib.parse', 'unquote'),
('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
('urllib2', 'URLError'): ('urllib.error', 'URLError'),
}
PYTHON2_EXCEPTIONS = (
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"BufferError",
"BytesWarning",
"DeprecationWarning",
"EOFError",
"EnvironmentError",
"Exception",
"FloatingPointError",
"FutureWarning",
"GeneratorExit",
"IOError",
"ImportError",
"ImportWarning",
"IndentationError",
"IndexError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"MemoryError",
"NameError",
"NotImplementedError",
"OSError",
"OverflowError",
"PendingDeprecationWarning",
"ReferenceError",
"RuntimeError",
"RuntimeWarning",
# StandardError is gone in Python 3, so we map it to Exception
"StopIteration",
"SyntaxError",
"SyntaxWarning",
"SystemError",
"SystemExit",
"TabError",
"TypeError",
"UnboundLocalError",
"UnicodeDecodeError",
"UnicodeEncodeError",
"UnicodeError",
"UnicodeTranslateError",
"UnicodeWarning",
"UserWarning",
"ValueError",
"Warning",
"ZeroDivisionError",
)
try:
WindowsError
except NameError:
pass
else:
PYTHON2_EXCEPTIONS += ("WindowsError",)
for excname in PYTHON2_EXCEPTIONS:
NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)
MULTIPROCESSING_EXCEPTIONS = (
'AuthenticationError',
'BufferTooShort',
'ProcessError',
'TimeoutError',
)
for excname in MULTIPROCESSING_EXCEPTIONS:
NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)
# Same, but for 3.x to 2.x
REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)
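# For example, when pickling with protocol < 3 and fix_imports=True, the
# pickle module can translate ('builtins', 'range') back to
# ('__builtin__', 'xrange') via REVERSE_NAME_MAPPING, and
# REVERSE_IMPORT_MAPPING['builtins'] is '__builtin__'.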
# Non-mutual mappings.
IMPORT_MAPPING.update({
'cPickle': 'pickle',
'_elementtree': 'xml.etree.ElementTree',
'FileDialog': 'tkinter.filedialog',
'SimpleDialog': 'tkinter.simpledialog',
'DocXMLRPCServer': 'xmlrpc.server',
'SimpleHTTPServer': 'http.server',
'CGIHTTPServer': 'http.server',
# For compatibility with broken pickles saved in old Python 3 versions
'UserDict': 'collections',
'UserList': 'collections',
'UserString': 'collections',
'whichdb': 'dbm',
'StringIO': 'io',
'cStringIO': 'io',
})
REVERSE_IMPORT_MAPPING.update({
'_bz2': 'bz2',
'_dbm': 'dbm',
'_functools': 'functools',
'_gdbm': 'gdbm',
'_pickle': 'pickle',
})
NAME_MAPPING.update({
('__builtin__', 'basestring'): ('builtins', 'str'),
('exceptions', 'StandardError'): ('builtins', 'Exception'),
('UserDict', 'UserDict'): ('collections', 'UserDict'),
('socket', '_socketobject'): ('socket', 'SocketType'),
})
REVERSE_NAME_MAPPING.update({
('_functools', 'reduce'): ('__builtin__', 'reduce'),
('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
('xmlrpc.server', 'XMLRPCDocGenerator'):
('DocXMLRPCServer', 'XMLRPCDocGenerator'),
('xmlrpc.server', 'DocXMLRPCRequestHandler'):
('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
('xmlrpc.server', 'DocXMLRPCServer'):
('DocXMLRPCServer', 'DocXMLRPCServer'),
('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
('http.server', 'SimpleHTTPRequestHandler'):
('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
('http.server', 'CGIHTTPRequestHandler'):
('CGIHTTPServer', 'CGIHTTPRequestHandler'),
('_socket', 'socket'): ('socket', '_socketobject'),
})
PYTHON3_OSERROR_EXCEPTIONS = (
'BrokenPipeError',
'ChildProcessError',
'ConnectionAbortedError',
'ConnectionError',
'ConnectionRefusedError',
'ConnectionResetError',
'FileExistsError',
'FileNotFoundError',
'InterruptedError',
'IsADirectoryError',
'NotADirectoryError',
'PermissionError',
'ProcessLookupError',
'TimeoutError',
)
for excname in PYTHON3_OSERROR_EXCEPTIONS:
REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')
PYTHON3_IMPORTERROR_EXCEPTIONS = (
'ModuleNotFoundError',
)
for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')
cosmopolitan/third_party/python/Lib/bdb.py
"""Debugger basics"""
import fnmatch
import sys
import os
from inspect import CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR
__all__ = ["BdbQuit", "Bdb", "Breakpoint"]
GENERATOR_AND_COROUTINE_FLAGS = CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR
class BdbQuit(Exception):
"""Exception to give up completely."""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self, skip=None):
self.skip = set(skip) if skip else None
self.breaks = {}
self.fncache = {}
self.frame_returning = None
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
return filename
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self._set_stopinfo(None, None)
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print('bdb.Bdb.dispatch: unknown debugging event:', repr(event))
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame.f_back # (CT) Note that this may also be None!
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
# Ignore call events in generator except when stepping.
if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
return self.trace_dispatch
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
# Ignore return events in generator except when stepping.
if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
return self.trace_dispatch
try:
self.frame_returning = frame
self.user_return(frame, arg)
finally:
self.frame_returning = None
if self.quitting: raise BdbQuit
# The user issued a 'next' or 'until' command.
if self.stopframe is frame and self.stoplineno != -1:
self._set_stopinfo(None, None)
return self.trace_dispatch
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
# When stepping with next/until/return in a generator frame, skip
# the internal StopIteration exception (with no traceback)
# triggered by a subiterator run with the 'yield from' statement.
if not (frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
and arg[0] is StopIteration and arg[2] is None):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
# Stop at the StopIteration or GeneratorExit exception when the user
# has set stopframe in a generator by issuing a return command, or a
# next/until command at the last statement in the generator before the
# exception.
elif (self.stopframe and frame is not self.stopframe
and self.stopframe.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
and arg[0] in (StopIteration, GeneratorExit)):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def is_skipped_module(self, module_name):
for pattern in self.skip:
if fnmatch.fnmatch(module_name, pattern):
return True
return False
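    # Illustration (hypothetical pattern): Bdb(skip=['importlib*']) makes
    # stop_here() decline to stop in any frame whose module __name__ matches
    # the glob, e.g. 'importlib._bootstrap'.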
def stop_here(self, frame):
# (CT) stopframe may now also be None, see dispatch_call.
# (CT) the former test for None is therefore removed from here.
if self.skip and \
self.is_skipped_module(frame.f_globals.get('__name__')):
return False
if frame is self.stopframe:
if self.stoplineno == -1:
return False
return frame.f_lineno >= self.stoplineno
if not self.stopframe:
return True
return False
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if filename not in self.breaks:
return False
lineno = frame.f_lineno
if lineno not in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = frame.f_code.co_firstlineno
if lineno not in self.breaks[filename]:
return False
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return True
else:
return False
def do_clear(self, arg):
raise NotImplementedError("subclass of bdb must implement do_clear()")
def break_anywhere(self, frame):
return self.canonic(frame.f_code.co_filename) in self.breaks
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, exc_info):
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
self.stopframe = stopframe
self.returnframe = returnframe
self.quitting = False
# stoplineno >= 0 means: stop at line >= the stoplineno
# stoplineno -1 means: don't stop at all
self.stoplineno = stoplineno
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_until(self, frame, lineno=None):
"""Stop when the line with the line no greater than the current one is
reached or when returning from current frame"""
# the name "until" is borrowed from gdb
if lineno is None:
lineno = frame.f_lineno + 1
self._set_stopinfo(frame, frame, lineno)
def set_step(self):
"""Stop after one line of code."""
# Issue #13183: pdb skips frames after hitting a breakpoint and running
# step commands.
# Restore the trace function in the caller (that may not have been set
# for performance reasons) when returning from the current frame.
if self.frame_returning:
caller_frame = self.frame_returning.f_back
if caller_frame and not caller_frame.f_trace:
caller_frame.f_trace = self.trace_dispatch
self._set_stopinfo(None, None)
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self._set_stopinfo(frame, None)
def set_return(self, frame):
"""Stop when returning from the given frame."""
if frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
self._set_stopinfo(frame, None, -1)
else:
self._set_stopinfo(frame.f_back, frame)
def set_trace(self, frame=None):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = True
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
    # error message if something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=False, cond=None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename, lineno)
list = self.breaks.setdefault(filename, [])
if lineno not in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
def _prune_breaks(self, filename, lineno):
if (filename, lineno) not in Breakpoint.bplist:
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if filename not in self.breaks:
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename, lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
self._prune_breaks(filename, lineno)
def clear_bpbynumber(self, arg):
try:
bp = self.get_bpbynumber(arg)
except ValueError as err:
return str(err)
bp.deleteMe()
self._prune_breaks(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if filename not in self.breaks:
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_bpbynumber(self, arg):
if not arg:
raise ValueError('Breakpoint number expected')
try:
number = int(arg)
except ValueError:
raise ValueError('Non-numeric breakpoint number %s' % arg)
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
raise ValueError('Breakpoint number %d out of range' % number)
if bp is None:
raise ValueError('Breakpoint %d already deleted' % number)
return bp
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
if f is None:
i = max(0, len(stack) - 1)
return stack, i
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, reprlib
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = '%s(%r)' % (filename, lineno)
if frame.f_code.co_name:
s += frame.f_code.co_name
else:
s += "<lambda>"
if '__args__' in frame.f_locals:
args = frame.f_locals['__args__']
else:
args = None
if args:
s += reprlib.repr(args)
else:
s += '()'
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
s += '->'
s += reprlib.repr(rv)
line = linecache.getline(filename, lineno, frame.f_globals)
if line:
s += lprefix + line.strip()
return s
# The following methods can be called by clients to use
# a debugger to debug a statement or an expression.
# Both can be given as a string, or a code object.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
if isinstance(cmd, str):
cmd = compile(cmd, "<string>", "exec")
sys.settrace(self.trace_dispatch)
try:
exec(cmd, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = True
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = True
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args, **kwds):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
res = func(*args, **kwds)
except BdbQuit:
pass
finally:
self.quitting = True
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
class Breakpoint:
"""Breakpoint class.
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
    bpbynumber = [None] # Each entry is None or an instance of Breakpoint
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=False, cond=None, funcname=None):
self.funcname = funcname
# Needed if funcname is not None.
self.func_first_executable_line = None
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = True
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next += 1
# Build the two lists
self.bpbynumber.append(self)
if (file, line) in self.bplist:
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
def bpprint(self, out=None):
if out is None:
out = sys.stdout
print(self.bpformat(), file=out)
def bpformat(self):
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes '
else:
disp = disp + 'no '
ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
ret += '\n\tstop only if %s' % (self.cond,)
if self.ignore:
ret += '\n\tignore next %d hits' % (self.ignore,)
if self.hits:
if self.hits > 1:
ss = 's'
else:
ss = ''
ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss)
return ret
def __str__(self):
return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line)
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True
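# Illustration: for a breakpoint created with funcname='spam' (as pdb does
# for 'break spam'), checkfuncname() defers the break until execution first
# reaches an executable line inside spam(), then remembers that line number.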
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file, line]
for b in possibles:
if not b.enabled:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits += 1
if not b.cond:
# If unconditional, and ignoring go on to next, else break
if b.ignore > 0:
b.ignore -= 1
continue
else:
# breakpoint and marker that it's ok to delete if temporary
return (b, True)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals, frame.f_locals)
if val:
if b.ignore > 0:
b.ignore -= 1
# continue
else:
return (b, True)
# else:
# continue
except:
# if eval fails, most conservative thing is to stop on
# breakpoint regardless of ignore count. Don't delete
# temporary, as another hint to user.
return (b, False)
return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print('+++ call', name, args)
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
print('+++', fn, frame.f_lineno, name, ':', line.strip())
def user_return(self, frame, retval):
print('+++ return', retval)
def user_exception(self, frame, exc_stuff):
print('+++ exception', exc_stuff)
self.set_continue()
def foo(n):
print('foo(', n, ')')
x = bar(n*10)
print('bar returned', x)
def bar(a):
print('bar(', a, ')')
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
cosmopolitan/third_party/python/Lib/shelve.py
"""Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Depending on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amounts
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import Pickler, Unpickler
from io import BytesIO
import collections
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.MutableMapping):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
self.dict = dict
if protocol is None:
protocol = 3
self._protocol = protocol
self.writeback = writeback
self.cache = {}
self.keyencoding = keyencoding
def __iter__(self):
for k in self.dict.keys():
yield k.decode(self.keyencoding)
def __len__(self):
return len(self.dict)
def __contains__(self, key):
return key.encode(self.keyencoding) in self.dict
def get(self, key, default=None):
if key.encode(self.keyencoding) in self.dict:
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = BytesIO(self.dict[key.encode(self.keyencoding)])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = BytesIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key.encode(self.keyencoding)] = f.getvalue()
def __delitem__(self, key):
del self.dict[key.encode(self.keyencoding)]
try:
del self.cache[key]
except KeyError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
if self.dict is None:
return
try:
self.sync()
try:
self.dict.close()
except AttributeError:
pass
finally:
# Catch errors that may happen when close is called from __del__
# because CPython is in interpreter shutdown.
try:
self.dict = _ClosedDict()
except:
self.dict = None
def __del__(self):
if not hasattr(self, 'writeback'):
# __init__ didn't succeed, so don't bother closing
# see http://bugs.python.org/issue1339007 for details
return
self.close()
def sync(self):
if self.writeback and self.cache:
self.writeback = False
for key, entry in self.cache.items():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
Shelf.__init__(self, dict, protocol, writeback, keyencoding)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def next(self):
(key, value) = next(self.dict)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
        import dbm  # deferred import; dbm.open below needs it
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
version of the pickle protocol.
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback)
cosmopolitan/third_party/python/Lib/pdb.py
#! /usr/bin/env python3
"""
The Python Debugger Pdb
=======================
To use the debugger in its simplest form:
>>> import pdb
>>> pdb.run('<a statement>')
The debugger's prompt is '(Pdb) '. This will stop in the first
function call in <a statement>.
Alternatively, if a statement terminated with an unhandled exception,
you can use pdb's post-mortem facility to inspect the contents of the
traceback:
>>> <a statement>
<exception traceback>
>>> import pdb
>>> pdb.pm()
The commands recognized by the debugger are listed in the next
section. Most can be abbreviated as indicated; e.g., h(elp) means
that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel',
nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in
square brackets. Alternatives in the command syntax are separated
by a vertical bar (|).
A blank line repeats the previous command literally, except for
'list', where it lists the next 11 lines.
Commands that the debugger doesn't recognize are assumed to be Python
statements and are executed in the context of the program being
debugged. Python statements can also be prefixed with an exclamation
point ('!'). This is a powerful way to inspect the program being
debugged; it is even possible to change variables or call functions.
When an exception occurs in such a statement, the exception name is
printed but the debugger's state is not changed.
The debugger supports aliases, which can save typing.  Aliases can
have parameters (see the alias help entry), which allows a certain
level of adaptability to the context under examination.
Multiple commands may be entered on a single line, separated by the
pair ';;'. No intelligence is applied to separating the commands; the
input is split at the first ';;', even if it is in the middle of a
quoted string.
If a file ".pdbrc" exists in your home directory or in the current
directory, it is read in and executed as if it had been typed at the
debugger prompt. This is particularly useful for aliases. If both
files exist, the one in the home directory is read first and aliases
defined there can be overridden by the local file. This behavior can be
disabled by passing the "readrc=False" argument to the Pdb constructor.
Aside from aliases, the debugger is not directly programmable; but it
is implemented as a class from which you can derive your own debugger
class, which you can make as fancy as you like.
Debugger commands
=================
"""
# NOTE: the actual command documentation is collected from docstrings of the
# commands and is appended to __doc__ after the class has been defined.
import os
import re
import sys
import cmd
import bdb
import dis
import code
import glob
import pprint
import signal
import inspect
import traceback
import linecache
class Restart(Exception):
"""Causes a debugger to be restarted for the debugged python program."""
pass
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except OSError:
return None
# consumer of this info expects the first line to be 1
with fp:
for lineno, line in enumerate(fp, start=1):
if cre.match(line):
return funcname, filename, lineno
return None
def getsourcelines(obj):
lines, lineno = inspect.findsource(obj)
if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
# must be a module frame: do not try to cut a block out of it
return lines, 1
elif inspect.ismodule(obj):
return lines, 1
return inspect.getblock(lines[lineno:]), lineno+1
def lasti2lineno(code, lasti):
linestarts = list(dis.findlinestarts(code))
linestarts.reverse()
for i, lineno in linestarts:
if lasti >= i:
return lineno
return 0
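# Rough worked example of the helper above (assumes CPython bytecode): for
#     def f():
#         return 1
# dis.findlinestarts(f.__code__) yields [(0, 2)], so lasti2lineno(f.__code__, 0)
# maps bytecode offset 0 back to source line 2.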
class _rstr(str):
"""String that doesn't quote its repr."""
def __repr__(self):
return self
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
_previous_sigint_handler = None
def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None,
nosigint=False, readrc=True):
bdb.Bdb.__init__(self, skip=skip)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.displaying = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = False
self.tb_lineno = {}
# Try to load readline if it exists
try:
import readline
# remove some common file name delimiters
readline.set_completer_delims(' \t\n`@#$%^&*()=+[{]}\\|;:\'",<>?')
except ImportError:
pass
self.allow_kbdint = False
self.nosigint = nosigint
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if readrc:
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
with open(os.path.join(envHome, ".pdbrc")) as rcFile:
self.rcLines.extend(rcFile)
except OSError:
pass
try:
with open(".pdbrc") as rcFile:
self.rcLines.extend(rcFile)
except OSError:
pass
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt
# must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace
# must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining
# a command list
self.commands_bnum = None # The breakpoint number for which we are
# defining a list
def sigint_handler(self, signum, frame):
if self.allow_kbdint:
raise KeyboardInterrupt
self.message("\nProgram interrupted. (Use 'cont' to resume).")
self.set_step()
self.set_trace(frame)
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
self.tb_lineno.clear()
def setup(self, f, tb):
self.forget()
self.stack, self.curindex = self.get_stack(f, tb)
while tb:
# when setting up post-mortem debugging with a traceback, save all
# the original line numbers to be displayed along the current line
# numbers (which can be different, e.g. due to finally clauses)
lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti)
self.tb_lineno[tb.tb_frame] = lineno
tb = tb.tb_next
self.curframe = self.stack[self.curindex][0]
# The f_locals dictionary is updated from the actual frame
# locals whenever the .f_locals accessor is called, so we
# cache it here to ensure that modifications are not overwritten.
self.curframe_locals = self.curframe.f_locals
return self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if not self.rcLines:
return
# local copy because of recursion
rcLines = self.rcLines
rcLines.reverse()
# execute every line only once
self.rcLines = []
while rcLines:
line = rcLines.pop().strip()
if line and line[0] != '#':
if self.onecmd(line):
# if onecmd returns True, the command wants to exit
# from the interaction, save leftover rc lines
# to execute before next interaction
self.rcLines += reversed(rcLines)
return True
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
self.message('--Call--')
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self, frame):
"""Call every command that was set for the current active breakpoint
(if there is one).
Returns True if the normal interaction function must be called,
False otherwise."""
# self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit
if getattr(self, "currentbp", False) and \
self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self._cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
if self._wait_for_mainpyfile:
return
frame.f_locals['__return__'] = return_value
self.message('--Return--')
self.interaction(frame, None)
def user_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
if self._wait_for_mainpyfile:
return
exc_type, exc_value, exc_traceback = exc_info
frame.f_locals['__exception__'] = exc_type, exc_value
# An 'Internal StopIteration' exception is an exception debug event
# issued by the interpreter when handling a subgenerator run with
# 'yield from' or a generator controlled by a for loop. No exception has
# actually occurred in this case. The debugger uses this debug event to
# stop when the debuggee is returning from such generators.
prefix = 'Internal ' if (not exc_traceback
and exc_type is StopIteration) else ''
self.message('%s%s' % (prefix,
traceback.format_exception_only(exc_type, exc_value)[-1].strip()))
self.interaction(frame, exc_traceback)
# General interaction function
def _cmdloop(self):
while True:
try:
# keyboard interrupts allow for an easy way to cancel
# the current command, so allow them during interactive input
self.allow_kbdint = True
self.cmdloop()
self.allow_kbdint = False
break
except KeyboardInterrupt:
self.message('--KeyboardInterrupt--')
# Called before loop, handles display expressions
def preloop(self):
displaying = self.displaying.get(self.curframe)
if displaying:
for expr, oldvalue in displaying.items():
newvalue = self._getval_except(expr)
                # check for identity first; this prevents a custom __eq__
                # from being called on every loop, and also keeps instances
                # whose fields change in place from being re-displayed
if newvalue is not oldvalue and newvalue != oldvalue:
displaying[expr] = newvalue
self.message('display %s: %r [old: %r]' %
(expr, newvalue, oldvalue))
def interaction(self, frame, traceback):
# Restore the previous signal handler at the Pdb prompt.
if Pdb._previous_sigint_handler:
signal.signal(signal.SIGINT, Pdb._previous_sigint_handler)
Pdb._previous_sigint_handler = None
if self.setup(frame, traceback):
# no interaction desired at this time (happens if .pdbrc contains
# a command like "continue")
self.forget()
return
self.print_stack_entry(self.stack[self.curindex])
self._cmdloop()
self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
# reproduce the behavior of the standard displayhook, not printing None
if obj is not None:
self.message(repr(obj))
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
save_displayhook = sys.displayhook
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.displayhook = self.displayhook
exec(code, globals, locals)
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
sys.displayhook = save_displayhook
except:
exc_info = sys.exc_info()[:2]
self.error(traceback.format_exception_only(*exc_info)[-1].strip())
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii += 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
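    # Worked example of the expansion above, using the hypothetical alias
    #     alias pi for k in %1.__dict__.keys(): print(k)
    # The input "pi obj" is rewritten here to
    #     for k in obj.__dict__.keys(): print(k)
    # and "%*" would splice in all remaining arguments at once.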
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self, line):
"""Handles one command line during command list definition."""
cmd, arg, line = self.parseline(line)
if not cmd:
return
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if arg:
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
# one of the resuming commands
if func.__name__ in self.commands_resuming:
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# interface abstraction functions
def message(self, msg):
print(msg, file=self.stdout)
def error(self, msg):
print('***', msg, file=self.stdout)
# Generic completion functions. Individual complete_foo methods can be
# assigned below to one of these functions.
def _complete_location(self, text, line, begidx, endidx):
# Complete a file/module/function location for break/tbreak/clear.
if line.strip().endswith((':', ',')):
# Here comes a line number or a condition which we can't complete.
return []
# First, try to find matching functions (i.e. expressions).
try:
ret = self._complete_expression(text, line, begidx, endidx)
except Exception:
ret = []
# Then, try to complete file names as well.
globs = glob.glob(text + '*')
for fn in globs:
if os.path.isdir(fn):
ret.append(fn + '/')
elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')):
ret.append(fn + ':')
return ret
def _complete_bpnumber(self, text, line, begidx, endidx):
# Complete a breakpoint number. (This would be more helpful if we could
# display additional info along with the completions, such as file/line
# of the breakpoint.)
return [str(i) for i, bp in enumerate(bdb.Breakpoint.bpbynumber)
if bp is not None and str(i).startswith(text)]
def _complete_expression(self, text, line, begidx, endidx):
# Complete an arbitrary expression.
if not self.curframe:
return []
# Collect globals and locals. It is usually not really sensible to also
# complete builtins, and they clutter the namespace quite heavily, so we
# leave them out.
ns = self.curframe.f_globals.copy()
ns.update(self.curframe_locals)
if '.' in text:
# Walk an attribute chain up to the last part, similar to what
# rlcompleter does. This will bail if any of the parts are not
# simple attribute access, which is what we want.
dotted = text.split('.')
try:
obj = ns[dotted[0]]
for part in dotted[1:-1]:
obj = getattr(obj, part)
except (KeyError, AttributeError):
return []
prefix = '.'.join(dotted[:-1]) + '.'
return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])]
else:
# Complete a simple name.
return [n for n in ns.keys() if n.startswith(text)]
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
def do_commands(self, arg):
"""commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber.
The commands themselves are entered on the following lines.
Type a line containing just 'end' to terminate the commands.
The commands are executed when the breakpoint is hit.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up
again. Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations)
terminates the command list (as if that command was
immediately followed by end). This is because any time you
resume execution (even with a simple next or step), you may
encounter another breakpoint -- which could have its own
command list, leading to ambiguities about which list to
execute.
If you use the 'silent' command in the command list, the usual
message about stopping at a breakpoint is not printed. This
may be desirable for breakpoints that are to print a specific
message and then continue. If none of the other commands
print anything, you will see no sign that the breakpoint was
reached.
"""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber) - 1
else:
try:
bnum = int(arg)
except:
self.error("Usage: commands [bnum]\n ...\n end")
return
self.commands_bnum = bnum
# Save old definitions for the case of a keyboard interrupt.
if bnum in self.commands:
old_command_defs = (self.commands[bnum],
self.commands_doprompt[bnum],
self.commands_silent[bnum])
else:
old_command_defs = None
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
try:
self.cmdloop()
except KeyboardInterrupt:
# Restore old definitions.
if old_command_defs:
self.commands[bnum] = old_command_defs[0]
self.commands_doprompt[bnum] = old_command_defs[1]
self.commands_silent[bnum] = old_command_defs[2]
else:
del self.commands[bnum]
del self.commands_doprompt[bnum]
del self.commands_silent[bnum]
self.error('command definition aborted, old commands restored')
finally:
self.commands_defining = False
self.prompt = prompt_back
complete_commands = _complete_bpnumber
def do_break(self, arg, temporary = 0):
"""b(reak) [ ([filename:]lineno | function) [, condition] ]
Without argument, list all breaks.
With a line number argument, set a break at this line in the
current file. With a function name, set a break at the first
executable line of that function. If a second argument is
present, it is a string specifying an expression which must
evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on
sys.path; the .py suffix may be omitted.
"""
if not arg:
if self.breaks: # There's at least one
self.message("Num Type Disp Enb Where")
for bp in bdb.Breakpoint.bpbynumber:
if bp:
self.message(bp.bpformat())
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
self.error('%r not found from sys.path' % filename)
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError:
self.error('Bad lineno: %s' % arg)
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe_locals)
except:
func = arg
try:
if hasattr(func, '__func__'):
func = func.__func__
code = func.__code__
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
self.error('The specified object %r is not a function '
'or was not found along sys.path.' % arg)
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err:
self.error(err)
else:
bp = self.get_breaks(filename, line)[-1]
self.message("Breakpoint %d at %s:%d" %
(bp.number, bp.file, bp.line))
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
complete_break = _complete_location
complete_b = _complete_location
def do_tbreak(self, arg):
"""tbreak [ ([filename:]lineno | function) [, condition] ]
Same arguments as break, but sets a temporary breakpoint: it
is automatically deleted when first hit.
"""
self.do_break(arg, 1)
complete_tbreak = _complete_location
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
# this method should be callable before starting debugging, so default
# to "no globals" if there is no current frame
globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
line = linecache.getline(filename, lineno, globs)
if not line:
self.message('End of file')
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
self.error('Blank or comment')
return 0
return lineno
def do_enable(self, arg):
"""enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
breakpoint numbers.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.enable()
self.message('Enabled %s' % bp)
complete_enable = _complete_bpnumber
def do_disable(self, arg):
"""disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
breakpoint numbers. Disabling a breakpoint means it cannot
cause the program to stop execution, but unlike clearing a
breakpoint, it remains in the list of breakpoints and can be
(re-)enabled.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.disable()
self.message('Disabled %s' % bp)
complete_disable = _complete_bpnumber
def do_condition(self, arg):
"""condition bpnumber [condition]
Set a new condition for the breakpoint, an expression which
must evaluate to true before the breakpoint is honored. If
condition is absent, any existing condition is removed; i.e.,
the breakpoint is made unconditional.
"""
args = arg.split(' ', 1)
try:
cond = args[1]
except IndexError:
cond = None
try:
bp = self.get_bpbynumber(args[0].strip())
except IndexError:
self.error('Breakpoint number expected')
except ValueError as err:
self.error(err)
else:
bp.cond = cond
if not cond:
self.message('Breakpoint %d is now unconditional.' % bp.number)
else:
self.message('New condition set for breakpoint %d.' % bp.number)
complete_condition = _complete_bpnumber
def do_ignore(self, arg):
"""ignore bpnumber [count]
Set the ignore count for the given breakpoint number. If
count is omitted, the ignore count is set to 0. A breakpoint
becomes active when the ignore count is zero. When non-zero,
the count is decremented each time the breakpoint is reached
and the breakpoint is not disabled and any associated
condition evaluates to true.
"""
args = arg.split()
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = self.get_bpbynumber(args[0].strip())
except IndexError:
self.error('Breakpoint number expected')
except ValueError as err:
self.error(err)
else:
bp.ignore = count
if count > 0:
if count > 1:
countstr = '%d crossings' % count
else:
countstr = '1 crossing'
self.message('Will ignore next %s of breakpoint %d.' %
(countstr, bp.number))
else:
self.message('Will stop next time breakpoint %d is reached.'
% bp.number)
complete_ignore = _complete_bpnumber
def do_clear(self, arg):
"""cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
"""
if not arg:
try:
reply = input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp]
self.clear_all_breaks()
for bp in bplist:
self.message('Deleted %s' % bp)
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
bplist = self.get_breaks(filename, lineno)
err = self.clear_break(filename, lineno)
if err:
self.error(err)
else:
for bp in bplist:
self.message('Deleted %s' % bp)
return
numberlist = arg.split()
for i in numberlist:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
self.clear_bpbynumber(i)
self.message('Deleted %s' % bp)
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
complete_clear = _complete_location
complete_cl = _complete_location
def do_where(self, arg):
"""w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command.
"""
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def _select_frame(self, number):
assert 0 <= number < len(self.stack)
self.curindex = number
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
def do_up(self, arg):
"""u(p) [count]
Move the current frame count (default one) levels up in the
stack trace (to an older frame).
"""
if self.curindex == 0:
self.error('Oldest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = 0
else:
newframe = max(0, self.curindex - count)
self._select_frame(newframe)
do_u = do_up
def do_down(self, arg):
"""d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
"""
if self.curindex + 1 == len(self.stack):
self.error('Newest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = len(self.stack) - 1
else:
newframe = min(len(self.stack) - 1, self.curindex + count)
self._select_frame(newframe)
do_d = do_down
def do_until(self, arg):
"""unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
"""
if arg:
try:
lineno = int(arg)
except ValueError:
self.error('Error in argument: %r' % arg)
return
if lineno <= self.curframe.f_lineno:
self.error('"until" line number is smaller than current '
'line number')
return
else:
lineno = None
self.set_until(self.curframe, lineno)
return 1
do_unt = do_until
def do_step(self, arg):
"""s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current
function).
"""
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
"""n(ext)
Continue execution until the next line in the current function
is reached or it returns.
"""
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""run [args...]
Restart the debugged python program. If a string is supplied
it is split with "shlex", and the result is used as the new
sys.argv. History, breakpoints, actions and debugger options
are preserved. "restart" is an alias for "run".
"""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
# this is caught in the main debugger loop
raise Restart
do_restart = do_run
def do_return(self, arg):
"""r(eturn)
Continue execution until the current function returns.
"""
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
"""c(ont(inue))
Continue execution, only stop when a breakpoint is encountered.
"""
if not self.nosigint:
try:
Pdb._previous_sigint_handler = \
signal.signal(signal.SIGINT, self.sigint_handler)
except ValueError:
# ValueError happens when do_continue() is invoked from
# a non-main thread in which case we just continue without
# SIGINT set. Would printing a message here (once) make
# sense?
pass
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
"""j(ump) lineno
Set the next line that will be executed. Only available in
the bottom-most frame. This lets you jump back and execute
code again, or jump forward to skip code that you don't want
to run.
It should be noted that not all jumps are allowed -- for
instance it is not possible to jump into the middle of a
for loop or out of a finally clause.
"""
if self.curindex + 1 != len(self.stack):
self.error('You can only jump within the bottom frame')
return
try:
arg = int(arg)
except ValueError:
self.error("The 'jump' command requires a line number")
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError as e:
self.error('Jump failed: %s' % e)
do_j = do_jump
def do_debug(self, arg):
"""debug code
Enter a recursive debugger that steps through the code
argument (which is an arbitrary expression or statement to be
executed in the current environment).
"""
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
self.message("ENTERING RECURSIVE DEBUGGER")
sys.call_tracing(p.run, (arg, globals, locals))
self.message("LEAVING RECURSIVE DEBUGGER")
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
complete_debug = _complete_expression
def do_quit(self, arg):
"""q(uit)\nexit
Quit from the debugger. The program being executed is aborted.
"""
self._user_requested_quit = True
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
"""EOF
Handles the receipt of EOF as a command.
"""
self.message('')
self._user_requested_quit = True
self.set_quit()
return 1
def do_args(self, arg):
"""a(rgs)
Print the argument list of the current function.
"""
co = self.curframe.f_code
dict = self.curframe_locals
n = co.co_argcount
        if co.co_flags & 4: n = n+1    # CO_VARARGS: account for *args
        if co.co_flags & 8: n = n+1    # CO_VARKEYWORDS: account for **kwargs
for i in range(n):
name = co.co_varnames[i]
if name in dict:
self.message('%s = %r' % (name, dict[name]))
else:
self.message('%s = *** undefined ***' % (name,))
do_a = do_args
def do_retval(self, arg):
"""retval
Print the return value for the last return of a function.
"""
if '__return__' in self.curframe_locals:
self.message(repr(self.curframe_locals['__return__']))
else:
self.error('Not yet returned!')
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals, self.curframe_locals)
except:
exc_info = sys.exc_info()[:2]
self.error(traceback.format_exception_only(*exc_info)[-1].strip())
raise
def _getval_except(self, arg, frame=None):
try:
if frame is None:
return eval(arg, self.curframe.f_globals, self.curframe_locals)
else:
return eval(arg, frame.f_globals, frame.f_locals)
except:
exc_info = sys.exc_info()[:2]
err = traceback.format_exception_only(*exc_info)[-1].strip()
return _rstr('** raised %s **' % err)
def do_p(self, arg):
"""p expression
Print the value of the expression.
"""
try:
self.message(repr(self._getval(arg)))
except:
pass
def do_pp(self, arg):
"""pp expression
Pretty-print the value of the expression.
"""
try:
self.message(pprint.pformat(self._getval(arg)))
except:
pass
complete_print = _complete_expression
complete_p = _complete_expression
complete_pp = _complete_expression
def do_list(self, arg):
"""l(ist) [first [,last] | .]
List source code for the current file. Without arguments,
list 11 lines around the current line or continue the previous
listing. With . as argument, list 11 lines around the current
line. With one argument, list 11 lines starting at that line.
With two arguments, list the given range; if the second
argument is less than the first, it is a count.
The current line in the current frame is indicated by "->".
If an exception is being debugged, the line where the
exception was originally raised or propagated is indicated by
">>", if it differs from the current line.
"""
self.lastcmd = 'list'
last = None
if arg and arg != '.':
try:
if ',' in arg:
first, last = arg.split(',')
first = int(first.strip())
last = int(last.strip())
if last < first:
# assume it's a count
last = first + last
else:
first = int(arg.strip())
first = max(1, first - 5)
except ValueError:
self.error('Error in argument: %r' % arg)
return
elif self.lineno is None or arg == '.':
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines = linecache.getlines(filename, self.curframe.f_globals)
self._print_lines(lines[first-1:last], first, breaklist,
self.curframe)
self.lineno = min(last, len(lines))
if len(lines) < last:
self.message('[EOF]')
except KeyboardInterrupt:
pass
do_l = do_list
def do_longlist(self, arg):
"""longlist | ll
List the whole source code for the current function or frame.
"""
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines, lineno = getsourcelines(self.curframe)
except OSError as err:
self.error(err)
return
self._print_lines(lines, lineno, breaklist, self.curframe)
do_ll = do_longlist
def do_source(self, arg):
"""source expression
Try to get source code for the given object and display it.
"""
try:
obj = self._getval(arg)
except:
return
try:
lines, lineno = getsourcelines(obj)
except (OSError, TypeError) as err:
self.error(err)
return
self._print_lines(lines, lineno)
complete_source = _complete_expression
def _print_lines(self, lines, start, breaks=(), frame=None):
"""Print a range of lines."""
if frame:
current_lineno = frame.f_lineno
exc_lineno = self.tb_lineno.get(frame, -1)
else:
current_lineno = exc_lineno = -1
for lineno, line in enumerate(lines, start):
s = str(lineno).rjust(3)
if len(s) < 4:
s += ' '
if lineno in breaks:
s += 'B'
else:
s += ' '
if lineno == current_lineno:
s += '->'
elif lineno == exc_lineno:
s += '>>'
self.message(s + '\t' + line.rstrip())
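    # Example of a line rendered above (hypothetical source): line 12 with
    # a breakpoint that is also the current line prints as
    #     " 12 B->\tx = compute()"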
def do_whatis(self, arg):
"""whatis arg
Print the type of the argument.
"""
try:
value = self._getval(arg)
except:
# _getval() already printed the error
return
code = None
# Is it a function?
try:
code = value.__code__
except Exception:
pass
if code:
self.message('Function %s' % code.co_name)
return
# Is it an instance method?
try:
code = value.__func__.__code__
except Exception:
pass
if code:
self.message('Method %s' % code.co_name)
return
# Is it a class?
if value.__class__ is type:
self.message('Class %s.%s' % (value.__module__, value.__qualname__))
return
# None of the above...
self.message(type(value))
complete_whatis = _complete_expression
def do_display(self, arg):
"""display [expression]
Display the value of the expression if it changed, each time execution
stops in the current frame.
Without expression, list all display expressions for the current frame.
"""
if not arg:
self.message('Currently displaying:')
for item in self.displaying.get(self.curframe, {}).items():
self.message('%s: %r' % item)
else:
val = self._getval_except(arg)
self.displaying.setdefault(self.curframe, {})[arg] = val
self.message('display %s: %r' % (arg, val))
complete_display = _complete_expression
def do_undisplay(self, arg):
"""undisplay [expression]
Do not display the expression any more in the current frame.
Without expression, clear all display expressions for the current frame.
"""
if arg:
try:
del self.displaying.get(self.curframe, {})[arg]
except KeyError:
self.error('not displaying %s' % arg)
else:
self.displaying.pop(self.curframe, None)
def complete_undisplay(self, text, line, begidx, endidx):
return [e for e in self.displaying.get(self.curframe, {})
if e.startswith(text)]
def do_interact(self, arg):
"""interact
Start an interactive interpreter whose global namespace
contains all the (global and local) names found in the current scope.
"""
ns = self.curframe.f_globals.copy()
ns.update(self.curframe_locals)
code.interact("*interactive*", local=ns)
def do_alias(self, arg):
"""alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
"""
args = arg.split()
if len(args) == 0:
keys = sorted(self.aliases.keys())
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
if args[0] in self.aliases and len(args) == 1:
self.message("%s = %s" % (args[0], self.aliases[args[0]]))
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
"""unalias name
Delete the specified alias.
"""
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
def complete_unalias(self, text, line, begidx, endidx):
return [a for a in self.aliases if a.startswith(text)]
# List of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
prefix = '> '
else:
prefix = ' '
self.message(prefix +
self.format_stack_entry(frame_lineno, prompt_prefix))
# Provide help
def do_help(self, arg):
"""h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command.
"help pdb" shows the full pdb documentation.
"help exec" gives help on the ! command.
"""
if not arg:
return cmd.Cmd.do_help(self, arg)
try:
try:
topic = getattr(self, 'help_' + arg)
return topic()
except AttributeError:
command = getattr(self, 'do_' + arg)
except AttributeError:
self.error('No help for %r' % arg)
else:
if sys.flags.optimize >= 2:
self.error('No help for %r; please do not run Python with -OO '
'if you need command help' % arg)
return
self.message(command.__doc__.rstrip())
do_h = do_help
def help_exec(self):
"""(!) statement
Execute the (one-line) statement in the context of the current
stack frame. The exclamation point can be omitted unless the
first word of the statement resembles a debugger command. To
assign to a global variable you must always prefix the command
with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)
"""
self.message((self.help_exec.__doc__ or '').strip())
def help_pdb(self):
help()
# other helper functions
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
def _runscript(self, filename):
# The script has to run in __main__ namespace (or imports from
# __main__ will break).
#
# So we clear up the __main__ and set several special variables
# (this gets rid of pdb's globals and cleans old variables on restarts).
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happens
# BEFORE debugger even reaches user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = True
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = False
with open(filename, "rb") as fp:
statement = "exec(compile(%r, %r, 'exec'))" % \
(fp.read(), self.mainpyfile)
self.run(statement)
# Collect all command help into docstring, if not run with -OO
if __doc__ is not None:
# unfortunately we can't guess this order from the class definition
_help_order = [
'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable',
'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until',
'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist',
'args', 'p', 'pp', 'whatis', 'source', 'display', 'undisplay',
'interact', 'alias', 'unalias', 'debug', 'quit',
]
for _command in _help_order:
__doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n'
__doc__ += Pdb.help_exec.__doc__
del _help_order, _command
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
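# Canonical use of the helper above: drop into the debugger at a chosen
# point in your own code; execution stops in the caller's frame.
#
#     import pdb; pdb.set_trace()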
# Post-Mortem interface
def post_mortem(t=None):
# handling the default
if t is None:
        # sys.exc_info() returns (type, value, traceback) if an exception is
        # being handled, otherwise it returns (None, None, None)
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
p = Pdb()
p.reset()
p.interaction(None, t)
def pm():
post_mortem(sys.last_traceback)
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
import pydoc
pydoc.pager(__doc__)
_usage = """\
usage: pdb.py [-c command] ... pyfile [arg] ...
Debug the Python program given by pyfile.
Initial commands are read from .pdbrc files in your home directory
and in the current directory, if they exist. Commands supplied with
-c are executed after commands from .pdbrc files.
To let the script run until an exception occurs, use "-c continue".
To let the script run up to a given line X in the debugged file, use
"-c 'until X'"."""
def main():
import getopt
    # getopt long option names must not include the leading '--' (see the
    # getopt module's docstring), so pass 'help' and 'command=' here.
    opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['help', 'command='])
if not args:
print(_usage)
sys.exit(2)
commands = []
for opt, optarg in opts:
if opt in ['-h', '--help']:
print(_usage)
sys.exit()
elif opt in ['-c', '--command']:
commands.append(optarg)
mainpyfile = args[0] # Get script filename
if not os.path.exists(mainpyfile):
print('Error:', mainpyfile, 'does not exist')
sys.exit(1)
sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
pdb.rcLines.extend(commands)
while True:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print("The program finished and will be restarted")
except Restart:
print("Restarting", mainpyfile, "with arguments:")
print("\t" + " ".join(args))
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print("The program exited via sys.exit(). Exit status:", end=' ')
print(sys.exc_info()[1])
except SyntaxError:
traceback.print_exc()
sys.exit(1)
except:
traceback.print_exc()
print("Uncaught exception. Entering post mortem debugging")
print("Running 'cont' or 'step' will restart the program")
t = sys.exc_info()[2]
pdb.interaction(None, t)
print("Post mortem debugger finished. The " + mainpyfile +
" will be restarted")
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
import pdb
pdb.main()
| 61,312 | 1,695 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/getopt.py | """Parser for command line options.
This module helps scripts to parse the command line arguments in
sys.argv. It supports the same conventions as the Unix getopt()
function (including the special meanings of arguments of the form `-'
and `--'). Long options similar to those supported by GNU software
may be used as well via an optional third argument. This module
provides two functions and an exception:
getopt() -- Parse command line options
gnu_getopt() -- Like getopt(), but allow option and non-option arguments
to be intermixed.
GetoptError -- exception (class) raised with 'opt' attribute, which is the
option involved with the exception.
"""
# Long option support added by Lars Wirzenius <[email protected]>.
#
# Gerrit Holl <[email protected]> moved the string-based exceptions
# to class-based exceptions.
#
# Peter Åstrand <[email protected]> added gnu_getopt().
#
# TODO for gnu_getopt():
#
# - GNU getopt_long_only mechanism
# - allow the caller to specify ordering
# - RETURN_IN_ORDER option
# - GNU extension with '-' as first character of option string
# - optional arguments, specified by double colons
# - an option string with a W followed by semicolon should
# treat "-W foo" as "--foo"
__all__ = ["GetoptError","error","getopt","gnu_getopt"]
import os
try:
from gettext import gettext as _
except ImportError:
# Bootstrapping Python: gettext's dependencies not built yet
def _(s): return s
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
error = GetoptError # backward compatibility
def getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
Parses command line options and parameter list. args is the
argument list to be parsed, without the leading reference to the
running program. Typically, this means "sys.argv[1:]". shortopts
is the string of option letters that the script wants to
recognize, with options that require an argument followed by a
colon (i.e., the same format that Unix getopt() uses). If
specified, longopts is a list of strings with the names of the
long options which should be supported. The leading '--'
characters should not be included in the option name. Options
which require an argument should be followed by an equal sign
('=').
The return value consists of two elements: the first is a list of
(option, value) pairs; the second is the list of program arguments
left after the option list was stripped (this is a trailing slice
of the first argument). Each option-and-value pair returned has
the option as its first element, prefixed with a hyphen (e.g.,
'-x'), and the option argument as its second element, or an empty
string if the option has no argument. The options occur in the
list in the same order in which they were found, thus allowing
multiple occurrences. Long and short options may be mixed.
"""
opts = []
    if isinstance(longopts, str):
        longopts = [longopts]
else:
longopts = list(longopts)
while args and args[0].startswith('-') and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0].startswith('--'):
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
else:
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
return opts, args
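# Illustrative call (same spirit as the demo at the bottom of this module):
#     getopt(['-a', '1', '--beta', 'rest'], 'a:b', ['alpha=', 'beta'])
# returns
#     ([('-a', '1'), ('--beta', '')], ['rest'])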
def gnu_getopt(args, shortopts, longopts = []):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
If the first character of the option string is `+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
elif os.environ.get("POSIXLY_CORRECT"):
all_options_first = True
else:
all_options_first = False
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-' and args[0] != '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
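# Illustrative call: unlike getopt() above, a leading non-option argument
# does not stop option processing, so
#     gnu_getopt(['x', '-a', '1'], 'a:')
# returns ([('-a', '1')], ['x']) -- unless POSIXLY_CORRECT is set or the
# option string starts with '+'.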
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i+1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError(_('option --%s requires argument') % opt, opt)
optarg, args = args[0], args[1:]
elif optarg is not None:
raise GetoptError(_('option --%s must not have an argument') % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError(_('option --%s not recognized') % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError(_('option --%s not a unique prefix') % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError(_('option -%s requires argument') % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i+1)
raise GetoptError(_('option -%s not recognized') % opt, opt)
if __name__ == '__main__':
import sys
print(getopt(sys.argv[1:], "a:b", ["alpha=", "beta"]))
| 7,489 | 216 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/pathlib.py | import cosmo
import fnmatch
import functools
import io
import os
import ntpath
import posixpath
import re
import sys
from collections.abc import Sequence
from contextlib import contextmanager
from errno import EINVAL, ENOENT, ENOTDIR
from operator import attrgetter
from _stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO
from urllib.parse import quote_from_bytes as urlquote_from_bytes
try:
    from posix import _getfinalpathname
except ImportError:
    # resolve() in _WindowsFlavour checks for None before using this
    _getfinalpathname = None
__all__ = [
"PurePath", "PurePosixPath", "PureWindowsPath",
"Path", "PosixPath", "WindowsPath",
]
#
# Internals
#
def _is_wildcard_pattern(pat):
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
"""A flavour implements a particular (platform-specific) set of path
semantics."""
def __init__(self):
self.join = self.sep.join
def parse_parts(self, parts):
parsed = []
sep = self.sep
altsep = self.altsep
drv = root = ''
it = reversed(parts)
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv, root, rel = self.splitroot(part)
if sep in rel:
for x in reversed(rel.split(sep)):
if x and x != '.':
parsed.append(sys.intern(x))
else:
if rel and rel != '.':
parsed.append(sys.intern(rel))
if drv or root:
if not drv:
# If no drive is present, try to find one in the previous
# parts. This makes the result of parsing e.g.
# ("C:", "/", "a") reasonably intuitive.
for part in it:
if not part:
continue
if altsep:
part = part.replace(altsep, sep)
drv = self.splitroot(part)[0]
if drv:
break
break
if drv or root:
parsed.append(drv + root)
parsed.reverse()
return drv, root, parsed
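    # Worked example (Windows flavour): parse_parts(("C:", "/", "a"))
    # returns ('C:', '\\', ['C:\\', 'a']) -- the drive found in an earlier
    # part is folded into the root, per the comment above.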
def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
"""
Join the two paths represented by the respective
(drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
"""
if root2:
if not drv2 and drv:
return drv, root2, [drv + root2] + parts2[1:]
elif drv2:
if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
# Same drive => second path is relative to the first
return drv, root, parts + parts2[1:]
else:
# Second path is non-anchored (common case)
return drv, root, parts + parts2
return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
# Reference for Windows paths can be found at
# http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx
sep = '\\'
altsep = '/'
has_drv = True
pathmod = ntpath
is_supported = (os.name == 'nt' or cosmo.kernel == 'nt')
drive_letters = (
set(chr(x) for x in range(ord('a'), ord('z') + 1)) |
set(chr(x) for x in range(ord('A'), ord('Z') + 1))
)
ext_namespace_prefix = '\\\\?\\'
reserved_names = (
{'CON', 'PRN', 'AUX', 'NUL'} |
{'COM%d' % i for i in range(1, 10)} |
{'LPT%d' % i for i in range(1, 10)}
)
# Interesting findings about extended paths:
# - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
# but '\\?\c:/a' is not
# - extended paths are always absolute; "relative" extended paths will
# fail.
def splitroot(self, part, sep=sep):
first = part[0:1]
second = part[1:2]
if (second == sep and first == sep):
# XXX extended paths should also disable the collapsing of "."
# components (according to MSDN docs).
prefix, part = self._split_extended_path(part)
first = part[0:1]
second = part[1:2]
else:
prefix = ''
third = part[2:3]
if (second == sep and first == sep and third != sep):
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvvv root
# \\machine\mountpoint\directory\etc\...
# directory ^^^^^^^^^^^^^^
index = part.find(sep, 2)
if index != -1:
index2 = part.find(sep, index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 != index + 1:
if index2 == -1:
index2 = len(part)
if prefix:
return prefix + part[1:index2], sep, part[index2+1:]
else:
return part[:index2], sep, part[index2+1:]
drv = root = ''
if second == ':' and first in self.drive_letters:
drv = part[:2]
part = part[2:]
first = third
if first == sep:
root = first
part = part.lstrip(sep)
return prefix + drv, root, part
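    # Worked example: for r'\\server\share\dir\x' this returns the drive
    # \\server\share, a single-backslash root, and the relative part dir\x.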
def casefold(self, s):
return s.lower()
def casefold_parts(self, parts):
return [p.lower() for p in parts]
def resolve(self, path, strict=False):
s = str(path)
if not s:
return os.getcwd()
previous_s = None
if _getfinalpathname is not None:
if strict:
return self._ext_to_normal(_getfinalpathname(s))
else:
                tail_parts = []  # Components stripped from the end until an existing prefix is found
while True:
try:
s = self._ext_to_normal(_getfinalpathname(s))
except FileNotFoundError:
previous_s = s
s, tail = os.path.split(s)
tail_parts.append(tail)
if previous_s == s:
return path
else:
return os.path.join(s, *reversed(tail_parts))
# Means fallback on absolute
return None
def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
prefix = ''
if s.startswith(ext_prefix):
prefix = s[:4]
s = s[4:]
if s.startswith('UNC\\'):
prefix += s[:3]
s = '\\' + s[3:]
return prefix, s
def _ext_to_normal(self, s):
# Turn back an extended path into a normal DOS-like path
return self._split_extended_path(s)[1]
def is_reserved(self, parts):
# NOTE: the rules for reserved names seem somewhat complicated
# (e.g. r"..\NUL" is reserved but not r"foo\NUL").
# We err on the side of caution and return True for paths which are
# not considered reserved by Windows.
if not parts:
return False
if parts[0].startswith('\\\\'):
# UNC paths are never reserved
return False
return parts[-1].partition('.')[0].upper() in self.reserved_names
def make_uri(self, path):
# Under Windows, file URIs use the UTF-8 encoding.
drive = path.drive
if len(drive) == 2 and drive[1] == ':':
# It's a path on a local drive => 'file:///c:/a/b'
rest = path.as_posix()[2:].lstrip('/')
return 'file:///%s/%s' % (
drive, urlquote_from_bytes(rest.encode('utf-8')))
else:
# It's a path on a network drive => 'file://host/share/a/b'
return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8'))
def gethomedir(self, username):
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif 'HOMEPATH' in os.environ:
try:
drv = os.environ['HOMEDRIVE']
except KeyError:
drv = ''
userhome = drv + os.environ['HOMEPATH']
else:
raise RuntimeError("Can't determine home directory")
if username:
            # Try to guess the user's home directory. By default all user
            # directories are located in the same place and are named after
            # the corresponding usernames. If the current user's home
            # directory points to a nonstandard place, this guess is likely
            # wrong.
if os.environ['USERNAME'] != username:
drv, root, parts = self.parse_parts((userhome,))
if parts[-1] != os.environ['USERNAME']:
raise RuntimeError("Can't determine home directory "
"for %r" % username)
parts[-1] = username
if drv or root:
userhome = drv + root + self.join(parts[1:])
else:
userhome = self.join(parts)
return userhome
class _PosixFlavour(_Flavour):
sep = '/'
altsep = ''
has_drv = False
pathmod = posixpath
is_supported = (os.name != 'nt')
def splitroot(self, part, sep=sep):
if part and part[0] == sep:
stripped_part = part.lstrip(sep)
# According to POSIX path resolution:
# http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11
# "A pathname that begins with two successive slashes may be
# interpreted in an implementation-defined manner, although more
# than two leading slashes shall be treated as a single slash".
if len(part) - len(stripped_part) == 2:
return '', sep * 2, stripped_part
else:
return '', sep, stripped_part
else:
return '', '', part
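    # Illustrative splitroot() results, as (drv, root, rest) tuples:
    #
    #   '/a/b'   -> ('', '/', 'a/b')
    #   '//a/b'  -> ('', '//', 'a/b')  (exactly two leading slashes kept)
    #   '///a/b' -> ('', '/', 'a/b')   (three or more collapse to one)
    #   'a/b'    -> ('', '', 'a/b')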
def casefold(self, s):
return s
def casefold_parts(self, parts):
return parts
def resolve(self, path, strict=False):
sep = self.sep
accessor = path._accessor
seen = {}
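        # seen maps each visited symlink path to its resolved target; a value
        # of None marks a link whose resolution is still in progress, which
        # is how loops are detected below.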
def _resolve(path, rest):
if rest.startswith(sep):
path = ''
for name in rest.split(sep):
if not name or name == '.':
# current dir
continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError("Symlink loop from %r" % newpath)
# Resolve the symbolic link
try:
target = accessor.readlink(newpath)
except OSError as e:
if e.errno != EINVAL and strict:
raise
# Not a symlink, or non-strict mode. We just leave the path
# untouched.
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
return _resolve(base, str(path)) or sep
def is_reserved(self, parts):
return False
def make_uri(self, path):
# We represent the path using the local filesystem encoding,
# for portability to other applications.
bpath = bytes(path)
return 'file://' + urlquote_from_bytes(bpath)
def gethomedir(self, username):
if not username:
try:
return os.environ['HOME']
except KeyError:
import pwd
return pwd.getpwuid(os.getuid()).pw_dir
else:
import pwd
try:
return pwd.getpwnam(username).pw_dir
except KeyError:
raise RuntimeError("Can't determine home directory "
"for %r" % username)
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
"""An accessor implements a particular (system-specific or not) way of
accessing paths on the filesystem."""
class _NormalAccessor(_Accessor):
def _wrap_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobj, *args):
return strfunc(str(pathobj), *args)
return staticmethod(wrapped)
def _wrap_binary_strfunc(strfunc):
@functools.wraps(strfunc)
def wrapped(pathobjA, pathobjB, *args):
return strfunc(str(pathobjA), str(pathobjB), *args)
return staticmethod(wrapped)
stat = _wrap_strfunc(os.stat)
lstat = _wrap_strfunc(os.lstat)
open = _wrap_strfunc(os.open)
listdir = _wrap_strfunc(os.listdir)
scandir = _wrap_strfunc(os.scandir)
chmod = _wrap_strfunc(os.chmod)
if hasattr(os, "lchmod"):
lchmod = _wrap_strfunc(os.lchmod)
else:
def lchmod(self, pathobj, mode):
raise NotImplementedError("lchmod() not available on this system")
mkdir = _wrap_strfunc(os.mkdir)
unlink = _wrap_strfunc(os.unlink)
rmdir = _wrap_strfunc(os.rmdir)
rename = _wrap_binary_strfunc(os.rename)
replace = _wrap_binary_strfunc(os.replace)
    if 0 and nt: # [jart] dead branch; the POSIX symlink wrapper below is always used
if supports_symlinks:
symlink = _wrap_binary_strfunc(os.symlink)
else:
def symlink(a, b, target_is_directory):
raise NotImplementedError("symlink() not available on this system")
else:
# Under POSIX, os.symlink() takes two args
@staticmethod
def symlink(a, b, target_is_directory):
return os.symlink(str(a), str(b))
utime = _wrap_strfunc(os.utime)
# Helper for resolve()
def readlink(self, path):
return os.readlink(path)
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
def _make_selector(pattern_parts):
pat = pattern_parts[0]
child_parts = pattern_parts[1:]
if pat == '**':
cls = _RecursiveWildcardSelector
elif '**' in pat:
raise ValueError("Invalid pattern: '**' can only be an entire path component")
elif _is_wildcard_pattern(pat):
cls = _WildcardSelector
else:
cls = _PreciseSelector
return cls(pat, child_parts)
if hasattr(functools, "lru_cache"):
_make_selector = functools.lru_cache()(_make_selector)
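# Illustrative selector chain for the pattern parts ('**', '*.py'):
# a _RecursiveWildcardSelector feeding a _WildcardSelector feeding a
# _TerminatingSelector, each link built by the successor logic below.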
class _Selector:
"""A selector matches a specific glob pattern part against the children
of a given path."""
def __init__(self, child_parts):
self.child_parts = child_parts
if child_parts:
self.successor = _make_selector(child_parts)
self.dironly = True
else:
self.successor = _TerminatingSelector()
self.dironly = False
def select_from(self, parent_path):
"""Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself."""
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
scandir = parent_path._accessor.scandir
if not is_dir(parent_path):
return iter([])
return self._select_from(parent_path, is_dir, exists, scandir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, scandir):
yield parent_path
class _PreciseSelector(_Selector):
def __init__(self, name, child_parts):
self.name = name
_Selector.__init__(self, child_parts)
def _select_from(self, parent_path, is_dir, exists, scandir):
try:
path = parent_path._make_child_relpath(self.name)
if (is_dir if self.dironly else exists)(path):
for p in self.successor._select_from(path, is_dir, exists, scandir):
yield p
except PermissionError:
return
class _WildcardSelector(_Selector):
def __init__(self, pat, child_parts):
self.pat = re.compile(fnmatch.translate(pat))
_Selector.__init__(self, child_parts)
def _select_from(self, parent_path, is_dir, exists, scandir):
try:
cf = parent_path._flavour.casefold
entries = list(scandir(parent_path))
for entry in entries:
if not self.dironly or entry.is_dir():
name = entry.name
casefolded = cf(name)
if self.pat.match(casefolded):
path = parent_path._make_child_relpath(name)
for p in self.successor._select_from(path, is_dir, exists, scandir):
yield p
except PermissionError:
return
class _RecursiveWildcardSelector(_Selector):
def __init__(self, pat, child_parts):
_Selector.__init__(self, child_parts)
def _iterate_directories(self, parent_path, is_dir, scandir):
yield parent_path
try:
entries = list(scandir(parent_path))
for entry in entries:
if entry.is_dir() and not entry.is_symlink():
path = parent_path._make_child_relpath(entry.name)
for p in self._iterate_directories(path, is_dir, scandir):
yield p
except PermissionError:
return
def _select_from(self, parent_path, is_dir, exists, scandir):
try:
yielded = set()
try:
successor_select = self.successor._select_from
for starting_point in self._iterate_directories(parent_path, is_dir, scandir):
for p in successor_select(starting_point, is_dir, exists, scandir):
if p not in yielded:
yield p
yielded.add(p)
finally:
yielded.clear()
except PermissionError:
return
#
# Public API
#
class _PathParents(Sequence):
"""This object provides sequence-like access to the logical ancestors
of a path. Don't try to construct it yourself."""
__slots__ = ('_pathcls', '_drv', '_root', '_parts')
def __init__(self, path):
# We don't store the instance to avoid reference cycles
self._pathcls = type(path)
self._drv = path._drv
self._root = path._root
self._parts = path._parts
def __len__(self):
if self._drv or self._root:
return len(self._parts) - 1
else:
return len(self._parts)
def __getitem__(self, idx):
if idx < 0 or idx >= len(self):
raise IndexError(idx)
return self._pathcls._from_parsed_parts(self._drv, self._root,
self._parts[:-idx - 1])
def __repr__(self):
return "<{}.parents>".format(self._pathcls.__name__)
class PurePath(object):
"""Base class for manipulating paths without I/O.
PurePath represents a filesystem path and offers operations which
don't imply any actual filesystem I/O. Depending on your system,
instantiating a PurePath will return either a PurePosixPath or a
PureWindowsPath object. You can also instantiate either of these classes
directly, regardless of your system.
"""
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
def __new__(cls, *args):
"""Construct a PurePath from one or several strings and or existing
PurePath objects. The strings and path objects are combined so as
to yield a canonicalized path, which is incorporated into the
new PurePath object.
"""
if cls is PurePath:
cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
return cls._from_parts(args)
def __reduce__(self):
# Using the parts tuple helps share interned path parts
# when pickling related paths.
return (self.__class__, tuple(self._parts))
@classmethod
def _parse_args(cls, args):
# This is useful when you don't want to create an instance, just
# canonicalize some constructor arguments.
parts = []
for a in args:
if isinstance(a, PurePath):
parts += a._parts
else:
a = os.fspath(a)
if isinstance(a, str):
# Force-cast str subclasses to str (issue #21127)
parts.append(str(a))
else:
raise TypeError(
"argument should be a str object or an os.PathLike "
"object returning str, not %r"
% type(a))
return cls._flavour.parse_parts(parts)
@classmethod
def _from_parts(cls, args, init=True):
# We need to call _parse_args on the instance, so as to get the
# right flavour.
self = object.__new__(cls)
drv, root, parts = self._parse_args(args)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _from_parsed_parts(cls, drv, root, parts, init=True):
self = object.__new__(cls)
self._drv = drv
self._root = root
self._parts = parts
if init:
self._init()
return self
@classmethod
def _format_parsed_parts(cls, drv, root, parts):
if drv or root:
return drv + root + cls._flavour.join(parts[1:])
else:
return cls._flavour.join(parts)
def _init(self):
# Overridden in concrete Path
pass
def _make_child(self, args):
drv, root, parts = self._parse_args(args)
drv, root, parts = self._flavour.join_parsed_parts(
self._drv, self._root, self._parts, drv, root, parts)
return self._from_parsed_parts(drv, root, parts)
def __str__(self):
"""Return the string representation of the path, suitable for
passing to system calls."""
try:
return self._str
except AttributeError:
self._str = self._format_parsed_parts(self._drv, self._root,
self._parts) or '.'
return self._str
def __fspath__(self):
return str(self)
def as_posix(self):
"""Return the string representation of the path with forward (/)
slashes."""
f = self._flavour
return str(self).replace(f.sep, '/')
def __bytes__(self):
"""Return the bytes representation of the path. This is only
recommended to use under Unix."""
return os.fsencode(str(self))
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, self.as_posix())
def as_uri(self):
"""Return the path as a 'file' URI."""
if not self.is_absolute():
raise ValueError("relative path can't be expressed as a file URI")
return self._flavour.make_uri(self)
@property
def _cparts(self):
# Cached casefolded parts, for hashing and comparison
try:
return self._cached_cparts
except AttributeError:
self._cached_cparts = self._flavour.casefold_parts(self._parts)
return self._cached_cparts
def __eq__(self, other):
if not isinstance(other, PurePath):
return NotImplemented
return self._cparts == other._cparts and self._flavour is other._flavour
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(tuple(self._cparts))
return self._hash
def __lt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts < other._cparts
def __le__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts <= other._cparts
def __gt__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts > other._cparts
def __ge__(self, other):
if not isinstance(other, PurePath) or self._flavour is not other._flavour:
return NotImplemented
return self._cparts >= other._cparts
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
@property
def anchor(self):
"""The concatenation of the drive and root, or ''."""
anchor = self._drv + self._root
return anchor
@property
def name(self):
"""The final path component, if any."""
parts = self._parts
if len(parts) == (1 if (self._drv or self._root) else 0):
return ''
return parts[-1]
@property
def suffix(self):
"""The final component's last suffix, if any."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[i:]
else:
return ''
@property
def suffixes(self):
"""A list of the final component's suffixes, if any."""
name = self.name
if name.endswith('.'):
return []
name = name.lstrip('.')
return ['.' + suffix for suffix in name.split('.')[1:]]
@property
def stem(self):
"""The final path component, minus its last suffix."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[:i]
else:
return name
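    # Illustrative values for PurePosixPath('dir/archive.tar.gz'):
    #   name     = 'archive.tar.gz'
    #   suffix   = '.gz'
    #   suffixes = ['.tar', '.gz']
    #   stem     = 'archive.tar'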
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
drv, root, parts = self._flavour.parse_parts((name,))
if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
or drv or root or len(parts) != 1):
raise ValueError("Invalid name %r" % (name))
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed. If the path
has no suffix, add given suffix. If the given suffix is an empty
string, remove the suffix from the path.
"""
f = self._flavour
if f.sep in suffix or f.altsep and f.altsep in suffix:
raise ValueError("Invalid suffix %r" % (suffix))
if suffix and not suffix.startswith('.') or suffix == '.':
raise ValueError("Invalid suffix %r" % (suffix))
name = self.name
if not name:
raise ValueError("%r has an empty name" % (self,))
old_suffix = self.suffix
if not old_suffix:
name = name + suffix
else:
name = name[:-len(old_suffix)] + suffix
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
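    # Illustrative: PurePosixPath('a/b.py').with_suffix('.txt') gives
    # PurePosixPath('a/b.txt'), and with_suffix('') gives PurePosixPath('a/b').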
def relative_to(self, *other):
"""Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
# Path('c:/').relative_to('c:') gives Path('/')
        # Path('c:/').relative_to('/') raises ValueError
if not other:
raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
abs_parts = [drv, root] + parts[1:]
else:
abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
raise ValueError("{!r} does not start with {!r}"
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:])
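    # Illustrative: PurePosixPath('/etc/passwd').relative_to('/') gives
    # PurePosixPath('etc/passwd'), relative_to('/etc') gives
    # PurePosixPath('passwd'), and relative_to('/usr') raises ValueError.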
@property
def parts(self):
"""An object providing sequence-like access to the
components in the filesystem path."""
# We cache the tuple to avoid building a new one each time .parts
# is accessed. XXX is this necessary?
try:
return self._pparts
except AttributeError:
self._pparts = tuple(self._parts)
return self._pparts
def joinpath(self, *args):
"""Combine this path with one or several arguments, and return a
new path representing either a subpath (if all arguments are relative
paths) or a totally different path (if one of the arguments is
anchored).
"""
return self._make_child(args)
def __truediv__(self, key):
return self._make_child((key,))
def __rtruediv__(self, key):
return self._from_parts([key] + self._parts)
@property
def parent(self):
"""The logical parent of the path."""
drv = self._drv
root = self._root
parts = self._parts
if len(parts) == 1 and (drv or root):
return self
return self._from_parsed_parts(drv, root, parts[:-1])
@property
def parents(self):
"""A sequence of this path's logical parents."""
return _PathParents(self)
def is_absolute(self):
"""True if the path is absolute (has both a root and, if applicable,
a drive)."""
if not self._root:
return False
return not self._flavour.has_drv or bool(self._drv)
def is_reserved(self):
"""Return True if the path contains one of the special names reserved
by the system, if any."""
return self._flavour.is_reserved(self._parts)
def match(self, path_pattern):
"""
Return True if this path matches the given pattern.
"""
cf = self._flavour.casefold
path_pattern = cf(path_pattern)
drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
if not pat_parts:
raise ValueError("empty pattern")
if drv and drv != cf(self._drv):
return False
if root and root != cf(self._root):
return False
parts = self._cparts
if drv or root:
if len(pat_parts) != len(parts):
return False
pat_parts = pat_parts[1:]
elif len(pat_parts) > len(parts):
return False
for part, pat in zip(reversed(parts), reversed(pat_parts)):
if not fnmatch.fnmatchcase(part, pat):
return False
return True
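    # Illustrative match() semantics:
    #   PurePath('a/b.py').match('*.py')       -> True   (matched from the right)
    #   PurePath('/a/b/c.py').match('b/*.py')  -> True
    #   PurePath('/a/b/c.py').match('a/*.py')  -> False
    #   PurePath('/a.py').match('/*.py')       -> True   (anchored: full match)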
# Can't subclass os.PathLike from PurePath and keep the constructor
# optimizations in PurePath._parse_args().
os.PathLike.register(PurePath)
class PurePosixPath(PurePath):
"""PurePath subclass for non-Windows systems.
On a POSIX system, instantiating a PurePath should return this object.
However, you can also instantiate it directly on any system.
"""
_flavour = _posix_flavour
__slots__ = ()
class PureWindowsPath(PurePath):
"""PurePath subclass for Windows systems.
On a Windows system, instantiating a PurePath should return this object.
However, you can also instantiate it directly on any system.
"""
_flavour = _windows_flavour
__slots__ = ()
# Filesystem-accessing classes
class Path(PurePath):
"""PurePath subclass that can make system calls.
Path represents a filesystem path but unlike PurePath, also offers
methods to do system calls on path objects. Depending on your system,
instantiating a Path will return either a PosixPath or a WindowsPath
object. You can also instantiate a PosixPath or WindowsPath directly,
but cannot instantiate a WindowsPath on a POSIX system or vice versa.
"""
__slots__ = (
'_accessor',
'_closed',
)
def __new__(cls, *args, **kwargs):
if cls is Path:
cls = WindowsPath if os.name == 'nt' else PosixPath
self = cls._from_parts(args, init=False)
if not self._flavour.is_supported:
raise NotImplementedError("cannot instantiate %r on your system"
% (cls.__name__,))
self._init()
return self
def _init(self,
# Private non-constructor arguments
template=None,
):
self._closed = False
if template is not None:
self._accessor = template._accessor
else:
self._accessor = _normal_accessor
def _make_child_relpath(self, part):
# This is an optimization used for dir walking. `part` must be
# a single part relative to this path.
parts = self._parts + [part]
return self._from_parsed_parts(self._drv, self._root, parts)
def __enter__(self):
if self._closed:
self._raise_closed()
return self
def __exit__(self, t, v, tb):
self._closed = True
def _raise_closed(self):
raise ValueError("I/O operation on closed path")
def _opener(self, name, flags, mode=0o666):
# A stub for the opener argument to built-in open()
return self._accessor.open(self, flags, mode)
def _raw_open(self, flags, mode=0o777):
"""
        Open the file pointed to by this path and return a file descriptor,
as os.open() does.
"""
if self._closed:
self._raise_closed()
return self._accessor.open(self, flags, mode)
# Public API
@classmethod
def cwd(cls):
"""Return a new path pointing to the current working directory
(as returned by os.getcwd()).
"""
return cls(os.getcwd())
@classmethod
def home(cls):
"""Return a new path pointing to the user's home directory (as
returned by os.path.expanduser('~')).
"""
return cls(cls()._flavour.gethomedir(None))
def samefile(self, other_path):
"""Return whether other_path is the same or not as this file
(as returned by os.path.samefile()).
"""
st = self.stat()
try:
other_st = other_path.stat()
except AttributeError:
other_st = os.stat(other_path)
return os.path.samestat(st, other_st)
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
if self._closed:
self._raise_closed()
for name in self._accessor.listdir(self):
if name in {'.', '..'}:
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
if self._closed:
self._raise_closed()
def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given pattern.
"""
if not pattern:
raise ValueError("Unacceptable pattern: {!r}".format(pattern))
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def rglob(self, pattern):
"""Recursively yield all existing files (of any kind, including
directories) matching the given pattern, anywhere in this subtree.
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(("**",) + tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def absolute(self):
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
        No normalization is done, i.e. all '.' and '..' components are kept.
Use resolve() to get the canonical path to a file.
"""
# XXX untested yet!
if self._closed:
self._raise_closed()
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
# use nt._getfullpathname())
obj = self._from_parts([os.getcwd()] + self._parts, init=False)
obj._init(template=self)
return obj
def resolve(self, strict=False):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
if self._closed:
self._raise_closed()
s = self._flavour.resolve(self, strict=strict)
if s is None:
# No symlink resolution => for consistency, raise an error if
# the path doesn't exist or is forbidden
self.stat()
s = str(self.absolute())
# Now we have no symlinks in the path, it's safe to normalize it.
normed = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
def stat(self):
"""
Return the result of the stat() system call on this path, like
os.stat() does.
"""
return self._accessor.stat(self)
def owner(self):
"""
Return the login name of the file owner.
"""
import pwd
return pwd.getpwuid(self.stat().st_uid).pw_name
def group(self):
"""
Return the group name of the file gid.
"""
import grp
return grp.getgrgid(self.stat().st_gid).gr_name
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""
        Open the file pointed to by this path and return a file object, as
the built-in open() function does.
"""
if self._closed:
self._raise_closed()
return io.open(str(self), mode, buffering, encoding, errors, newline,
opener=self._opener)
def read_bytes(self):
"""
Open the file in bytes mode, read it, and close the file.
"""
with self.open(mode='rb') as f:
return f.read()
def read_text(self, encoding=None, errors=None):
"""
Open the file in text mode, read it, and close the file.
"""
with self.open(mode='r', encoding=encoding, errors=errors) as f:
return f.read()
def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
"""
# type-check for the buffer interface before truncating the file
view = memoryview(data)
with self.open(mode='wb') as f:
return f.write(view)
def write_text(self, data, encoding=None, errors=None):
"""
Open the file in text mode, write to it, and close the file.
"""
if not isinstance(data, str):
raise TypeError('data must be str, not %s' %
data.__class__.__name__)
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data)
def touch(self, mode=0o666, exist_ok=True):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if self._closed:
self._raise_closed()
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
self._accessor.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = self._raw_open(flags, mode)
os.close(fd)
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
"""
Create a new directory at this given path.
"""
if self._closed:
self._raise_closed()
try:
self._accessor.mkdir(self, mode)
except FileNotFoundError:
if not parents or self.parent == self:
raise
self.parent.mkdir(parents=True, exist_ok=True)
self.mkdir(mode, parents=False, exist_ok=exist_ok)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not self.is_dir():
raise
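    # Illustrative (path name hypothetical):
    #   Path('build/out').mkdir(parents=True, exist_ok=True)
    # creates any missing ancestors and is a no-op if the directory exists.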
def chmod(self, mode):
"""
Change the permissions of the path, like os.chmod().
"""
if self._closed:
self._raise_closed()
self._accessor.chmod(self, mode)
def lchmod(self, mode):
"""
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
if self._closed:
self._raise_closed()
self._accessor.lchmod(self, mode)
def unlink(self):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
if self._closed:
self._raise_closed()
self._accessor.unlink(self)
def rmdir(self):
"""
Remove this directory. The directory must be empty.
"""
if self._closed:
self._raise_closed()
self._accessor.rmdir(self)
def lstat(self):
"""
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
if self._closed:
self._raise_closed()
return self._accessor.lstat(self)
def rename(self, target):
"""
Rename this path to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.rename(self, target)
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if self._closed:
self._raise_closed()
self._accessor.replace(self, target)
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of os.symlink's.
"""
if self._closed:
self._raise_closed()
self._accessor.symlink(target, self, target_is_directory)
# Convenience functions for querying the stat results
def exists(self):
"""
Whether this path exists.
"""
try:
self.stat()
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
return False
return True
def is_dir(self):
"""
Whether this path is a directory.
"""
try:
return S_ISDIR(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_file(self):
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
try:
return S_ISREG(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist
return False
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_fifo(self):
"""
Whether this path is a FIFO.
"""
try:
return S_ISFIFO(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def is_socket(self):
"""
Whether this path is a socket.
"""
try:
return S_ISSOCK(self.stat().st_mode)
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
def expanduser(self):
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not (self._drv or self._root) and
self._parts and self._parts[0][:1] == '~'):
homedir = self._flavour.gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self
class PosixPath(Path, PurePosixPath):
"""Path subclass for non-Windows systems.
On a POSIX system, instantiating a Path should return this object.
"""
__slots__ = ()
class WindowsPath(Path, PureWindowsPath):
"""Path subclass for Windows systems.
On a Windows system, instantiating a Path should return this object.
"""
__slots__ = ()
def owner(self):
raise NotImplementedError("Path.owner() is unsupported on this system")
def group(self):
raise NotImplementedError("Path.group() is unsupported on this system")
| 48,835 | 1,454 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/keyword.py | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
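# Illustrative: iskeyword('pass') -> True, iskeyword('Pass') -> False;
# the check is case-sensitive and is a frozenset membership test.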
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# load the output skeleton from the target, taking care to preserve its
# newline convention.
with open(optfile, newline='') as fp:
format = fp.readlines()
nl = format[0][len(format[0].strip()):] if format else '\n'
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "'," + nl)
lines.sort()
# insert the lines of keywords into the skeleton
try:
start = format.index("#--start keywords--" + nl) + 1
end = format.index("#--end keywords--" + nl)
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
with open(optfile, 'w', newline='') as fp:
fp.writelines(format)
if __name__ == "__main__":
main()
| 2,211 | 95 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/codeop.py | r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
__all__ = ["compile_command", "Compile", "CommandCompiler"]
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
    err1 = err2 = None
    code = code1 = code2 = None
    try:
        code = compiler(source, filename, symbol)
    except SyntaxError:
        # Failure here is not conclusive; compare the errors from the "\n"
        # and "\n\n" variants below before deciding.
        pass
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise err1
def _compile(source, filename, symbol):
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default) or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol)
class Compile:
"""Instances of this class behave much like the built-in compile
function, but if one is used to compile text containing a future
statement, it "remembers" and compiles all subsequent program texts
with the statement in force."""
def __init__(self):
self.flags = PyCF_DONT_IMPLY_DEDENT
def __call__(self, source, filename, symbol):
codeob = compile(source, filename, symbol, self.flags, 1)
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
return codeob
class CommandCompiler:
"""Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force."""
    def __init__(self):
self.compiler = Compile()
def __call__(self, source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read;
default "<input>"
symbol -- optional grammar start symbol; "single" (default) or
"eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(self.compiler, source, filename, symbol)
| 5,994 | 169 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/pyclbr.py | """Parse a Python module and describe its classes and methods.
Parse enough of a Python file to recognize imports and class and
method definitions, and to find out the superclasses of a class.
The interface consists of a single function:
readmodule_ex(module [, path])
where module is the name of a Python module, and path is an optional
list of directories in which to search for the module. If present,
path is prepended to the system search path sys.path. The return
value is a dictionary. The keys of the dictionary are the names of
the classes defined in the module (including classes that are defined
via the from XXX import YYY construct). The values are instances
of the class Class defined here. One special key/value pair
is present for packages: the key '__path__' has a list as its value
which contains the package search path.
A class is described by the class Class in this module. Instances
of this class have the following instance variables:
module -- the module name
name -- the name of the class
super -- a list of super classes (Class instances)
methods -- a dictionary of methods
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
The dictionary of methods uses the method names as keys and the line
numbers on which the method was defined as values.
If the name of a super class is not recognized, the corresponding
entry in the list of super classes is not a class instance but a
string giving the name of the super class. Since import statements
are recognized and imported modules are scanned as well, this
shouldn't happen often.
A function is described by the class Function in this module.
Instances of this class have the following instance variables:
module -- the module name
name -- the name of the function
file -- the file in which the function was defined
lineno -- the line in the file on which the def statement occurred
"""
import io
import sys
import importlib.util
import tokenize
from token import NAME, DEDENT, OP
__all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
_modules = {} # cache of modules we've seen
# each Python class is represented by an instance of this class
class Class:
'''Class to represent a Python class.'''
def __init__(self, module, name, super, file, lineno):
self.module = module
self.name = name
if super is None:
super = []
self.super = super
self.methods = {}
self.file = file
self.lineno = lineno
def _addmethod(self, name, lineno):
self.methods[name] = lineno
class Function:
'''Class to represent a top-level Python function'''
def __init__(self, module, name, file, lineno):
self.module = module
self.name = name
self.file = file
self.lineno = lineno
def readmodule(module, path=None):
'''Backwards compatible interface.
Call readmodule_ex() and then only keep Class objects from the
resulting dictionary.'''
res = {}
for key, value in _readmodule(module, path or []).items():
if isinstance(value, Class):
res[key] = value
return res
def readmodule_ex(module, path=None):
'''Read a module file and return a dictionary of classes.
Search for MODULE in PATH and sys.path, read and parse the
module and return a dictionary with one entry for each class
found in the module.
'''
return _readmodule(module, path or [])
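# Illustrative usage (module name hypothetical):
#   for name, obj in readmodule_ex('mymodule').items():
#       if isinstance(obj, Class):
#           print(name, sorted(obj.methods))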
def _readmodule(module, path, inpackage=None):
'''Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
'''
# Compute the full module name (prepending inpackage if set)
if inpackage is not None:
fullmodule = "%s.%s" % (inpackage, module)
else:
fullmodule = module
# Check in the cache
if fullmodule in _modules:
return _modules[fullmodule]
# Initialize the dict for this module's contents
dict = {}
# Check if it is a built-in module; we don't do much for these
if module in sys.builtin_module_names and inpackage is None:
_modules[module] = dict
return dict
# Check for a dotted module name
i = module.rfind('.')
if i >= 0:
package = module[:i]
submodule = module[i+1:]
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
if not '__path__' in parent:
raise ImportError('No package named {}'.format(package))
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
f = None
if inpackage is not None:
search_path = path
else:
search_path = path + sys.path
# XXX This will change once issue19944 lands.
spec = importlib.util._find_spec_from_path(fullmodule, search_path)
_modules[fullmodule] = dict
# is module a package?
if spec.submodule_search_locations is not None:
dict['__path__'] = spec.submodule_search_locations
try:
source = spec.loader.get_source(fullmodule)
if source is None:
return dict
except (AttributeError, ImportError):
# not Python source, can't do anything with this module
return dict
fname = spec.loader.get_filename(fullmodule)
f = io.StringIO(source)
stack = [] # stack of (class, indent) pairs
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# close nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, meth_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Syntax error
if stack:
cur_class = stack[-1][0]
if isinstance(cur_class, Class):
# it's a method
cur_class._addmethod(meth_name, lineno)
# else it's a nested def
else:
# it's a function
dict[meth_name] = Function(fullmodule, meth_name,
fname, lineno)
stack.append((None, thisindent)) # Marker for nested fns
elif token == 'class':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = next(g)[0:3]
if tokentype != NAME:
continue # Syntax error
# parse what follows the class name
tokentype, token, start = next(g)[0:3]
inherit = None
if token == '(':
names = [] # List of superclasses
# there's a list of superclasses
level = 1
super = [] # Tokens making up current superclass
while True:
tokentype, token, start = next(g)[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in dict:
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class is of the form
# module.class: look in module for
# class
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# only use NAME and OP (== dot) tokens for type name
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# expressions in the base list are not supported
inherit = names
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
if not stack:
dict[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# add any classes that were defined in the imported module
# to our name space if they were mentioned in the list
for n, n2 in names:
if n in d:
dict[n2 or n] = d[n]
elif n == '*':
# don't add names that start with _
for n in d:
if n[0] != '_':
dict[n] = d[n]
except StopIteration:
pass
f.close()
return dict
def _getnamelist(g):
# Helper to get a comma-separated list of dotted names plus 'as'
# clauses. Return a list of pairs (name, name2) where name2 is
# the 'as' name, or None if there is no 'as' clause.
names = []
while True:
name, token = _getname(g)
if not name:
break
if token == 'as':
name2, token = _getname(g)
else:
name2 = None
names.append((name, name2))
while token != "," and "\n" not in token:
token = next(g)[1]
if token != ",":
break
return names
def _getname(g):
# Helper to get a dotted name, return a pair (name, token) where
# name is the dotted name, or None if there was no dotted name,
# and token is the next input token.
parts = []
tokentype, token = next(g)[0:2]
if tokentype != NAME and token != '*':
return (None, token)
parts.append(token)
while True:
tokentype, token = next(g)[0:2]
if token != '.':
break
tokentype, token = next(g)[0:2]
if tokentype != NAME:
break
parts.append(token)
return (".".join(parts), token)
def _main():
# Main program for testing.
import os
from operator import itemgetter
mod = sys.argv[1]
if os.path.exists(mod):
path = [os.path.dirname(mod)]
mod = os.path.basename(mod)
if mod.lower().endswith(".py"):
mod = mod[:-3]
else:
path = []
dict = readmodule_ex(mod, path)
objs = list(dict.values())
objs.sort(key=lambda a: getattr(a, 'lineno', 0))
for obj in objs:
if isinstance(obj, Class):
print("class", obj.name, obj.super, obj.lineno)
methods = sorted(obj.methods.items(), key=itemgetter(1))
for name, lineno in methods:
if name != "__path__":
print(" def", name, lineno)
elif isinstance(obj, Function):
print("def", obj.name, obj.lineno)
if __name__ == "__main__":
_main()
| 13,558 | 353 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/pkgutil.py | """Utilities to support packages."""
from collections import namedtuple
from functools import singledispatch as simplegeneric
import importlib
import importlib.util
import importlib.machinery
import os
import os.path
import sys
from types import ModuleType
import warnings
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules', 'get_data',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
'ModuleInfo',
]
ModuleInfo = namedtuple('ModuleInfo', 'module_finder name ispkg')
ModuleInfo.__doc__ = 'A namedtuple with minimal info about a module.'
def _get_spec(finder, name):
"""Return the finder-specific module spec."""
# Works with legacy finders.
try:
find_spec = finder.find_spec
except AttributeError:
loader = finder.find_module(name)
if loader is None:
return None
return importlib.util.spec_from_loader(name, loader)
else:
return find_spec(name)
def read_code(stream):
# This helper is needed in order for the PEP 302 emulation to
# correctly handle compiled files
import marshal
magic = stream.read(4)
if magic != importlib.util.MAGIC_NUMBER:
return None
stream.read(8) # Skip timestamp and size
return marshal.load(stream)
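# Illustrative usage (path hypothetical); the 8 skipped bytes are the mtime
# and source-size fields of this interpreter's 12-byte .pyc header:
#   with open('__pycache__/mod.cpython-36.pyc', 'rb') as f:
#       code = read_code(f) # None if the magic number doesn't match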
def walk_packages(path=None, prefix='', onerror=None):
"""Yields ModuleInfo for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to prepend to every module name in the
output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
    def seen(p, m={}):  # the mutable default intentionally memoizes paths seen by this call
if p in m:
return True
m[p] = True
for info in iter_modules(path, prefix):
yield info
if info.ispkg:
try:
__import__(info.name)
except ImportError:
if onerror is not None:
onerror(info.name)
except Exception:
if onerror is not None:
onerror(info.name)
else:
raise
else:
path = getattr(sys.modules[info.name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
yield from walk_packages(path, info.name+'.', onerror)
def iter_modules(path=None, prefix=''):
"""Yields ModuleInfo for all submodules on path,
or, if path is None, all top-level modules on sys.path.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to prepend to every module name in the
output.
"""
if path is None:
importers = iter_importers()
else:
importers = map(get_importer, path)
yielded = {}
for i in importers:
for name, ispkg in iter_importer_modules(i, prefix):
if name not in yielded:
yielded[name] = 1
yield ModuleInfo(i, name, ispkg)
@simplegeneric
def iter_importer_modules(importer, prefix=''):
if not hasattr(importer, 'iter_modules'):
return []
return importer.iter_modules(prefix)
# Implement a file walker for the normal importlib path hook
def _iter_file_finder_modules(importer, prefix=''):
if importer.path is None or not os.path.isdir(importer.path):
return
yielded = {}
import inspect
try:
filenames = os.listdir(importer.path)
except OSError:
# ignore unreadable directories like import does
filenames = []
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(importer.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
try:
dircontents = os.listdir(path)
except OSError:
# ignore unreadable directories like import does
dircontents = []
for fn in dircontents:
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
iter_importer_modules.register(
importlib.machinery.FileFinder, _iter_file_finder_modules)
def _import_imp():
global imp
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
imp = importlib.import_module('imp')
class ImpImporter:
"""PEP 302 Finder that wraps Python's "classic" import algorithm
ImpImporter(dirname) produces a PEP 302 finder that searches that
directory. ImpImporter(None) produces a PEP 302 finder that searches
the current sys.path, plus any modules that are frozen or built-in.
Note that ImpImporter does not currently support being used by placement
on sys.meta_path.
"""
def __init__(self, path=None):
global imp
warnings.warn("This emulation is deprecated, use 'importlib' instead",
DeprecationWarning)
_import_imp()
self.path = path
def find_module(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(fullname, file, filename, etc)
def iter_modules(self, prefix=''):
if self.path is None or not os.path.isdir(self.path):
return
yielded = {}
import inspect
try:
filenames = os.listdir(self.path)
except OSError:
# ignore unreadable directories like import does
filenames = []
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(self.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
try:
dircontents = os.listdir(path)
except OSError:
# ignore unreadable directories like import does
dircontents = []
for fn in dircontents:
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
def __init__(self, fullname, file, filename, etc):
warnings.warn("This emulation is deprecated, use 'importlib' instead",
DeprecationWarning)
_import_imp()
self.file = file
self.filename = filename
self.fullname = fullname
self.etc = etc
def load_module(self, fullname):
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file:
self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_data(self, pathname):
with open(pathname, "rb") as file:
return file.read()
def _reopen(self):
if self.file and self.file.closed:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self.file = open(self.filename, 'r')
elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname, fullname))
return fullname
def is_package(self, fullname):
fullname = self._fix_name(fullname)
return self.etc[2]==imp.PKG_DIRECTORY
def get_code(self, fullname=None):
fullname = self._fix_name(fullname)
if self.code is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
source = self.get_source(fullname)
self.code = compile(source, self.filename, 'exec')
elif mod_type==imp.PY_COMPILED:
self._reopen()
try:
self.code = read_code(self.file)
finally:
self.file.close()
elif mod_type==imp.PKG_DIRECTORY:
self.code = self._get_delegate().get_code()
return self.code
def get_source(self, fullname=None):
fullname = self._fix_name(fullname)
if self.source is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self._reopen()
try:
self.source = self.file.read()
finally:
self.file.close()
elif mod_type==imp.PY_COMPILED:
if os.path.exists(self.filename[:-1]):
with open(self.filename[:-1], 'r') as f:
self.source = f.read()
elif mod_type==imp.PKG_DIRECTORY:
self.source = self._get_delegate().get_source()
return self.source
def _get_delegate(self):
finder = ImpImporter(self.filename)
spec = _get_spec(finder, '__init__')
return spec.loader
def get_filename(self, fullname=None):
fullname = self._fix_name(fullname)
mod_type = self.etc[2]
if mod_type==imp.PKG_DIRECTORY:
return self._get_delegate().get_filename()
elif mod_type in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
return self.filename
return None
try:
import zipimport
from zipimport import zipimporter
def iter_zipimport_modules(importer, prefix=''):
dirlist = sorted(zipimport._zip_directory_cache[importer.archive])
_prefix = importer.prefix
plen = len(_prefix)
yielded = {}
import inspect
for fn in dirlist:
if not fn.startswith(_prefix):
continue
fn = fn[plen:].split(os.sep)
if len(fn)==2 and fn[1].startswith('__init__.py'):
if fn[0] not in yielded:
yielded[fn[0]] = 1
yield prefix + fn[0], True
if len(fn)!=1:
continue
modname = inspect.getmodulename(fn[0])
if modname=='__init__':
continue
if modname and '.' not in modname and modname not in yielded:
yielded[modname] = 1
yield prefix + modname, False
iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
pass
def get_importer(path_item):
"""Retrieve a finder for the given path item
The returned finder is cached in sys.path_importer_cache
if it was newly created by a path hook.
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
sys.path_importer_cache.setdefault(path_item, importer)
break
except ImportError:
pass
else:
importer = None
return importer
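# Editorial sketch (not part of the upstream source): get_importer() is
# normally fed individual sys.path entries, and a successful lookup lands in
# sys.path_importer_cache so repeated calls are cheap:
#
#   >>> import sys
#   >>> finder = get_importer(sys.path[0])
#   >>> finder is sys.path_importer_cache.get(sys.path[0])
#   True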
def iter_importers(fullname=""):
"""Yield finders for the given module name
If fullname contains a '.', the finders will be for the package
containing fullname, otherwise they will be all registered top level
finders (i.e. those on both sys.meta_path and sys.path_hooks).
If the named module is in a package, that package is imported as a side
effect of invoking this function.
If no module name is specified, all top level finders are produced.
"""
if fullname.startswith('.'):
msg = "Relative module name {!r} not supported".format(fullname)
raise ImportError(msg)
if '.' in fullname:
# Get the containing package's __path__
pkg_name = fullname.rpartition(".")[0]
pkg = importlib.import_module(pkg_name)
path = getattr(pkg, '__path__', None)
if path is None:
return
else:
yield from sys.meta_path
path = sys.path
for item in path:
yield get_importer(item)
def get_loader(module_or_name):
"""Get a "loader" object for module_or_name
Returns None if the module cannot be found or imported.
If the named module is not already imported, its containing package
(if any) is imported, in order to establish the package __path__.
"""
if module_or_name in sys.modules:
module_or_name = sys.modules[module_or_name]
if module_or_name is None:
return None
if isinstance(module_or_name, ModuleType):
module = module_or_name
loader = getattr(module, '__loader__', None)
if loader is not None:
return loader
if getattr(module, '__spec__', None) is None:
return None
fullname = module.__name__
else:
fullname = module_or_name
return find_loader(fullname)
def find_loader(fullname):
"""Find a "loader" object for fullname
This is a backwards compatibility wrapper around
importlib.util.find_spec that converts most failures to ImportError
and only returns the loader rather than the full spec
"""
if fullname.startswith('.'):
msg = "Relative module name {!r} not supported".format(fullname)
raise ImportError(msg)
try:
spec = importlib.util.find_spec(fullname)
except (ImportError, AttributeError, TypeError, ValueError) as ex:
# This hack fixes an impedance mismatch between pkgutil and
# importlib, where the latter raises other errors for cases where
# pkgutil previously raised ImportError
msg = "Error while finding loader for {!r} ({}: {})"
raise ImportError(msg.format(fullname, type(ex), ex)) from ex
return spec.loader if spec is not None else None
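# Editorial sketch (not part of the upstream source): find_loader() is a
# thin shim over importlib.util.find_spec() that surfaces only spec.loader:
#
#   >>> find_loader('json') is not None
#   True
#   >>> find_loader('.relative')  # doctest: +IGNORE_EXCEPTION_DETAIL
#   Traceback (most recent call last):
#   ImportError: Relative module name '.relative' not supported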
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
    path, regardless of whether they exist on the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
sname_pkg = name + ".pkg"
path = path[:] # Start with a copy of the existing path
parent_package, _, final_name = name.rpartition('.')
if parent_package:
try:
search_path = sys.modules[parent_package].__path__
except (KeyError, AttributeError):
# We can't do anything: find_loader() returns None when
# passed a dotted name.
return path
else:
search_path = sys.path
for dir in search_path:
if not isinstance(dir, str):
continue
finder = get_importer(dir)
if finder is not None:
portions = []
if hasattr(finder, 'find_spec'):
spec = finder.find_spec(final_name)
if spec is not None:
portions = spec.submodule_search_locations or []
# Is this finder PEP 420 compliant?
elif hasattr(finder, 'find_loader'):
_, portions = finder.find_loader(final_name)
for portion in portions:
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
if portion not in path:
path.append(portion)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except OSError as msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
with f:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
return path
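# Editorial sketch (not part of the upstream source): the canonical
# extend_path() layout splits one logical package across two sys.path roots;
# the directory names below are hypothetical:
#
#   /site-a/mypkg/__init__.py:
#       from pkgutil import extend_path
#       __path__ = extend_path(__path__, __name__)
#   /site-b/mypkg/part2.py
#
# With both roots on sys.path, mypkg.__path__ covers both directories, so
# "import mypkg.part2" works even though part2.py lives outside the
# directory that provided __init__.py.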
def get_data(package, resource):
"""Get a resource from a package.
This is a wrapper round the PEP 302 loader get_data API. The package
argument should be the name of a package, in standard module format
(foo.bar). The resource argument should be in the form of a relative
filename, using '/' as the path separator. The parent directory name '..'
is not allowed, and nor is a rooted name (starting with a '/').
The function returns a binary string, which is the contents of the
specified resource.
For packages located in the filesystem, which have already been imported,
this is the rough equivalent of
d = os.path.dirname(sys.modules[package].__file__)
data = open(os.path.join(d, resource), 'rb').read()
If the package cannot be located or loaded, or it uses a PEP 302 loader
which does not support get_data(), then None is returned.
"""
spec = importlib.util.find_spec(package)
if spec is None:
return None
loader = spec.loader
if loader is None or not hasattr(loader, 'get_data'):
return None
# XXX needs test
mod = (sys.modules.get(package) or
importlib._bootstrap._load(spec))
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return loader.get_data(resource_name)
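# Editorial sketch (not part of the upstream source): typical get_data()
# use, reading a file shipped inside a package; the package and resource
# names are hypothetical:
#
#   >>> from pkgutil import get_data
#   >>> get_data('mypkg', 'templates/default.tpl')  # doctest: +SKIP
#   b'...bytes of the template file...'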
cosmopolitan/third_party/python/Lib/reprlib.py
"""Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__qualname__ = getattr(user_function, '__qualname__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
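# Editorial sketch (not part of the upstream source): recursive_repr()
# guards a __repr__ that would otherwise recurse forever on
# self-referential objects:
#
#   >>> class Node:
#   ...     def __init__(self):
#   ...         self.peer = self
#   ...     @recursive_repr()
#   ...     def __repr__(self):
#   ...         return 'Node(peer=%r)' % (self.peer,)
#   >>> repr(Node())
#   'Node(peer=...)'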
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
if not x:
return "array('%s')" % x.typecode
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
if not x:
return 'set()'
x = _possibly_sorted(x)
return self._repr_iterable(x, level, '{', '}', self.maxset)
def repr_frozenset(self, x, level):
if not x:
return 'frozenset()'
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset({', '})',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %#x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
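# Editorial sketch (not part of the upstream source): the module-level aRepr
# instance can be tuned in place; with maxstring=12 the result is squeezed
# to exactly 12 characters, quotes and '...' included:
#
#   >>> from reprlib import repr, aRepr
#   >>> aRepr.maxstring = 12
#   >>> repr('x' * 100)
#   "'xxx...xxxx'"
#   >>> aRepr.maxstring = 30  # restore the default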
cosmopolitan/third_party/python/Lib/ipaddress.py
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
__version__ = '1.0'
import functools
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
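# Editorial sketch (not part of the upstream source): the three factory
# functions above dispatch purely on what the argument parses as:
#
#   >>> ip_address('192.0.2.1')
#   IPv4Address('192.0.2.1')
#   >>> ip_network('192.0.2.0/24')
#   IPv4Network('192.0.2.0/24')
#   >>> ip_interface('192.0.2.1/24')
#   IPv4Interface('192.0.2.1/24')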
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return address.to_bytes(4, 'big')
except OverflowError:
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return address.to_bytes(16, 'big')
except OverflowError:
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, (~number & (number-1)).bit_length())
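# Editorial sketch (not part of the upstream source): (~number & (number - 1))
# turns exactly the trailing zero bits of `number` into ones, so bit_length()
# counts them. For number = 0b101000: number - 1 = 0b100111, ~number ends in
# ...010111, and their AND is 0b000111, whose bit_length() is 3.
#
#   >>> _count_righthand_zero_bits(0b101000, 32)
#   3
#   >>> _count_righthand_zero_bits(0, 32)
#   32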
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
(last_int - first_int + 1).bit_length() - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted, last.network_address <= net.network_address
# is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
class _IPAddressBase:
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = "%r (len %d != %d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = ip_int.to_bytes(byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
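    # Editorial sketch (not part of the upstream source): for a well-formed
    # netmask the integer is a run of ones followed by a run of zeroes, e.g.
    # 255.255.255.0 -> 0xFFFFFF00 -> 8 trailing zeroes -> prefixlen 32-8 = 24.
    # Anything that mixes the two runs is rejected:
    #
    #   >>> IPv4Network._prefix_from_ip_int(0xFFFFFF00)
    #   24
    #   >>> IPv4Network._prefix_from_ip_int(0xFF00FF00)  # doctest: +IGNORE_EXCEPTION_DETAIL
    #   Traceback (most recent call last):
    #   ValueError: Netmask pattern b'\xff\x00\xff\x00' mixes zeroes & ones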
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg) from None
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
def __reduce__(self):
return self.__class__, (str(self),)
@functools.total_ordering
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip
and self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _BaseAddress):
return NotImplemented
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, int):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, int):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __str__(self):
return str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
@functools.total_ordering
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError('address out of range')
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError('address out of range')
return self._address_class(broadcast + n)
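    # Editorial sketch (not part of the upstream source): __getitem__ above
    # gives networks sequence-style indexing from either end:
    #
    #   >>> net = IPv4Network('192.0.2.0/29')
    #   >>> net[0], net[-1]
    #   (IPv4Address('192.0.2.0'), IPv4Address('192.0.2.7'))
    #   >>> net[8]  # doctest: +IGNORE_EXCEPTION_DETAIL
    #   Traceback (most recent call last):
    #   IndexError: address out of range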
def __lt__(self, other):
if not isinstance(other, _BaseNetwork):
return NotImplemented
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
        return (self.network_address in other or
                self.broadcast_address in other or
                other.network_address in self or
                other.broadcast_address in self)
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if (other.network_address >= s1.network_address and
other.broadcast_address <= s1.broadcast_address):
yield s2
s1, s2 = s1.subnets()
elif (other.network_address >= s2.network_address and
other.broadcast_address <= s2.broadcast_address):
yield s1
s1, s2 = s2.subnets()
else:
                # If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
            # If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
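    # Editorial sketch (not part of the upstream source): subnets() halves
    # the block once per prefixlen_diff step, or jumps straight to new_prefix:
    #
    #   >>> list(IPv4Network('192.0.2.0/24').subnets())
    #   [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]
    #   >>> len(list(IPv4Network('192.0.2.0/24').subnets(new_prefix=26)))
    #   4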
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen
))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4:
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2**IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset({255, 254, 252, 248, 240, 224, 192, 128, 0})
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, int):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return int.from_bytes(map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str)) from None
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
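    # Editorial sketch (not part of the upstream source): _parse_octet()
    # rejects malformed octets with tailored messages:
    #
    #   >>> _BaseV4._parse_octet('08')  # doctest: +IGNORE_EXCEPTION_DETAIL
    #   Traceback (most recent call last):
    #   ValueError: Ambiguous (octal/decimal) value in '08' not permitted
    #   >>> _BaseV4._parse_octet('256')  # doctest: +IGNORE_EXCEPTION_DETAIL
    #   Traceback (most recent call last):
    #   ValueError: Octet 256 (> 255) not permitted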
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(map(str, ip_int.to_bytes(4, 'big')))
def _is_valid_netmask(self, netmask):
"""Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
"""
mask = netmask.split('.')
if len(mask) == 4:
try:
for x in mask:
if int(x) not in self._valid_mask_octets:
return False
except ValueError:
# Found something that isn't an integer or isn't valid
return False
for idx, y in enumerate(mask):
                # Compare numerically, not lexicographically.
                if idx > 0 and int(y) > int(mask[idx - 1]):
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= self._max_prefixlen
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, int):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
self._ip = int.from_bytes(address, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
@functools.lru_cache()
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
@functools.lru_cache()
def is_global(self):
return self not in self._constants._public_network and not self.is_private
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, int)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (self.network < other.network or
self.network == other.network and address_less)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return hash((self._ip, self._prefixlen, int(self.network.network_address)))
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
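# Editorial sketch (not part of the upstream source): an IPv4Interface pairs
# a host address with its enclosing network and exposes both views:
#
#   >>> iface = IPv4Interface('192.0.2.5/24')
#   >>> iface.ip
#   IPv4Address('192.0.2.5')
#   >>> iface.network
#   IPv4Network('192.0.2.0/24')
#   >>> iface.with_netmask
#   '192.0.2.5/255.255.255.0'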
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
            .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (int, bytes)):
addr = address
mask = self._max_prefixlen
# Constructing from a tuple (addr, [mask])
elif isinstance(address, tuple):
addr = address[0]
mask = address[1] if len(address) > 1 else self._max_prefixlen
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
else:
args = _split_optional_netmask(address)
addr = self._ip_int_from_string(args[0])
mask = args[1] if len(args) == 2 else self._max_prefixlen
self.network_address = IPv4Address(addr)
self.netmask, self._prefixlen = self._make_netmask(mask)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
@functools.lru_cache()
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
class _IPv4Constants:
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_public_network = IPv4Network('100.64.0.0/10')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
class _BaseV6:
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2**IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
        - a string representing the prefix length (e.g. "64"); unlike the
          IPv4 variant, no netmask string form is accepted here
"""
if arg not in cls._netmask_cache:
if isinstance(arg, int):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str)) from None
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT-1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str)) from None
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
        the beginning or at the end of the list such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x+4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = str(self.ip)
else:
ip_str = str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x+4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
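# Illustrative sketch (a hypothetical helper, not part of the module and
# never called): how the _BaseV6 parsing/formatting machinery above behaves
# through the public classes defined below.
def _demo_base_v6():
    # _ip_int_from_string() accepts full, '::'-compressed and
    # IPv4-suffixed forms; all collapse to the same 128-bit integer.
    full = IPv6Address('2001:0db8:0000:0000:0000:0000:0000:0001')
    assert full == IPv6Address('2001:db8::1')
    assert IPv6Address('::ffff:192.0.2.1') == IPv6Address('::ffff:c000:201')
    # _compress_hextets() abbreviates only the first of two equally long
    # zero runs, because its comparison uses a strict '>'.
    assert str(IPv6Address('2001:db8:0:0:1:0:0:1')) == '2001:db8::1:0:0:1'
    # _reverse_pointer() emits one label per nibble, low nibble first.
    assert full.reverse_pointer.endswith('8.b.d.0.1.0.0.2.ip6.arpa')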
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, int):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
self._ip = int.from_bytes(address, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
@functools.lru_cache()
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
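# Illustrative sketch (hypothetical, never called): the three "embedded
# IPv4" views defined above, using well-known documentation addresses.
def _demo_v6_embedded_v4():
    assert IPv6Address('::ffff:203.0.113.5').ipv4_mapped == \
        IPv4Address('203.0.113.5')
    # 6to4 keeps the IPv4 address in bits 111..80, after the 2002: prefix.
    assert IPv6Address('2002:c000:204::').sixtofour == \
        IPv4Address('192.0.2.4')
    # Teredo keeps the server in bits 95..64 and the client, bit-inverted,
    # in the low 32 bits.
    server, client = IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
    assert server == IPv4Address('65.54.227.120')
    assert client == IPv4Address('192.0.2.45')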
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, int)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (self.network < other.network or
self.network == other.network and address_less)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return hash((self._ip, self._prefixlen, int(self.network.network_address)))
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
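# Illustrative sketch (hypothetical, never called): an interface couples an
# address with its containing network, which is the extra state the
# constructor above sets up.
def _demo_v6_interface():
    iface = IPv6Interface('2001:db8::1/64')
    assert iface.ip == IPv6Address('2001:db8::1')
    assert iface.network == IPv6Network('2001:db8::/64')
    assert iface.with_prefixlen == '2001:db8::1/64'
    # Per __eq__ above, a bare address never equals an interface.
    assert iface != iface.ip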
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
    Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
            strict: A boolean. If true, ensure that we have been passed
                a true network address, e.g., 2001:db8::1000/124, and not
                an IP address on a network, e.g., 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (int, bytes)):
addr = address
mask = self._max_prefixlen
# Constructing from a tuple (addr, [mask])
elif isinstance(address, tuple):
addr = address[0]
mask = address[1] if len(address) > 1 else self._max_prefixlen
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
else:
args = _split_optional_netmask(address)
addr = self._ip_int_from_string(args[0])
mask = args[1] if len(args) == 2 else self._max_prefixlen
self.network_address = IPv6Address(addr)
self.netmask, self._prefixlen = self._make_netmask(mask)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
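# Illustrative sketch (hypothetical, never called): hosts() above skips
# only the Subnet-Router anycast address (the network address itself),
# and strict=False masks host bits instead of rejecting them.
def _demo_v6_network():
    net = IPv6Network('2001:db8::/126')
    assert [str(a) for a in net.hosts()] == \
        ['2001:db8::1', '2001:db8::2', '2001:db8::3']
    loose = IPv6Network('2001:db8::1/126', strict=False)
    assert loose.network_address == IPv6Address('2001:db8::')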
class _IPv6Constants:
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
cosmopolitan/third_party/python/Lib/os.py
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix or nt, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix' or 'nt'
- os.curdir is a string representing the current directory (always '.')
- os.pardir is a string representing the parent directory (always '..')
  - os.sep is the (or the most common) pathname separator ('/' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
import abc
import sys
import cosmo
import errno
import stat as st
_names = sys.builtin_module_names
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep", "_exit"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
return list(getattr(module, "__all__", (n for n in dir(module) if n[0] != '_')))
name = 'posix'
linesep = '\n'
realname = "nt" if cosmo.kernel == "nt" else "posix"
from posix import *
from posix import _exit
__all__.append('_exit')
import posixpath as path
from posix import _have_functions
import posix
__all__.extend(_get_exports_list(posix))
del posix
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
if _exists("chown"):
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
    # anyway, as a stub that always returns ENOTSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
if _exists("chown"):
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not path.isdir(name):
raise
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
top = fspath(top)
dirs = []
nondirs = []
walk_dirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
    # always suppressed the exception, rather than blowing up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that scandir is global in this module due
# to earlier import-*.
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
with scandir_it:
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
                # a directory, the same behaviour as os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
                        # entry is not a symbolic link, the same
                        # behaviour as os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
walk_dirs.append(entry.path)
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
islink, join = path.islink, path.join
for dirname in dirs:
new_path = join(top, dirname)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
else:
# Recurse into sub-directories
for new_path in walk_dirs:
yield from walk(new_path, topdown, onerror, followlinks)
# Yield after recursion if going bottom up
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
if not isinstance(top, int) or not hasattr(top, '__index__'):
top = fspath(top)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except OSError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except OSError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
continue
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
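# Illustrative sketch (hypothetical, never called; POSIX only): the 'p'
# variants search $PATH and the 'e' variants take an explicit environment,
# which is exactly the combination _execvpe() implements above.
def _demo_execvpe():
    import sys
    pid = fork()
    if pid == 0:
        execvpe(sys.executable, [sys.executable, '-c', 'pass'],
                {'PATH': defpath})
        _exit(127)  # only reached if the exec itself failed
    _, status = waitpid(pid, 0)
    return WEXITSTATUS(status)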
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
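# Illustrative sketch (hypothetical, never called; assumes the POSIX ':'
# pathsep): PATH comes from the given mapping, falling back to defpath.
def _demo_get_exec_path():
    assert get_exec_path({'PATH': '/bin:/usr/bin'}) == ['/bin', '/usr/bin']
    assert get_exec_path({}) == defpath.split(pathsep)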
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
# list() from dict object is an atomic operation
keys = list(self._data)
for key in keys:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
errors = sys.getfilesystemencodeerrors()
def fsencode(filename):
"""Encode filename (an os.PathLike, bytes, or str) to the filesystem
encoding with 'surrogateescape' error handler, return bytes unchanged.
On Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, str):
return filename.encode(encoding, errors)
else:
return filename
def fsdecode(filename):
"""Decode filename (an os.PathLike, bytes, or str) from the filesystem
encoding with 'surrogateescape' error handler, return str unchanged. On
Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
return filename
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
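# Illustrative sketch (hypothetical, never called; assumes a UTF-8
# filesystem encoding): 'surrogateescape' makes the round trip lossless
# even for byte sequences that are not valid UTF-8.
def _demo_fscodec():
    raw = b'caf\xe9'              # latin-1 bytes, invalid as UTF-8
    name = fsdecode(raw)          # -> 'caf\udce9', a lone surrogate
    assert fsencode(name) == raw  # the original byte comes back intact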
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
if not isinstance(args, (tuple, list)):
raise TypeError('argv must be a tuple or a list')
if not args or not args[0]:
raise ValueError('argv first element cannot be empty')
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
try:
import subprocess, io
except ImportError:
raise ImportError('cosmopolitan os.popen() requires manually yoinking subprocess')
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
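# Illustrative sketch (hypothetical, never called; assumes a shell with
# echo): close() returns None on success and the wait status otherwise.
def _demo_popen():
    with popen('echo hello') as stream:
        assert stream.read().strip() == 'hello'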
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
# For testing purposes, make sure the function is available when the C
# implementation exists.
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
# If there is no C implementation, make the pure Python version the
# implementation as transparently as possible.
if not _exists('fspath'):
fspath = _fspath
fspath.__name__ = "fspath"
class PathLike(abc.ABC):
"""Abstract base class for implementing the file system path protocol."""
@abc.abstractmethod
def __fspath__(self):
"""Return the file system path representation of the object."""
raise NotImplementedError
@classmethod
def __subclasshook__(cls, subclass):
return hasattr(subclass, '__fspath__')
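# Illustrative sketch (hypothetical, never called): any object with a
# __fspath__ method satisfies the protocol; the __subclasshook__ above
# makes it count as a PathLike without explicit registration.
def _demo_pathlike():
    class _Wrapper:
        def __init__(self, p):
            self._p = p
        def __fspath__(self):
            return self._p
    w = _Wrapper('/tmp/demo')
    assert fspath(w) == '/tmp/demo'
    assert isinstance(w, PathLike)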
if __name__ == 'PYOBJ.COM':
CLD_CONTINUED = 0
CLD_DUMPED = 0
CLD_EXITED = 0
CLD_TRAPPED = 0
DirEntry = 0
EX_CANTCREAT = 0
EX_CONFIG = 0
EX_DATAERR = 0
EX_IOERR = 0
EX_NOHOST = 0
EX_NOINPUT = 0
EX_NOPERM = 0
EX_NOUSER = 0
EX_OK = 0
EX_OSERR = 0
EX_OSFILE = 0
EX_PROTOCOL = 0
EX_SOFTWARE = 0
EX_TEMPFAIL = 0
EX_UNAVAILABLE = 0
EX_USAGE = 0
F_LOCK = 0
F_OK = 0
F_TEST = 0
F_TLOCK = 0
F_ULOCK = 0
GRND_NONBLOCK = 0
GRND_RANDOM = 0
HAVE_FACCESSAT = 0
HAVE_FCHMODAT = 0
HAVE_FCHOWNAT = 0
HAVE_FSTATAT = 0
HAVE_LCHFLAGS = 0
HAVE_LCHMOD = 0
HAVE_LCHOWN = 0
HAVE_LINKAT = 0
HAVE_LSTAT = 0
HAVE_LUTIMES = 0
HAVE_UTIMENSAT = 0
MS_WINDOWS = 0
O_ACCMODE = 0
O_APPEND = 0
O_ASYNC = 0
O_CLOEXEC = 0
O_CREAT = 0
O_DIRECT = 0
O_DIRECTORY = 0
O_DSYNC = 0
O_EXCL = 0
O_EXEC = 0
O_EXLOCK = 0
O_LARGEFILE = 0
O_NDELAY = 0
O_NOATIME = 0
O_NOCTTY = 0
O_NOFOLLOW = 0
O_NOFOLLOW_ANY = 0
O_NONBLOCK = 0
O_PATH = 0
O_RANDOM = 0
O_RDONLY = 0
O_RDWR = 0
O_RSYNC = 0
O_SEQUENTIAL = 0
O_SHLOCK = 0
O_SYNC = 0
O_TMPFILE = 0
O_TRUNC = 0
O_TTY_INIT = 0
O_WRONLY = 0
POSIX_FADV_DONTNEED = 0
POSIX_FADV_NOREUSE = 0
POSIX_FADV_NORMAL = 0
POSIX_FADV_RANDOM = 0
POSIX_FADV_SEQUENTIAL = 0
POSIX_FADV_WILLNEED = 0
PRIO_PGRP = 0
PRIO_PROCESS = 0
PRIO_USER = 0
RTLD_GLOBAL = 0
RTLD_LAZY = 0
RTLD_LOCAL = 0
RTLD_NOW = 0
R_OK = 0
SCHED_BATCH = 0
SCHED_FIFO = 0
SCHED_IDLE = 0
SCHED_OTHER = 0
SCHED_RESET_ON_FORK = 0
SCHED_RR = 0
WCONTINUED = 0
WCOREDUMP = 0
WEXITED = 0
WEXITSTATUS = 0
WIFCONTINUED = 0
WIFEXITED = 0
WIFSIGNALED = 0
WIFSTOPPED = 0
WNOHANG = 0
WNOWAIT = 0
WSTOPPED = 0
WSTOPSIG = 0
WTERMSIG = 0
WUNTRACED = 0
W_OK = 0
X_OK = 0
_exit = 0
_have_functions = 0
abort = 0
access = 0
chdir = 0
chmod = 0
chown = 0
chroot = 0
close = 0
closerange = 0
cpu_count = 0
device_encoding = 0
dup = 0
dup2 = 0
environ = 0
error = 0
execv = 0
execve = 0
fchdir = 0
fchmod = 0
fchown = 0
fdatasync = 0
fork = 0
fpathconf = 0
fspath = 0
fstat = 0
fsync = 0
ftruncate = 0
get_blocking = 0
get_inheritable = 0
get_terminal_size = 0
getcwd = 0
getcwdb = 0
geteuid = 0
getgrouplist = 0
getgroups = 0
getlogin = 0
getpgid = 0
getpgrp = 0
getpid = 0
getpriority = 0
getsid = 0
getuid = 0
initgroups = 0
isatty = 0
kill = 0
killpg = 0
lchown = 0
link = 0
listdir = 0
lseek = 0
lstat = 0
major = 0
makedev = 0
minor = 0
mkdir = 0
mkfifo = 0
mknod = 0
nice = 0
open = 0
openpty = 0
pathconf = 0
pathconf_names = 0
pipe = 0
pipe2 = 0
posix_fadvise = 0
pread = 0
putenv = 0
pwrite = 0
read = 0
readlink = 0
readv = 0
remove = 0
rename = 0
replace = 0
rmdir = 0
scandir = 0
sched_yield = 0
sendfile = 0
set_blocking = 0
set_inheritable = 0
setegid = 0
seteuid = 0
setgid = 0
setpgid = 0
setpriority = 0
setregid = 0
setresgid = 0
setresuid = 0
setreuid = 0
setsid = 0
setuid = 0
stat = 0
stat_float_times = 0
stat_result = 0
statvfs_result = 0
strerror = 0
symlink = 0
sync = 0
sysconf = 0
sysconf_names = 0
system = 0
tcgetpgrp = 0
tcsetpgrp = 0
terminal_size = 0
times = 0
times_result = 0
truncate = 0
umask = 0
uname = 0
uname_result = 0
unlink = 0
unsetenv = 0
urandom = 0
utime = 0
wait = 0
wait3 = 0
wait4 = 0
waitpid = 0
write = 0
writev = 0
cosmopolitan/third_party/python/Lib/fnmatch.py
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return r'(?s:%s)\Z' % res
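# Illustrative sketch (hypothetical, never called; assumes a POSIX
# os.path, where normcase is a no-op): what translate() produces and how
# the trailing \Z anchors matching to the whole name.
def _demo_fnmatch():
    assert translate('*.py') == r'(?s:.*\.py)\Z'
    names = ['a.py', 'b.txt', '.hidden.py']
    # Unlike the shell, a leading '.' is not special here.
    assert filter(names, '*.py') == ['a.py', '.hidden.py']
    assert fnmatchcase('A.PY', '*.py') is False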
cosmopolitan/third_party/python/Lib/timeit.py
#! /usr/bin/env python3
"""Tool for measuring execution time of small code snippets.
This module avoids a number of common traps for measuring execution
times. See also Tim Peters' introduction to the Algorithms chapter in
the Python Cookbook, published by O'Reilly.
Library usage: see the Timer class.
Command line usage:
  python timeit.py [-n N] [-r N] [-s S] [-u U] [-t] [-c] [-p] [-v] [-h] [--] [statement]
Options:
-n/--number N: how many times to execute 'statement' (default: see below)
-r/--repeat N: how many times to repeat the timer (default 3)
-s/--setup S: statement to be executed once initially (default 'pass').
Execution time of this setup statement is NOT timed.
-p/--process: use time.process_time() (default is time.perf_counter())
-t/--time: use time.time() (deprecated)
-c/--clock: use time.clock() (deprecated)
-v/--verbose: print raw timing results; repeat for more digits precision
-u/--unit: set the output time unit (usec, msec, or sec)
-h/--help: print this usage message and exit
--: separate options from statement, use when statement starts with -
statement: statement to be timed (default 'pass')
A multi-line statement may be given by specifying each line as a
separate argument; indented lines are possible by enclosing an
argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
successive powers of 10 until the total time is at least 0.2 seconds.
Note: there is a certain baseline overhead associated with executing a
pass statement. It differs between versions. The code here doesn't try
to hide it, but you should be aware of it. The baseline overhead can be
measured by invoking the program without arguments.
Classes:
Timer
Functions:
timeit(string, string) -> float
repeat(string, string) -> list
default_timer() -> float
"""
import gc
import sys
import time
import itertools
__all__ = ["Timer", "timeit", "repeat", "default_timer"]
dummy_src_name = "<timeit-src>"
default_number = 1000000
default_repeat = 3
default_timer = time.perf_counter
_globals = globals
# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer{init}):
{setup}
_t0 = _timer()
for _i in _it:
{stmt}
_t1 = _timer()
return _t1 - _t0
"""
def reindent(src, indent):
"""Helper to reindent a multi-line statement."""
return src.replace("\n", "\n" + " "*indent)
class Timer:
"""Class for timing execution speed of small code snippets.
The constructor takes a statement to be timed, an additional
statement used for setup, and a timer function. Both statements
default to 'pass'; the timer function is platform-dependent (see
module doc string). If 'globals' is specified, the code will be
executed within that namespace (as opposed to inside timeit's
namespace).
To measure the execution time of the first statement, use the
timeit() method. The repeat() method is a convenience to call
timeit() multiple times and return a list of results.
The statements may contain newlines, as long as they don't contain
multi-line string literals.
"""
def __init__(self, stmt="pass", setup="pass", timer=default_timer,
globals=None):
"""Constructor. See class doc string."""
self.timer = timer
local_ns = {}
global_ns = _globals() if globals is None else globals
init = ''
if isinstance(setup, str):
# Check that the code can be compiled outside a function
compile(setup, dummy_src_name, "exec")
stmtprefix = setup + '\n'
setup = reindent(setup, 4)
elif callable(setup):
local_ns['_setup'] = setup
init += ', _setup=_setup'
stmtprefix = ''
setup = '_setup()'
else:
raise ValueError("setup is neither a string nor callable")
if isinstance(stmt, str):
# Check that the code can be compiled outside a function
compile(stmtprefix + stmt, dummy_src_name, "exec")
stmt = reindent(stmt, 8)
elif callable(stmt):
local_ns['_stmt'] = stmt
init += ', _stmt=_stmt'
stmt = '_stmt()'
else:
raise ValueError("stmt is neither a string nor callable")
src = template.format(stmt=stmt, setup=setup, init=init)
self.src = src # Save for traceback display
code = compile(src, dummy_src_name, "exec")
exec(code, global_ns, local_ns)
self.inner = local_ns["inner"]
def print_exc(self, file=None):
"""Helper to print a traceback from the timed code.
Typical use:
t = Timer(...) # outside the try/except
try:
t.timeit(...) # or t.repeat(...)
except:
t.print_exc()
The advantage over the standard traceback is that source lines
in the compiled template will be displayed.
The optional file argument directs where the traceback is
sent; it defaults to sys.stderr.
"""
import linecache, traceback
if self.src is not None:
linecache.cache[dummy_src_name] = (len(self.src),
None,
self.src.split("\n"),
dummy_src_name)
# else the source is already stored somewhere else
traceback.print_exc(file=file)
def timeit(self, number=default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
it = itertools.repeat(None, number)
gcold = gc.isenabled()
gc.disable()
try:
timing = self.inner(it, self.timer)
finally:
if gcold:
gc.enable()
return timing
def repeat(self, repeat=default_repeat, number=default_number):
"""Call timeit() a few times.
        This is a convenience function that calls timeit()
        repeatedly, returning a list of results.  The first argument
        specifies how many times to call timeit(), defaulting to 3;
        the second argument gives the 'number' argument of timeit(),
        defaulting to one million.
Note: it's tempting to calculate mean and standard deviation
from the result vector and report these. However, this is not
very useful. In a typical case, the lowest value gives a
lower bound for how fast your machine can run the given code
snippet; higher values in the result vector are typically not
caused by variability in Python's speed, but by other
processes interfering with your timing accuracy. So the min()
of the result is probably the only number you should be
interested in. After that, you should look at the entire
vector and apply common sense rather than statistics.
"""
r = []
for i in range(repeat):
t = self.timeit(number)
r.append(t)
return r
def autorange(self, callback=None):
"""Return the number of loops and time taken so that total time >= 0.2.
Calls the timeit method with *number* set to successive powers of
ten (10, 100, 1000, ...) up to a maximum of one billion, until
the time taken is at least 0.2 second, or the maximum is reached.
Returns ``(number, time_taken)``.
If *callback* is given and is not None, it will be called after
each trial with two arguments: ``callback(number, time_taken)``.
"""
for i in range(1, 10):
number = 10**i
time_taken = self.timeit(number)
if callback:
callback(number, time_taken)
if time_taken >= 0.2:
break
return (number, time_taken)
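# A minimal Timer usage sketch (the statement and setup strings below are
# illustrative, not part of this module):
#
#   t = Timer("d[k] = 1", setup="d = {}; k = 'x'")
#   loops, _ = t.autorange()                  # pick a loop count >= 0.2 s
#   best = min(t.repeat(repeat=5, number=loops)) / loops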
def timeit(stmt="pass", setup="pass", timer=default_timer,
number=default_number, globals=None):
"""Convenience function to create Timer object and call timeit method."""
return Timer(stmt, setup, timer, globals).timeit(number)
def repeat(stmt="pass", setup="pass", timer=default_timer,
repeat=default_repeat, number=default_number, globals=None):
"""Convenience function to create Timer object and call repeat method."""
return Timer(stmt, setup, timer, globals).repeat(repeat, number)
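# e.g. timeit("sorted(range(100))", number=10000) returns the total seconds
# for 10000 loops, and repeat(...) returns one such float per round.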
def main(args=None, *, _wrap_timer=None):
"""Main program, used when run as a script.
The optional 'args' argument specifies the command line to be parsed,
defaulting to sys.argv[1:].
The return value is an exit code to be passed to sys.exit(); it
may be None to indicate success.
When an exception happens during timing, a traceback is printed to
stderr and the return value is 1. Exceptions at other times
(including the template compilation) are not caught.
'_wrap_timer' is an internal interface used for unit testing. If it
is not None, it must be a callable that accepts a timer function
and returns another timer function (used for unit testing).
"""
if args is None:
args = sys.argv[1:]
import getopt
try:
opts, args = getopt.getopt(args, "n:u:s:r:tcpvh",
["number=", "setup=", "repeat=",
"time", "clock", "process",
"verbose", "unit=", "help"])
except getopt.error as err:
print(err)
print("use -h/--help for command line help")
return 2
timer = default_timer
stmt = "\n".join(args) or "pass"
number = 0 # auto-determine
setup = []
repeat = default_repeat
verbose = 0
time_unit = None
units = {"usec": 1, "msec": 1e3, "sec": 1e6}
precision = 3
for o, a in opts:
if o in ("-n", "--number"):
number = int(a)
if o in ("-s", "--setup"):
setup.append(a)
if o in ("-u", "--unit"):
if a in units:
time_unit = a
else:
print("Unrecognized unit. Please select usec, msec, or sec.",
file=sys.stderr)
return 2
if o in ("-r", "--repeat"):
repeat = int(a)
if repeat <= 0:
repeat = 1
if o in ("-t", "--time"):
timer = time.time
if o in ("-c", "--clock"):
timer = time.clock
if o in ("-p", "--process"):
timer = time.process_time
if o in ("-v", "--verbose"):
if verbose:
precision += 1
verbose += 1
if o in ("-h", "--help"):
print(__doc__, end=' ')
return 0
setup = "\n".join(setup) or "pass"
# Include the current directory, so that local imports work (sys.path
# contains the directory of this script, rather than the current
# directory)
import os
sys.path.insert(0, os.curdir)
if _wrap_timer is not None:
timer = _wrap_timer(timer)
t = Timer(stmt, setup, timer)
if number == 0:
# determine number so that 0.2 <= total time < 2.0
callback = None
if verbose:
def callback(number, time_taken):
msg = "{num} loops -> {secs:.{prec}g} secs"
print(msg.format(num=number, secs=time_taken, prec=precision))
try:
number, _ = t.autorange(callback)
except:
t.print_exc()
return 1
try:
r = t.repeat(repeat, number)
except:
t.print_exc()
return 1
best = min(r)
if verbose:
print("raw times:", " ".join(["%.*g" % (precision, x) for x in r]))
print("%d loops," % number, end=' ')
usec = best * 1e6 / number
if time_unit is not None:
scale = units[time_unit]
else:
scales = [(scale, unit) for unit, scale in units.items()]
scales.sort(reverse=True)
for scale, time_unit in scales:
if usec >= scale:
break
print("best of %d: %.*g %s per loop" % (repeat, precision,
usec/scale, time_unit))
best = min(r)
usec = best * 1e6 / number
worst = max(r)
if worst >= best * 4:
usec = worst * 1e6 / number
import warnings
warnings.warn_explicit(
"The test results are likely unreliable. The worst\n"
"time (%.*g %s) was more than four times slower than the best time." %
(precision, usec/scale, time_unit),
UserWarning, '', 0)
return None
if __name__ == "__main__":
sys.exit(main())
cosmopolitan/third_party/python/Lib/smtplib.py

#! /usr/bin/env python3
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
[email protected].
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <[email protected]>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <[email protected]>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <[email protected]>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <[email protected]>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <[email protected]>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import io
import re
import email.utils
import email.message
import email.generator
import base64
import hmac
import copy
import datetime
import sys
from email.base64mime import body_encode as encode_base64
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
bCRLF = b"\r\n"
_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(OSError):
"""Base class for all exceptions raised by this module."""
class SMTPNotSupportedError(SMTPException):
"""The command or option is not supported by the SMTP server.
This exception is raised when an attempt is made to run a command or a
command with an option which is not supported by the server.
"""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
    In addition to the attributes set on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addrstring):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything email.utils.parseaddr can handle.
"""
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, use it as is and hope for the best.
if addrstring.strip().startswith('<'):
return addrstring
return "<%s>" % addrstring
return "<%s>" % addr
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, so use it as is.
return addrstring
return addr
# Legacy method kept for backward compatibility.
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
def _quote_periods(bindata):
return re.sub(br'(?m)^\.', b'..', bindata)
def _fix_eols(data):
return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)
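# For example:
#   _fix_eols("a\nb\rc")         -> 'a\r\nb\r\nc'
#   _quote_periods(b".hi\n.bye") -> b'..hi\n..bye'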
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_msg = "ehlo"
ehlo_resp = None
does_esmtp = 0
default_port = SMTP_PORT
def __init__(self, host='', port=0, local_hostname=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. If a host is specified the
connect method is called, and if it returns anything other than a
success code an SMTPConnectError is raised. If specified,
`local_hostname` is used as the FQDN of the local host in the HELO/EHLO
command. Otherwise, the local hostname is found using
socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host,
port) for the socket to bind to as its source address before
connecting. If the host is '' and port is 0, the OS default behavior
will be used.
"""
self._host = host
self.timeout = timeout
self.esmtp_features = {}
self.command_encoding = 'ascii'
self.source_address = source_address
if host:
(code, msg) = self.connect(host, port)
if code != 220:
self.close()
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
# RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
# if that can't be calculated, that we should use a domain literal
# instead (essentially an encoded IP address like [A.B.C.D]).
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def __enter__(self):
return self
def __exit__(self, *args):
try:
code, message = self.docmd("QUIT")
if code != 221:
raise SMTPResponseException(code, message)
except SMTPServerDisconnected:
pass
finally:
self.close()
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
def _print_debug(self, *args):
if self.debuglevel > 1:
print(datetime.datetime.now().time(), *args, file=sys.stderr)
else:
print(*args, file=sys.stderr)
def _get_socket(self, host, port, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
if self.debuglevel > 0:
self._print_debug('connect: to', (host, port), self.source_address)
return socket.create_connection((host, port), timeout,
self.source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if source_address:
self.source_address = source_address
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i + 1:]
try:
port = int(port)
except ValueError:
raise OSError("nonnumeric port")
if not port:
port = self.default_port
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
self.sock = self._get_socket(host, port, self.timeout)
self.file = None
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return (code, msg)
def send(self, s):
"""Send `s' to the server."""
if self.debuglevel > 0:
self._print_debug('send:', repr(s))
if hasattr(self, 'sock') and self.sock:
if isinstance(s, str):
# send is used by the 'data' command, where command_encoding
# should not be used, but 'data' needs to convert the string to
# binary itself anyway, so that's not a problem.
s = s.encode(self.command_encoding)
try:
self.sock.sendall(s)
except OSError:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline(_MAXLINE + 1)
except OSError as e:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed: "
+ str(e))
if not line:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0:
self._print_debug('reply:', repr(line))
if len(line) > _MAXLINE:
self.close()
raise SMTPResponseException(500, "Line too long.")
resp.append(line[4:].strip(b' \t\r\n'))
code = line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4] != b"-":
break
errmsg = b"\n".join(resp)
if self.debuglevel > 0:
self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg))
return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code, msg) = self.getreply()
self.helo_resp = msg
return (code, msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd(self.ehlo_msg, name or self.local_hostname)
(code, msg) = self.getreply()
# According to RFC1869 some (badly written)
        # MTAs will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp = msg
if code != 250:
return (code, msg)
self.does_esmtp = 1
#parse the ehlo response -ddm
assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
resp = self.ehlo_resp.decode("latin-1").split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
            # parameters, but we're not going to check for that here.  Note
# that the space isn't present if there are no parameters.
m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
if m:
feature = m.group("feature").lower()
params = m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature] = params
return (code, msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
self.command_encoding = 'ascii'
return self.docmd("rset")
def _rset(self):
"""Internal 'rset' command which ignores any SMTPServerDisconnected error.
Used internally in the library, since the server disconnected error
should appear to the application when the *next* command is issued, if
we are doing an internal "safety" reset.
"""
try:
self.rset()
except SMTPServerDisconnected:
pass
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self, sender, options=()):
"""SMTP 'mail' command -- begins mail xfer session.
This method may raise the following exceptions:
SMTPNotSupportedError The options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
"""
optionlist = ''
if options and self.does_esmtp:
if any(x.lower()=='smtputf8' for x in options):
if self.has_extn('smtputf8'):
self.command_encoding = 'utf-8'
else:
raise SMTPNotSupportedError(
'SMTPUTF8 not supported by server')
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=()):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
return self.getreply()
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
        response code received when all the data is sent.  If msg
is a string, lone '\\r' and '\\n' characters are converted to
'\\r\\n' characters. If msg is bytes, it is transmitted as is.
"""
self.putcmd("data")
(code, repl) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, repl))
if code != 354:
raise SMTPDataError(code, repl)
else:
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
q = _quote_periods(msg)
if q[-2:] != bCRLF:
q = q + bCRLF
q = q + b"." + bCRLF
self.send(q)
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, msg))
return (code, msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd("vrfy", _addr_only(address))
return self.getreply()
# a.k.a.
vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", _addr_only(address))
return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
def auth(self, mechanism, authobject, *, initial_response_ok=True):
"""Authentication command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - the valid values are those listed in the 'auth'
element of 'esmtp_features'.
'authobject' must be a callable object taking a single argument:
data = authobject(challenge)
It will be called to process the server's challenge response; the
challenge argument it is passed will be a bytes. It should return
an ASCII string that will be base64 encoded and sent to the server.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
              to the AUTH command, if the authentication method supports it.
"""
# RFC 4954 allows auth methods to provide an initial response. Not all
# methods support it. By definition, if they return something other
# than None when challenge is None, then they do. See issue #15014.
mechanism = mechanism.upper()
initial_response = (authobject() if initial_response_ok else None)
if initial_response is not None:
response = encode_base64(initial_response.encode('ascii'), eol='')
(code, resp) = self.docmd("AUTH", mechanism + " " + response)
else:
(code, resp) = self.docmd("AUTH", mechanism)
# If server responds with a challenge, send the response.
if code == 334:
challenge = base64.decodebytes(resp)
response = encode_base64(
authobject(challenge).encode('ascii'), eol='')
(code, resp) = self.docmd(response)
if code in (235, 503):
return (code, resp)
raise SMTPAuthenticationError(code, resp)
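    # Sketch of a custom authobject (mechanism name and token format are
    # illustrative; the server must advertise XOAUTH2 for this to work):
    #   def xoauth2(challenge=None):
    #       return 'user=%s\1auth=Bearer %s\1\1' % (username, access_token)
    #   smtp.auth('XOAUTH2', xoauth2, initial_response_ok=True)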
def auth_cram_md5(self, challenge=None):
""" Authobject to use with CRAM-MD5 authentication. Requires self.user
and self.password to be set."""
# CRAM-MD5 does not support initial-response.
if challenge is None:
return None
return self.user + " " + hmac.HMAC(
self.password.encode('ascii'), challenge, 'md5').hexdigest()
def auth_plain(self, challenge=None):
""" Authobject to use with PLAIN authentication. Requires self.user and
self.password to be set."""
return "\0%s\0%s" % (self.user, self.password)
def auth_login(self, challenge=None):
""" Authobject to use with LOGIN authentication. Requires self.user and
self.password to be set."""
if challenge is None:
return self.user
else:
return self.password
def login(self, user, password, *, initial_response_ok=True):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
              to the AUTH command, if the authentication method supports it.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPNotSupportedError The AUTH command is not supported by the
server.
SMTPException No suitable authentication method was
found.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("auth"):
raise SMTPNotSupportedError(
"SMTP AUTH extension not supported by server.")
# Authentication methods the server claims to support
advertised_authlist = self.esmtp_features["auth"].split()
# Authentication methods we can handle in our preferred order:
preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN']
# We try the supported authentications in our preferred order, if
# the server supports them.
authlist = [auth for auth in preferred_auths
if auth in advertised_authlist]
if not authlist:
raise SMTPException("No suitable authentication method found.")
# Some servers advertise authentication methods they don't really
# support, so if authentication fails, we continue until we've tried
# all methods.
self.user, self.password = user, password
for authmethod in authlist:
method_name = 'auth_' + authmethod.lower().replace('-', '_')
try:
(code, resp) = self.auth(
authmethod, getattr(self, method_name),
initial_response_ok=initial_response_ok)
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
if code in (235, 503):
return (code, resp)
except SMTPAuthenticationError as e:
last_exception = e
# We could not login successfully. Return result of last attempt.
raise last_exception
def starttls(self, keyfile=None, certfile=None, context=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("starttls"):
raise SMTPNotSupportedError(
"STARTTLS extension not supported by server.")
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
if not _have_ssl:
raise RuntimeError("No SSL support included in this Python")
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a "
"custom context instead", DeprecationWarning, 2)
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.sock = context.wrap_socket(self.sock,
server_hostname=self._host)
self.file = None
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = 0
else:
# RFC 3207:
# 501 Syntax error (no parameters allowed)
# 454 TLS not available due to temporary reason
raise SMTPResponseException(resp, reply)
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=(),
rcpt_options=()):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
msg may be a string containing characters in the ASCII range, or a byte
string. A string is encoded to bytes using the ascii codec, and lone
\\r and \\n characters are converted to \\r\\n characters.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["[email protected]","[email protected]","[email protected]","[email protected]"]
>>> msg = '''\\
... From: [email protected]
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("[email protected]",tolist,msg)
{ "[email protected]" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
if self.does_esmtp:
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code, resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, str):
to_addrs = [to_addrs]
for each in to_addrs:
(code, resp) = self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each] = (code, resp)
if code == 421:
self.close()
raise SMTPRecipientsRefused(senderrs)
if len(senderrs) == len(to_addrs):
# the server refused all our recipients
self._rset()
raise SMTPRecipientsRefused(senderrs)
(code, resp) = self.data(msg)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPDataError(code, resp)
        # if we got here then somebody got our mail
return senderrs
def send_message(self, msg, from_addr=None, to_addrs=None,
mail_options=(), rcpt_options=()):
"""Converts message to a bytestring and passes it to sendmail.
The arguments are as for sendmail, except that msg is an
email.message.Message object. If from_addr is None or to_addrs is
None, these arguments are taken from the headers of the Message as
described in RFC 2822 (a ValueError is raised if there is more than
one set of 'Resent-' headers). Regardless of the values of from_addr and
to_addr, any Bcc field (or Resent-Bcc field, when the Message is a
resent) of the Message object won't be transmitted. The Message
object is then serialized using email.generator.BytesGenerator and
sendmail is called to transmit the message. If the sender or any of
the recipient addresses contain non-ASCII and the server advertises the
SMTPUTF8 capability, the policy is cloned with utf8 set to True for the
serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send.
        If the server does not support SMTPUTF8, an SMTPNotSupportedError is
raised. Otherwise the generator is called without modifying the
policy.
"""
# 'Resent-Date' is a mandatory field if the Message is resent (RFC 2822
# Section 3.6.6). In such a case, we use the 'Resent-*' fields. However,
# if there is more than one 'Resent-' block there's no way to
# unambiguously determine which one is the most recent in all cases,
# so rather than guess we raise a ValueError in that case.
#
# TODO implement heuristics to guess the correct Resent-* block with an
# option allowing the user to enable the heuristics. (It should be
# possible to guess correctly almost all of the time.)
self.ehlo_or_helo_if_needed()
resent = msg.get_all('Resent-Date')
if resent is None:
header_prefix = ''
elif len(resent) == 1:
header_prefix = 'Resent-'
else:
raise ValueError("message has more than one 'Resent-' header block")
if from_addr is None:
# Prefer the sender field per RFC 2822:3.6.2.
from_addr = (msg[header_prefix + 'Sender']
if (header_prefix + 'Sender') in msg
else msg[header_prefix + 'From'])
from_addr = email.utils.getaddresses([from_addr])[0][1]
if to_addrs is None:
addr_fields = [f for f in (msg[header_prefix + 'To'],
msg[header_prefix + 'Bcc'],
msg[header_prefix + 'Cc'])
if f is not None]
to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)]
# Make a local copy so we can delete the bcc headers.
msg_copy = copy.copy(msg)
del msg_copy['Bcc']
del msg_copy['Resent-Bcc']
international = False
try:
''.join([from_addr, *to_addrs]).encode('ascii')
except UnicodeEncodeError:
if not self.has_extn('smtputf8'):
raise SMTPNotSupportedError(
"One or more source or delivery addresses require"
" internationalized email support, but the server"
" does not advertise the required SMTPUTF8 capability")
international = True
with io.BytesIO() as bytesmsg:
if international:
g = email.generator.BytesGenerator(
bytesmsg, policy=msg.policy.clone(utf8=True))
mail_options = (*mail_options, 'SMTPUTF8', 'BODY=8BITMIME')
else:
g = email.generator.BytesGenerator(bytesmsg)
g.flatten(msg_copy, linesep='\r\n')
flatmsg = bytesmsg.getvalue()
return self.sendmail(from_addr, to_addrs, flatmsg, mail_options,
rcpt_options)
def close(self):
"""Close the connection to the SMTP server."""
try:
file = self.file
self.file = None
if file:
file.close()
finally:
sock = self.sock
self.sock = None
if sock:
sock.close()
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
# A new EHLO is required after reconnecting with connect()
self.ehlo_resp = self.helo_resp = None
self.esmtp_features = {}
self.does_esmtp = False
self.close()
return res
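# A minimal transaction sketch (host, port and credentials are placeholders):
#
#   with SMTP("mail.example.com", 587) as server:
#       server.starttls()
#       server.login("user", "secret")
#       server.sendmail("[email protected]", ["[email protected]"],
#                       "Subject: hello\r\n\r\nmessage body\r\n")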
if _have_ssl:
class SMTP_SSL(SMTP):
""" This is a subclass derived from SMTP that connects over an SSL
encrypted socket (to use this class you need a socket module that was
compiled with SSL support). If host is not specified, '' (the local
host) is used. If port is omitted, the standard SMTP-over-SSL port
(465) is used. local_hostname and source_address have the same meaning
as they do in the SMTP class. keyfile and certfile are also optional -
they can contain a PEM formatted private key and certificate chain file
        for the SSL connection. context is also optional and can contain an
        SSLContext; it is an alternative to keyfile and certfile. If it is
        specified, both keyfile and certfile must be None.
"""
default_port = SMTP_SSL_PORT
def __init__(self, host='', port=0, local_hostname=None,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a "
"custom context instead", DeprecationWarning, 2)
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
SMTP.__init__(self, host, port, local_hostname, timeout,
source_address)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
new_socket = socket.create_connection((host, port), timeout,
self.source_address)
new_socket = self.context.wrap_socket(new_socket,
server_hostname=self._host)
return new_socket
__all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003
class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
on the standard SMTP client. It's common to use Unix sockets for
LMTP, so our connect() method must support that as well as a regular
host:port server. local_hostname and source_address have the same
meaning as they do in the SMTP class. To specify a Unix socket,
you must use an absolute path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP servers generally don't support or require any
authentication, but your mileage might vary."""
ehlo_msg = "lhlo"
def __init__(self, host='', port=LMTP_PORT, local_hostname=None,
source_address=None):
"""Initialize a new instance."""
SMTP.__init__(self, host, port, local_hostname=local_hostname,
source_address=source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
return SMTP.connect(self, host, port, source_address=source_address)
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.file = None
self.sock.connect(host)
except OSError:
if self.debuglevel > 0:
self._print_debug('connect fail:', host)
if self.sock:
self.sock.close()
self.sock = None
raise
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', msg)
return (code, msg)
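# e.g. LMTP("/var/run/lmtp.sock") talks LMTP over that Unix socket (the path
# is a placeholder), while LMTP("localhost") uses TCP port 2003.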
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
def prompt(prompt):
sys.stdout.write(prompt + ": ")
sys.stdout.flush()
return sys.stdin.readline().strip()
fromaddr = prompt("From")
toaddrs = prompt("To").split(',')
print("Enter message, end with ^D:")
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print("Message length is %d" % len(msg))
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
cosmopolitan/third_party/python/Lib/mimetypes.py

"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=True) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=True) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib.parse
try:
import winreg as _winreg
except ImportError:
_winreg = None
__all__ = [
"knownfiles", "inited", "MimeTypes",
"guess_type", "guess_all_extensions", "guess_extension",
"add_type", "init", "read_mime_types",
"suffix_map", "encodings_map", "types_map", "common_types"
]
knownfiles = [
"/zip/.python/mime.types",
"/etc/mime.types",
"/etc/httpd/mime.types", # Mac OS X
"/etc/httpd/conf/mime.types", # Apache
"/etc/apache/mime.types", # Apache 1
"/etc/apache2/mime.types", # Apache 2
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
def __init__(self, filenames=(), strict=True):
if not inited:
init()
self.encodings_map = encodings_map.copy()
self.suffix_map = suffix_map.copy()
self.types_map = ({}, {}) # dict for (non-strict, strict)
self.types_map_inv = ({}, {})
for (ext, type) in types_map.items():
self.add_type(type, ext, True)
for (ext, type) in common_types.items():
self.add_type(type, ext, False)
for name in filenames:
self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
scheme, url = urllib.parse.splittype(url)
if scheme == 'data':
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
# type/subtype defaults to "text/plain"
comma = url.find(',')
if comma < 0:
# bad data URL
return None, None
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return type, None # never compressed, so encoding is None
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
elif strict:
return None, encoding
types_map = self.types_map[False]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
else:
return None, encoding
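    # For example, with m = MimeTypes():
    #   m.guess_type("data:,hi")  -> ('text/plain', None)
    #   m.guess_type("page.HTML") -> ('text/html', None)  # case-insensitive fallback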
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
return extensions[0]
def read(self, filename, strict=True):
"""
Read a single mime.types-format file, specified by pathname.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
with open(filename, encoding='utf-8') as fp:
self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
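    # A mime.types line maps one type to whitespace-separated extensions,
    # e.g. "text/html  html htm  # anything after '#' is ignored".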
def read_windows_registry(self, strict=True):
"""
Load the MIME types database from Windows registry.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
# Windows only
if not _winreg:
return
def enum_types(mimedb):
i = 0
while True:
try:
ctype = _winreg.EnumKey(mimedb, i)
except EnvironmentError:
break
else:
if '\0' not in ctype:
yield ctype
i += 1
with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr:
for subkeyname in enum_types(hkcr):
try:
with _winreg.OpenKey(hkcr, subkeyname) as subkey:
# Only check file extensions
if not subkeyname.startswith("."):
continue
# raises EnvironmentError if no 'Content Type' value
mimetype, datatype = _winreg.QueryValueEx(
subkey, 'Content Type')
if datatype != _winreg.REG_SZ:
continue
self.add_type(mimetype, subkeyname, strict)
except EnvironmentError:
continue
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
if _db is None:
init()
return _db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict)
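# Module-level usage sketch:
#   guess_type("archive.tar.gz")       -> ('application/x-tar', 'gzip')
#   guess_extension("text/html")       -> '.htm'
#   guess_all_extensions("audio/mpeg") -> ['.mp2', '.mp3']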
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
db = MimeTypes()
if files is None:
if _winreg:
db.read_windows_registry()
files = knownfiles
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
def read_mime_types(file):
try:
f = open(file)
except OSError:
return None
with f:
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True]
def _default_mime_types():
global suffix_map
global encodings_map
global types_map
global common_types
suffix_map = {
'.svgz': '.svg.gz',
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
'.txz': '.tar.xz',
}
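    # guess_type() expands these recursively, e.g. 'x.tgz' -> 'x.tar.gz',
    # which then resolves to ('application/x-tar', 'gzip').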
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
'.xz': 'xz',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.iana.org/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted!
types_map = {
'.a' : 'application/octet-stream',
'.ai' : 'application/postscript',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.au' : 'audio/basic',
'.avi' : 'video/x-msvideo',
'.bat' : 'text/plain',
'.bcpio' : 'application/x-bcpio',
'.bin' : 'application/octet-stream',
'.bmp' : 'image/x-ms-bmp',
'.c' : 'text/plain',
# Duplicates :(
'.cdf' : 'application/x-cdf',
'.cdf' : 'application/x-netcdf',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.css' : 'text/css',
'.csv' : 'text/csv',
'.dll' : 'application/octet-stream',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.dvi' : 'application/x-dvi',
'.eml' : 'message/rfc822',
'.eps' : 'application/postscript',
'.etx' : 'text/x-setext',
'.exe' : 'application/octet-stream',
'.gif' : 'image/gif',
'.gtar' : 'application/x-gtar',
'.h' : 'text/plain',
'.hdf' : 'application/x-hdf',
'.htm' : 'text/html',
'.html' : 'text/html',
'.ico' : 'image/vnd.microsoft.icon',
'.ief' : 'image/ief',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
'.js' : 'application/javascript',
'.json' : 'application/json',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
'.m3u' : 'application/vnd.apple.mpegurl',
'.m3u8' : 'application/vnd.apple.mpegurl',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.mif' : 'application/x-mif',
'.mjs' : 'application/javascript',
'.mov' : 'video/quicktime',
'.movie' : 'video/x-sgi-movie',
'.mp2' : 'audio/mpeg',
'.mp3' : 'audio/mpeg',
'.mp4' : 'video/mp4',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpeg' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.ms' : 'application/x-troff-ms',
'.nc' : 'application/x-netcdf',
'.nws' : 'message/rfc822',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.oda' : 'application/oda',
'.p12' : 'application/x-pkcs12',
'.p7c' : 'application/pkcs7-mime',
'.pbm' : 'image/x-portable-bitmap',
'.pdf' : 'application/pdf',
'.pfx' : 'application/x-pkcs12',
'.pgm' : 'image/x-portable-graymap',
'.pl' : 'text/plain',
'.png' : 'image/png',
'.pnm' : 'image/x-portable-anymap',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.ppm' : 'image/x-portable-pixmap',
'.pps' : 'application/vnd.ms-powerpoint',
'.ppt' : 'application/vnd.ms-powerpoint',
'.ps' : 'application/postscript',
'.pwz' : 'application/vnd.ms-powerpoint',
'.py' : 'text/x-python',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.qt' : 'video/quicktime',
'.ra' : 'audio/x-pn-realaudio',
'.ram' : 'application/x-pn-realaudio',
'.ras' : 'image/x-cmu-raster',
'.rdf' : 'application/xml',
'.rgb' : 'image/x-rgb',
'.roff' : 'application/x-troff',
'.rtx' : 'text/richtext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.snd' : 'audio/basic',
'.so' : 'application/octet-stream',
'.src' : 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.svg' : 'image/svg+xml',
'.swf' : 'application/x-shockwave-flash',
'.t' : 'application/x-troff',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif' : 'image/tiff',
'.tiff' : 'image/tiff',
'.tr' : 'application/x-troff',
'.tsv' : 'text/tab-separated-values',
'.txt' : 'text/plain',
'.ustar' : 'application/x-ustar',
'.vcf' : 'text/x-vcard',
'.wav' : 'audio/x-wav',
'.webm' : 'video/webm',
'.wiz' : 'application/msword',
'.wsdl' : 'application/xml',
'.xbm' : 'image/x-xbitmap',
'.xlb' : 'application/vnd.ms-excel',
# Duplicates :(
'.xls' : 'application/excel',
'.xls' : 'application/vnd.ms-excel',
'.xml' : 'text/xml',
'.xpdl' : 'application/xml',
'.xpm' : 'image/x-xpixmap',
'.xsl' : 'application/xml',
'.xwd' : 'image/x-xwindowdump',
'.zip' : 'application/zip',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = {
'.jpg' : 'image/jpg',
'.mid' : 'audio/midi',
'.midi': 'audio/midi',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.pict': 'image/pict',
'.rtf' : 'application/rtf',
'.xul' : 'text/xul'
}
_default_mime_types()
if __name__ == '__main__':
import getopt
USAGE = """\
Usage: mimetypes.py [options] type
Options:
--help / -h -- print this message and exit
    --lenient / -l -- additionally search some common, but non-standard
types.
--extension / -e -- guess extension instead of type
More than one type argument may be given.
"""
def usage(code, msg=''):
print(USAGE)
if msg: print(msg)
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hle',
['help', 'lenient', 'extension'])
except getopt.error as msg:
usage(1, msg)
strict = 1
extension = 0
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-l', '--lenient'):
strict = 0
elif opt in ('-e', '--extension'):
extension = 1
for gtype in args:
if extension:
guess = guess_extension(gtype, strict)
if not guess: print("I don't know anything about type", gtype)
else: print(guess)
else:
guess, encoding = guess_type(gtype, strict)
if not guess: print("I don't know anything about type", gtype)
else: print('type:', guess, 'encoding:', encoding)
cosmopolitan/third_party/python/Lib/configparser.py

"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True, default_section='DEFAULT',
interpolation=<unset>, converters=<unset>):
Create the parser. When `defaults' is given, it is initialized into the
dictionary or intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
When `default_section' is given, the name of the special section is
named accordingly. By default it is called ``"DEFAULT"`` but this can
be customized to point to any other valid section name. Its current
value can be retrieved using the ``parser_instance.default_section``
attribute and may be modified at runtime.
When `interpolation` is given, it should be an Interpolation subclass
instance. It will be used as the handler for option value
        pre-processing when using getters. RawConfigParser objects don't do
        any sort of interpolation, whereas ConfigParser uses an instance of
        BasicInterpolation. The library also provides a ``zc.buildout``
inspired ExtendedInterpolation implementation.
When `converters` is given, it should be a dictionary where each key
represents the name of a type converter and each value is a callable
implementing the conversion from string to the desired datatype. Every
converter gets its corresponding get*() method on the parser object and
section proxies.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the iterable of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
    getint(section, option, raw=False, vars=None, fallback=_UNSET)
        Like get(), but convert value to an integer.
    getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
        Like get(), but convert value to a float.
    getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import os
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationMissingOptionError", "InterpolationSyntaxError",
"ParsingError", "MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"Interpolation", "BasicInterpolation", "ExtendedInterpolation",
"LegacyInterpolation", "SectionProxy", "ConverterMapping",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
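# Illustrative sketch (not part of the original module): the basic read/get
# workflow described in the module docstring above. ConfigParser is defined
# later in this file; the name is only resolved when the function is called,
# so the forward reference is harmless.
def _demo_basic_usage():
    parser = ConfigParser()
    parser.read_string(
        "[DEFAULT]\n"
        "compression = yes\n"
        "[server]\n"
        "host = localhost\n"
        "port = 8080\n")
    assert parser.get('server', 'host') == 'localhost'
    assert parser.getint('server', 'port') == 8080
    assert parser.getboolean('server', 'compression')  # falls back to DEFAULT
    assert parser.get('server', 'missing', fallback='n/a') == 'n/a'
    assert parser.sections() == ['server']  # DEFAULT is never listed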
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is repeated in an input source.
Possible repetitions that raise this exception are: multiple creation
using the API or in strict parsers when a section is found more than once
in a single input file, string or dictionary.
"""
def __init__(self, section, source=None, lineno=None):
msg = [repr(section), " already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": section ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Section ")
Error.__init__(self, "".join(msg))
self.section = section
self.source = source
self.lineno = lineno
self.args = (section, source, lineno)
class DuplicateOptionError(Error):
"""Raised by strict parsers when an option is repeated in an input source.
Current implementation raises this exception only when an option is found
more than once in a single file, string or dictionary.
"""
def __init__(self, section, option, source=None, lineno=None):
msg = [repr(option), " in section ", repr(section),
" already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": option ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Option ")
Error.__init__(self, "".join(msg))
self.section = section
self.option = option
self.source = source
self.lineno = lineno
self.args = (section, option, source, lineno)
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution: option {!r} in section {!r} contains "
"an interpolation key {!r} which is not a valid option name. "
"Raw value: {!r}".format(option, section, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text contains invalid syntax.
Current implementation raises this exception when the source text into
which substitutions are made does not conform to the required syntax.
"""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Recursion limit exceeded in value substitution: option {!r} "
"in section {!r} contains an interpolation key which "
"cannot be substituted in {} steps. Raw value: {!r}"
"".format(option, section, MAX_INTERPOLATION_DEPTH,
rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, source=None, filename=None):
# Exactly one of `source'/`filename' arguments has to be given.
# `filename' kept for compatibility.
if filename and source:
raise ValueError("Cannot specify both `filename' and `source'. "
"Use `source'.")
elif not filename and not source:
raise ValueError("Required argument `source' not given.")
elif filename:
source = filename
Error.__init__(self, 'Source contains parsing errors: %r' % source)
self.source = source
self.errors = []
self.args = (source, )
@property
def filename(self):
"""Deprecated, use `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
return self.source
@filename.setter
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %r, line: %d\n%r' %
(filename, lineno, line))
self.source = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
# Used in parser getters to indicate the default behaviour when a specific
# option is not found is to raise an exception. Created to enable `None' as
# a valid fallback value.
_UNSET = object()
class Interpolation:
"""Dummy interpolation that passes the value through with no changes."""
def before_get(self, parser, section, option, value, defaults):
return value
def before_set(self, parser, section, option, value):
return value
def before_read(self, parser, section, option, value):
return value
def before_write(self, parser, section, option, value):
return value
class BasicInterpolation(Interpolation):
"""Interpolation as implemented in the classic ConfigParser.
The option values can contain format strings which refer to other values in
the same section, or values in the special default section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand. If a user needs to use a bare % in
a configuration file, she can escape it by writing %%. Other % usage
is considered a user error and raises `InterpolationSyntaxError'."""
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('%%', '') # escaped percent signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
rawval = parser.get(section, option, raw=True, fallback=rest)
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rawval)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = parser.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rawval, var) from None
if "%" in v:
self._interpolate_some(parser, option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', "
"found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
"""Advanced variant of interpolation, supports the syntax used by
`zc.buildout'. Enables interpolation between sections."""
_KEYCRE = re.compile(r"\$\{([^}]+)\}")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('$$', '') # escaped dollar signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '$' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('$')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
rawval = parser.get(section, option, raw=True, fallback=rest)
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rawval)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if len(path) == 1:
opt = parser.optionxform(path[0])
v = map[opt]
elif len(path) == 2:
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw=True)
else:
raise InterpolationSyntaxError(
option, section,
"More than one ':' found: %r" % (rest,))
except (KeyError, NoSectionError, NoOptionError):
raise InterpolationMissingOptionError(
option, section, rawval, ":".join(path)) from None
if "$" in v:
self._interpolate_some(parser, opt, accum, v, sect,
dict(parser.items(sect, raw=True)),
depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
"""Deprecated interpolation used in old versions of ConfigParser.
Use BasicInterpolation or ExtendedInterpolation instead."""
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def before_get(self, parser, section, option, value, vars):
rawval = value
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
replace = functools.partial(self._interpolation_replace,
parser=parser)
value = self._KEYCRE.sub(replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0]) from None
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
def before_set(self, parser, section, option, value):
return value
@staticmethod
def _interpolation_replace(match, parser):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, empty_lines_in_values=True,
default_section=DEFAULTSECT,
interpolation=_UNSET, converters=_UNSET):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
self._converters = ConverterMapping(self)
self._proxies = self._dict()
self._proxies[default_section] = SectionProxy(self, default_section)
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
self._empty_lines_in_values = empty_lines_in_values
self.default_section=default_section
self._interpolation = interpolation
if self._interpolation is _UNSET:
self._interpolation = self._DEFAULT_INTERPOLATION
if self._interpolation is None:
self._interpolation = Interpolation()
if converters is not _UNSET:
self._converters.update(converters)
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section) from None
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or an iterable of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify an iterable of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the iterable will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, (str, os.PathLike)):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except OSError:
continue
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
read_ok.append(filename)
return read_ok
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
If the key is not found and `fallback' is provided, it is used as
a fallback value. `None' can be provided as a `fallback' value.
If interpolation is enabled and the optional argument `raw' is False,
all interpolations are expanded in the return values.
Arguments `raw', `vars', and `fallback' are keyword only.
The section DEFAULT is special.
"""
try:
d = self._unify_values(section, vars)
except NoSectionError:
if fallback is _UNSET:
raise
else:
return fallback
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
if fallback is _UNSET:
raise NoOptionError(option, section)
else:
return fallback
if raw or value is None:
return value
else:
return self._interpolation.before_get(self, section, option, value,
d)
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def _get_conv(self, section, option, conv, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
try:
return self._get(section, conv, option, raw=raw, vars=vars,
**kwargs)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
return fallback
# getint, getfloat and getboolean provided directly for backwards compat
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
return self._get_conv(section, option, int, raw=raw, vars=vars,
fallback=fallback, **kwargs)
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
return self._get_conv(section, option, float, raw=raw, vars=vars,
fallback=fallback, **kwargs)
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET, **kwargs):
return self._get_conv(section, option, self._convert_to_boolean,
raw=raw, vars=vars, fallback=fallback, **kwargs)
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section) from None
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section) from None
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
self._join_multiline_values()
# if any parsing errors occurred, raise an exception
if e:
raise e
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
@property
def converters(self):
return self._converters
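# Illustrative sketch (not part of the original module): RawConfigParser uses
# the pass-through Interpolation above by default, so '%' needs no escaping
# and values come back verbatim, unlike ConfigParser (below).
def _demo_raw_parser():
    raw = RawConfigParser()
    raw.read_string("[s]\nvalue = 50%\n")
    assert raw.get('s', 'value') == '50%'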
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
for conv in parser.converters:
key = 'get' + conv
getter = functools.partial(self.get, _impl=getattr(parser, key))
setattr(self, key, getter)
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
def get(self, option, fallback=None, *, raw=False, vars=None,
_impl=None, **kwargs):
"""Get an option value.
Unless `fallback` is provided, `None` will be returned if the option
is not found.
"""
# If `_impl` is provided, it should be a getter method on the parser
# object that provides the desired type conversion.
if not _impl:
_impl = self._parser.get
return _impl(self._name, option, raw=raw, vars=vars,
fallback=fallback, **kwargs)
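# Illustrative sketch (not part of the original module): the mapping protocol
# exposed through SectionProxy above -- parser['section'] returns a proxy
# whose item access, membership tests and typed getters delegate back to the
# parser.
def _demo_section_proxy():
    parser = ConfigParser()
    parser.read_string("[db]\nuser = admin\n")
    section = parser['db']                  # a SectionProxy
    assert section['user'] == 'admin'
    assert 'user' in section
    section['password'] = 'secret'          # writes through to the parser
    assert parser.get('db', 'password') == 'secret'
    assert section.getint('port', fallback=5432) == 5432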
class ConverterMapping(MutableMapping):
"""Enables reuse of get*() methods between the parser and section proxies.
If a parser class implements a getter directly, the value for the given
key will be ``None``. The presence of the converter name here enables
section proxies to find and use the implementation on the parser class.
"""
GETTERCRE = re.compile(r"^get(?P<name>.+)$")
def __init__(self, parser):
self._parser = parser
self._data = {}
for getter in dir(self._parser):
m = self.GETTERCRE.match(getter)
if not m or not callable(getattr(self._parser, getter)):
continue
self._data[m.group('name')] = None # See class docstring.
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
try:
k = 'get' + key
except TypeError:
raise ValueError('Incompatible key: {} (type: {})'
''.format(key, type(key)))
if k == 'get':
raise ValueError('Incompatible key: cannot use "" as a name')
self._data[key] = value
func = functools.partial(self._parser._get_conv, conv=value)
func.converter = value
setattr(self._parser, k, func)
for proxy in self._parser.values():
getter = functools.partial(proxy.get, _impl=func)
setattr(proxy, k, getter)
def __delitem__(self, key):
try:
k = 'get' + (key or None)
except TypeError:
raise KeyError(key)
del self._data[key]
for inst in itertools.chain((self._parser,), self._parser.values()):
try:
delattr(inst, k)
except AttributeError:
# don't raise since the entry was present in _data, silently
# clean up
continue
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
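# Illustrative sketch (not part of the original module): registering a custom
# converter through the `converters` constructor argument. As the
# ConverterMapping docstring above describes, each entry grows a matching
# get*() method on the parser and on every section proxy.
def _demo_custom_converter():
    parser = ConfigParser(converters={'list': lambda v: v.split(',')})
    parser.read_string("[s]\nnames = a,b,c\n")
    assert parser.getlist('s', 'names') == ['a', 'b', 'c']
    assert parser['s'].getlist('names') == ['a', 'b', 'c']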
cosmopolitan/third_party/python/Lib/tabnanny.py
#! /usr/bin/env python3
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error as msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by process_tokens() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print statement.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print("%r: listing directory" % (file,))
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = tokenize.open(file)
except OSError as msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print("checking %r ..." % file)
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError as msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError as msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag as nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
print("offending line: %r" % (line,))
print(nag.get_msg())
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print(file)
else: print(file, badline, repr(line))
return
finally:
f.close()
if verbose:
print("%r: Clean bill of health." % (file,))
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i//tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i//ts + 1)*ts*count[i] =
# trailing + ts * sum (i//ts + 1)*count[i] =
# trailing + ts * sum i//ts*count[i] + count[i] =
# trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i//ts*count[i]) + num_tabs]
# and note that i//ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i//tabsize * count[i]
return trailing + tabsize * (il + self.nt)
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = (str(tup[0]) for tup in w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
cosmopolitan/third_party/python/Lib/ssl.py
# Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLS
PROTOCOL_TLS_CLIENT
PROTOCOL_TLS_SERVER
PROTOCOL_TLSv1
PROTOCOL_TLSv1_1
PROTOCOL_TLSv1_2
The following constants identify various SSL alert message descriptions as per
http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6
ALERT_DESCRIPTION_CLOSE_NOTIFY
ALERT_DESCRIPTION_UNEXPECTED_MESSAGE
ALERT_DESCRIPTION_BAD_RECORD_MAC
ALERT_DESCRIPTION_RECORD_OVERFLOW
ALERT_DESCRIPTION_DECOMPRESSION_FAILURE
ALERT_DESCRIPTION_HANDSHAKE_FAILURE
ALERT_DESCRIPTION_BAD_CERTIFICATE
ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE
ALERT_DESCRIPTION_CERTIFICATE_REVOKED
ALERT_DESCRIPTION_CERTIFICATE_EXPIRED
ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN
ALERT_DESCRIPTION_ILLEGAL_PARAMETER
ALERT_DESCRIPTION_UNKNOWN_CA
ALERT_DESCRIPTION_ACCESS_DENIED
ALERT_DESCRIPTION_DECODE_ERROR
ALERT_DESCRIPTION_DECRYPT_ERROR
ALERT_DESCRIPTION_PROTOCOL_VERSION
ALERT_DESCRIPTION_INSUFFICIENT_SECURITY
ALERT_DESCRIPTION_INTERNAL_ERROR
ALERT_DESCRIPTION_USER_CANCELLED
ALERT_DESCRIPTION_NO_RENEGOTIATION
ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
ALERT_DESCRIPTION_UNRECOGNIZED_NAME
ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE
ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE
ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY
"""
import ipaddress
import textwrap
import re
import sys
import os
from collections import namedtuple
from enum import Enum as _Enum, IntEnum as _IntEnum, IntFlag as _IntFlag
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import _SSLContext, MemoryBIO, SSLSession
from _ssl import (
SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError,
SSLSyscallError, SSLEOFError,
)
from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj
from _ssl import RAND_status, RAND_add, RAND_bytes, RAND_pseudo_bytes
try:
from _ssl import RAND_egd
except ImportError:
# LibreSSL does not provide RAND_egd
pass
from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN, HAS_TLSv1_3
from _ssl import _OPENSSL_API_VERSION
_IntEnum._convert(
'_SSLMethod', __name__,
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=_ssl)
_IntFlag._convert(
'Options', __name__,
lambda name: name.startswith('OP_'),
source=_ssl)
_IntEnum._convert(
'AlertDescription', __name__,
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=_ssl)
_IntEnum._convert(
'SSLErrorNumber', __name__,
lambda name: name.startswith('SSL_ERROR_'),
source=_ssl)
_IntFlag._convert(
'VerifyFlags', __name__,
lambda name: name.startswith('VERIFY_'),
source=_ssl)
_IntEnum._convert(
'VerifyMode', __name__,
lambda name: name.startswith('CERT_'),
source=_ssl)
PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_SSLv23 = _SSLMethod.PROTOCOL_TLS
_PROTOCOL_NAMES = {value: name for name, value in _SSLMethod.__members__.items()}
_SSLv2_IF_EXISTS = getattr(_SSLMethod, 'PROTOCOL_SSLv2', None)
if sys.platform == "win32":
from _ssl import enum_certificates, enum_crls
from socket import socket, AF_INET, SOCK_STREAM, create_connection
from socket import SOL_SOCKET, SO_TYPE
import base64 # for DER-to-PEM translation
import errno
import warnings
socket_error = OSError # keep that public name in module namespace
if _ssl.HAS_TLS_UNIQUE:
CHANNEL_BINDING_TYPES = ['tls-unique']
else:
CHANNEL_BINDING_TYPES = []
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Enable a better set of ciphers by default
# This list has been explicitly chosen to:
# * TLS 1.3 ChaCha20 and AES-GCM cipher suites
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer AEAD over CBC for better performance and security
# * Prefer AES-GCM over ChaCha20 because most platforms have AES-NI
# (ChaCha20 needs OpenSSL 1.1.0 or patched 1.0.2)
# * Prefer any AES-GCM and ChaCha20 over any AES-CBC for better
# performance and security
# * Then Use HIGH cipher suites as a fallback
# * Disable NULL authentication, NULL encryption, 3DES and MD5 MACs
# for security reasons
_DEFAULT_CIPHERS = (
'TLS13-AES-256-GCM-SHA384:TLS13-CHACHA20-POLY1305-SHA256:'
'TLS13-AES-128-GCM-SHA256:'
'ECDH+AESGCM:ECDH+CHACHA20:DH+AESGCM:DH+CHACHA20:ECDH+AES256:DH+AES256:'
'ECDH+AES128:DH+AES:ECDH+HIGH:DH+HIGH:RSA+AESGCM:RSA+AES:RSA+HIGH:'
'!aNULL:!eNULL:!MD5:!3DES'
)
# Restricted and more secure ciphers for the server side
# This list has been explicitly chosen to:
# * TLS 1.3 ChaCha20 and AES-GCM cipher suites
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer AEAD over CBC for better performance and security
# * Prefer AES-GCM over ChaCha20 because most platforms have AES-NI
# * Prefer any AES-GCM and ChaCha20 over any AES-CBC for better
# performance and security
# * Then Use HIGH cipher suites as a fallback
# * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, RC4, and
# 3DES for security reasons
_RESTRICTED_SERVER_CIPHERS = (
'TLS13-AES-256-GCM-SHA384:TLS13-CHACHA20-POLY1305-SHA256:'
'TLS13-AES-128-GCM-SHA256:'
'ECDH+AESGCM:ECDH+CHACHA20:DH+AESGCM:DH+CHACHA20:ECDH+AES256:DH+AES256:'
'ECDH+AES128:DH+AES:ECDH+HIGH:DH+HIGH:RSA+AESGCM:RSA+AES:RSA+HIGH:'
'!aNULL:!eNULL:!MD5:!DSS:!RC4:!3DES'
)
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
leftmost, *remainder = dn.split(r'.')
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
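# Behavior sketch (illustrative, not part of the module): the wildcard is
# translated into a regex that never crosses a label boundary, e.g.:
#
#   _dnsname_match('*.example.com', 'www.example.com')    # truthy: matches
#   _dnsname_match('*.example.com', 'a.b.example.com')    # None: '*' cannot
#                                                         # span a dot
#   _dnsname_match('f*.example.com', 'foo.example.com')   # truthy: 'f' plus
#                                                         # any dotless suffix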
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
ip = ipaddress.ip_address(ipname.rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
host_ip = ipaddress.ip_address(hostname)
except ValueError:
# Not an IP address (common case)
host_ip = None
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
DefaultVerifyPaths = namedtuple("DefaultVerifyPaths",
"cafile capath openssl_cafile_env openssl_cafile openssl_capath_env "
"openssl_capath")
def get_default_verify_paths():
"""Return paths to default cafile and capath.
"""
parts = _ssl.get_default_verify_paths()
# environment vars shadow paths
cafile = os.environ.get(parts[0], parts[1])
capath = os.environ.get(parts[2], parts[3])
return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None,
capath if os.path.isdir(capath) else None,
*parts)
class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")):
"""ASN.1 object identifier lookup
"""
__slots__ = ()
def __new__(cls, oid):
return super().__new__(cls, *_txt2obj(oid, name=False))
@classmethod
def fromnid(cls, nid):
"""Create _ASN1Object from OpenSSL numeric ID
"""
return super().__new__(cls, *_nid2obj(nid))
@classmethod
def fromname(cls, name):
"""Create _ASN1Object from short name, long name or OID
"""
return super().__new__(cls, *_txt2obj(name, name=True))
class Purpose(_ASN1Object, _Enum):
"""SSLContext purpose flags with X509v3 Extended Key Usage objects
"""
SERVER_AUTH = '1.3.6.1.5.5.7.3.1'
CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'
class SSLContext(_SSLContext):
"""An SSLContext holds various SSL-related configuration options and
data, such as certificates and possibly a private key."""
__slots__ = ('protocol', '__weakref__')
_windows_cert_stores = ("CA", "ROOT")
def __new__(cls, protocol=PROTOCOL_TLS, *args, **kwargs):
self = _SSLContext.__new__(cls, protocol)
if protocol != _SSLv2_IF_EXISTS:
self.set_ciphers(_DEFAULT_CIPHERS)
return self
def __init__(self, protocol=PROTOCOL_TLS):
self.protocol = protocol
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None, session=None):
return SSLSocket(sock=sock, server_side=server_side,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
server_hostname=server_hostname,
_context=self, _session=session)
def wrap_bio(self, incoming, outgoing, server_side=False,
server_hostname=None, session=None):
sslobj = self._wrap_bio(incoming, outgoing, server_side=server_side,
server_hostname=server_hostname)
return SSLObject(sslobj, session=session)
def set_npn_protocols(self, npn_protocols):
protos = bytearray()
for protocol in npn_protocols:
b = bytes(protocol, 'ascii')
if len(b) == 0 or len(b) > 255:
raise SSLError('NPN protocols must be 1 to 255 in length')
protos.append(len(b))
protos.extend(b)
self._set_npn_protocols(protos)
def set_alpn_protocols(self, alpn_protocols):
protos = bytearray()
for protocol in alpn_protocols:
b = bytes(protocol, 'ascii')
if len(b) == 0 or len(b) > 255:
raise SSLError('ALPN protocols must be 1 to 255 in length')
protos.append(len(b))
protos.extend(b)
self._set_alpn_protocols(protos)
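    # Wire-format sketch (illustrative): both packers above emit each protocol
    # name as a one-byte length followed by its ASCII bytes, so
    # set_alpn_protocols(['h2', 'http/1.1']) hands OpenSSL the buffer
    # b'\x02h2\x08http/1.1'; after the handshake,
    # SSLSocket.selected_alpn_protocol() reports the server's choice or None.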
def _load_windows_store_certs(self, storename, purpose):
certs = bytearray()
try:
for cert, encoding, trust in enum_certificates(storename):
# CA certs are never PKCS#7 encoded
if encoding == "x509_asn":
if trust is True or purpose.oid in trust:
certs.extend(cert)
except PermissionError:
warnings.warn("unable to enumerate Windows certificate store")
if certs:
self.load_verify_locations(cadata=certs)
return certs
def load_default_certs(self, purpose=Purpose.SERVER_AUTH):
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
if sys.platform == "win32":
for storename in self._windows_cert_stores:
self._load_windows_store_certs(storename, purpose)
self.set_default_verify_paths()
@property
def options(self):
return Options(super().options)
@options.setter
def options(self, value):
super(SSLContext, SSLContext).options.__set__(self, value)
@property
def verify_flags(self):
return VerifyFlags(super().verify_flags)
@verify_flags.setter
def verify_flags(self, value):
super(SSLContext, SSLContext).verify_flags.__set__(self, value)
@property
def verify_mode(self):
value = super().verify_mode
try:
return VerifyMode(value)
except ValueError:
return value
@verify_mode.setter
def verify_mode(self, value):
super(SSLContext, SSLContext).verify_mode.__set__(self, value)
def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None,
capath=None, cadata=None):
"""Create a SSLContext object with default settings.
NOTE: The protocol and settings may change anytime without prior
deprecation. The values represent a fair balance between maximum
compatibility and security.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
# SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
# OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
# by default.
context = SSLContext(PROTOCOL_TLS)
if purpose == Purpose.SERVER_AUTH:
# verify certs and host name in client mode
context.verify_mode = CERT_REQUIRED
context.check_hostname = True
elif purpose == Purpose.CLIENT_AUTH:
context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
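# Typical client-side usage (illustrative sketch; the host is hypothetical):
#
#   import socket, ssl
#   ctx = ssl.create_default_context()   # CERT_REQUIRED + check_hostname
#   with socket.create_connection(('example.com', 443)) as raw:
#       with ctx.wrap_socket(raw, server_hostname='example.com') as tls:
#           print(tls.version(), tls.cipher())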
def _create_unverified_context(protocol=PROTOCOL_TLS, *, cert_reqs=None,
check_hostname=False, purpose=Purpose.SERVER_AUTH,
certfile=None, keyfile=None,
cafile=None, capath=None, cadata=None):
"""Create a SSLContext object for Python stdlib modules
All Python stdlib modules shall use this function to create SSLContext
objects in order to keep common settings in one place. The configuration
    is less restrictive than create_default_context()'s to increase backward
compatibility.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
# SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
# OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
# by default.
context = SSLContext(protocol)
if cert_reqs is not None:
context.verify_mode = cert_reqs
context.check_hostname = check_hostname
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile or keyfile:
context.load_cert_chain(certfile, keyfile)
# load CA root certs
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
# Used by http.client if no context is explicitly passed.
_create_default_https_context = create_default_context
# Backwards compatibility alias, even though it's not a public name.
_create_stdlib_context = _create_unverified_context
class SSLObject:
"""This class implements an interface on top of a low-level SSL object as
implemented by OpenSSL. This object captures the state of an SSL connection
but does not provide any network IO itself. IO needs to be performed
through separate "BIO" objects which are OpenSSL's IO abstraction layer.
This class does not have a public constructor. Instances are returned by
    ``SSLContext.wrap_bio``. This class is typically used by framework authors
    who want to implement asynchronous IO for SSL through memory buffers.
When compared to ``SSLSocket``, this object lacks the following features:
* Any form of network IO, including methods such as ``recv`` and ``send``.
* The ``do_handshake_on_connect`` and ``suppress_ragged_eofs`` machinery.
"""
def __init__(self, sslobj, owner=None, session=None):
self._sslobj = sslobj
# Note: _sslobj takes a weak reference to owner
self._sslobj.owner = owner or self
if session is not None:
self._sslobj.session = session
@property
def context(self):
"""The SSLContext that is currently in use."""
return self._sslobj.context
@context.setter
def context(self, ctx):
self._sslobj.context = ctx
@property
def session(self):
"""The SSLSession for client socket."""
return self._sslobj.session
@session.setter
def session(self, session):
self._sslobj.session = session
@property
def session_reused(self):
"""Was the client session reused during handshake"""
return self._sslobj.session_reused
@property
def server_side(self):
"""Whether this is a server-side socket."""
return self._sslobj.server_side
@property
def server_hostname(self):
"""The currently set server hostname (for SNI), or ``None`` if no
        server hostname is set."""
return self._sslobj.server_hostname
def read(self, len=1024, buffer=None):
"""Read up to 'len' bytes from the SSL object and return them.
If 'buffer' is provided, read into this buffer and return the number of
bytes read.
"""
if buffer is not None:
v = self._sslobj.read(len, buffer)
else:
v = self._sslobj.read(len)
return v
def write(self, data):
"""Write 'data' to the SSL object and return the number of bytes
written.
The 'data' argument must support the buffer interface.
"""
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the certificate provided
by the other end of the SSL channel.
Return None if no certificate was provided, {} if a certificate was
provided, but not validated.
"""
return self._sslobj.peer_certificate(binary_form)
def selected_npn_protocol(self):
"""Return the currently selected NPN protocol as a string, or ``None``
if a next protocol was not negotiated or if NPN is not supported by one
of the peers."""
if _ssl.HAS_NPN:
return self._sslobj.selected_npn_protocol()
def selected_alpn_protocol(self):
"""Return the currently selected ALPN protocol as a string, or ``None``
if a next protocol was not negotiated or if ALPN is not supported by one
of the peers."""
if _ssl.HAS_ALPN:
return self._sslobj.selected_alpn_protocol()
def cipher(self):
"""Return the currently selected cipher as a 3-tuple ``(name,
ssl_version, secret_bits)``."""
return self._sslobj.cipher()
def shared_ciphers(self):
"""Return a list of ciphers shared by the client during the handshake or
None if this is not a valid server connection.
"""
return self._sslobj.shared_ciphers()
def compression(self):
"""Return the current compression algorithm in use, or ``None`` if
compression was not negotiated or not supported by one of the peers."""
return self._sslobj.compression()
def pending(self):
"""Return the number of bytes that can be read immediately."""
return self._sslobj.pending()
def do_handshake(self):
"""Start the SSL/TLS handshake."""
self._sslobj.do_handshake()
if self.context.check_hostname:
if not self.server_hostname:
raise ValueError("check_hostname needs server_hostname "
"argument")
match_hostname(self.getpeercert(), self.server_hostname)
def unwrap(self):
"""Start the SSL shutdown handshake."""
return self._sslobj.shutdown()
def get_channel_binding(self, cb_type="tls-unique"):
"""Get channel binding data for current connection. Raise ValueError
if the requested `cb_type` is not supported. Return bytes of the data
or None if the data is not available (e.g. before the handshake)."""
if cb_type not in CHANNEL_BINDING_TYPES:
raise ValueError("Unsupported channel binding type")
if cb_type != "tls-unique":
raise NotImplementedError(
"{0} channel binding type not implemented"
.format(cb_type))
return self._sslobj.tls_unique_cb()
def version(self):
"""Return a string identifying the protocol version used by the
current SSL channel. """
return self._sslobj.version()
def verify_client_post_handshake(self):
return self._sslobj.verify_client_post_handshake()
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, sock=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_TLS, ca_certs=None,
do_handshake_on_connect=True,
family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
server_hostname=None,
_context=None, _session=None):
if _context:
self._context = _context
else:
if server_side and not certfile:
raise ValueError("certfile must be specified for server-side "
"operations")
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile and not keyfile:
keyfile = certfile
self._context = SSLContext(ssl_version)
self._context.verify_mode = cert_reqs
if ca_certs:
self._context.load_verify_locations(ca_certs)
if certfile:
self._context.load_cert_chain(certfile, keyfile)
if npn_protocols:
self._context.set_npn_protocols(npn_protocols)
if ciphers:
self._context.set_ciphers(ciphers)
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.ca_certs = ca_certs
self.ciphers = ciphers
# Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
# mixed in.
if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
raise NotImplementedError("only stream sockets are supported")
if server_side:
if server_hostname:
raise ValueError("server_hostname can only be specified "
"in client mode")
if _session is not None:
raise ValueError("session can only be specified in "
"client mode")
if self._context.check_hostname and not server_hostname:
raise ValueError("check_hostname requires server_hostname")
self._session = _session
self.server_side = server_side
self.server_hostname = server_hostname
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
if sock is not None:
socket.__init__(self,
family=sock.family,
type=sock.type,
proto=sock.proto,
fileno=sock.fileno())
self.settimeout(sock.gettimeout())
sock.detach()
elif fileno is not None:
socket.__init__(self, fileno=fileno)
else:
socket.__init__(self, family=family, type=type, proto=proto)
# See if we are connected
try:
self.getpeername()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
connected = False
else:
connected = True
self._closed = False
self._sslobj = None
self._connected = connected
if connected:
# create the SSL object
try:
sslobj = self._context._wrap_socket(self, server_side,
server_hostname)
self._sslobj = SSLObject(sslobj, owner=self,
session=self._session)
if do_handshake_on_connect:
timeout = self.gettimeout()
if timeout == 0.0:
# non-blocking
raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
self.do_handshake()
except (OSError, ValueError):
self.close()
raise
@property
def context(self):
return self._context
@context.setter
def context(self, ctx):
self._context = ctx
self._sslobj.context = ctx
@property
def session(self):
"""The SSLSession for client socket."""
if self._sslobj is not None:
return self._sslobj.session
@session.setter
def session(self, session):
self._session = session
if self._sslobj is not None:
self._sslobj.session = session
@property
def session_reused(self):
"""Was the client session reused during handshake"""
if self._sslobj is not None:
return self._sslobj.session_reused
def dup(self):
raise NotImplementedError("Can't dup() %s instances" %
self.__class__.__name__)
def _checkClosed(self, msg=None):
# raise an exception here if you wish to check for spurious closes
pass
def _check_connected(self):
if not self._connected:
# getpeername() will raise ENOTCONN if the socket is really
# not connected; note that we can be connected even without
# _connected being set, e.g. if connect() first returned
# EAGAIN.
self.getpeername()
def read(self, len=1024, buffer=None):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
self._checkClosed()
if not self._sslobj:
raise ValueError("Read on closed or unwrapped SSL socket.")
try:
return self._sslobj.read(len, buffer)
except SSLError as x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
if buffer is not None:
return 0
else:
return b''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
self._checkClosed()
if not self._sslobj:
raise ValueError("Write on closed or unwrapped SSL socket.")
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
self._checkClosed()
self._check_connected()
return self._sslobj.getpeercert(binary_form)
def selected_npn_protocol(self):
self._checkClosed()
if not self._sslobj or not _ssl.HAS_NPN:
return None
else:
return self._sslobj.selected_npn_protocol()
def selected_alpn_protocol(self):
self._checkClosed()
if not self._sslobj or not _ssl.HAS_ALPN:
return None
else:
return self._sslobj.selected_alpn_protocol()
def cipher(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def shared_ciphers(self):
self._checkClosed()
if not self._sslobj:
return None
return self._sslobj.shared_ciphers()
def compression(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.compression()
def send(self, data, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
return self._sslobj.write(data)
else:
return socket.send(self, data, flags)
def sendto(self, data, flags_or_addr, addr=None):
self._checkClosed()
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
elif addr is None:
return socket.sendto(self, data, flags_or_addr)
else:
return socket.sendto(self, data, flags_or_addr, addr)
def sendmsg(self, *args, **kwargs):
# Ensure programs don't send data unencrypted if they try to
# use this method.
raise NotImplementedError("sendmsg not allowed on instances of %s" %
self.__class__)
def sendall(self, data, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
count = 0
with memoryview(data) as view, view.cast("B") as byte_view:
amount = len(byte_view)
while count < amount:
v = self.send(byte_view[count:])
count += v
else:
return socket.sendall(self, data, flags)
def sendfile(self, file, offset=0, count=None):
"""Send a file, possibly by using os.sendfile() if this is a
clear-text socket. Return the total number of bytes sent.
"""
if self._sslobj is None:
# os.sendfile() works with plain sockets only
return super().sendfile(file, offset, count)
else:
return self._sendfile_use_send(file, offset, count)
def recv(self, buflen=1024, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return socket.recv(self, buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
self._checkClosed()
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
return self.read(nbytes, buffer)
else:
return socket.recv_into(self, buffer, nbytes, flags)
def recvfrom(self, buflen=1024, flags=0):
self._checkClosed()
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom(self, buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
self._checkClosed()
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom_into(self, buffer, nbytes, flags)
def recvmsg(self, *args, **kwargs):
raise NotImplementedError("recvmsg not allowed on instances of %s" %
self.__class__)
def recvmsg_into(self, *args, **kwargs):
raise NotImplementedError("recvmsg_into not allowed on instances of "
"%s" % self.__class__)
def pending(self):
self._checkClosed()
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def shutdown(self, how):
self._checkClosed()
self._sslobj = None
socket.shutdown(self, how)
def unwrap(self):
if self._sslobj:
s = self._sslobj.unwrap()
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def verify_client_post_handshake(self):
if self._sslobj:
return self._sslobj.verify_client_post_handshake()
else:
raise ValueError("No SSL wrapper around " + str(self))
def _real_close(self):
self._sslobj = None
socket._real_close(self)
def do_handshake(self, block=False):
"""Perform a TLS/SSL handshake."""
self._check_connected()
timeout = self.gettimeout()
try:
if timeout == 0.0 and block:
self.settimeout(None)
self._sslobj.do_handshake()
finally:
self.settimeout(timeout)
def _real_connect(self, addr, connect_ex):
if self.server_side:
raise ValueError("can't connect in server-side mode")
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._connected:
raise ValueError("attempt to connect already-connected SSLSocket!")
sslobj = self.context._wrap_socket(self, False, self.server_hostname)
self._sslobj = SSLObject(sslobj, owner=self,
session=self._session)
try:
if connect_ex:
rc = socket.connect_ex(self, addr)
else:
rc = None
socket.connect(self, addr)
if not rc:
self._connected = True
if self.do_handshake_on_connect:
self.do_handshake()
return rc
except (OSError, ValueError):
self._sslobj = None
raise
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
self._real_connect(addr, False)
def connect_ex(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
return self._real_connect(addr, True)
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
newsock = self.context.wrap_socket(newsock,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs,
server_side=True)
return newsock, addr
def get_channel_binding(self, cb_type="tls-unique"):
"""Get channel binding data for current connection. Raise ValueError
if the requested `cb_type` is not supported. Return bytes of the data
or None if the data is not available (e.g. before the handshake).
"""
if self._sslobj is None:
return None
return self._sslobj.get_channel_binding(cb_type)
def version(self):
"""
Return a string identifying the protocol version used by the
current SSL channel, or None if there is no established channel.
"""
if self._sslobj is None:
return None
return self._sslobj.version()
def wrap_socket(sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_TLS, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
ciphers=None):
return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
ciphers=ciphers)
# some utility functions
def cert_time_to_seconds(cert_time):
"""Return the time in seconds since the Epoch, given the timestring
representing the "notBefore" or "notAfter" date from a certificate
in ``"%b %d %H:%M:%S %Y %Z"`` strptime format (C locale).
"notBefore" or "notAfter" dates must use UTC (RFC 5280).
Month is one of: Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
UTC should be specified as GMT (see ASN1_TIME_print())
"""
from time import strptime
from calendar import timegm
months = (
"Jan","Feb","Mar","Apr","May","Jun",
"Jul","Aug","Sep","Oct","Nov","Dec"
)
time_format = ' %d %H:%M:%S %Y GMT' # NOTE: no month, fixed GMT
try:
month_number = months.index(cert_time[:3].title()) + 1
except ValueError:
raise ValueError('time data %r does not match '
'format "%%b%s"' % (cert_time, time_format))
else:
# found valid month
tt = strptime(cert_time[3:], time_format)
# return an integer, the previous mktime()-based implementation
# returned a float (fractional seconds are always zero here).
return timegm((tt[0], month_number) + tt[2:6])
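# Worked example (illustrative): the timestamp is parsed with the fixed GMT
# zone and converted via calendar.timegm(), so the result is UTC seconds
# since the Epoch:
#
#   cert_time_to_seconds('May  9 00:00:00 2007 GMT')   # -> 1178668800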
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
f = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict')
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodebytes(d.encode('ASCII', 'strict'))
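# Round-trip sketch (illustrative): DER_cert_to_PEM_cert() base64-encodes the
# DER bytes, wraps them at 64 columns and brackets them with the PEM
# header/footer; PEM_cert_to_DER_cert() inverts that exactly, so for any
# byte string `der`:
#
#   assert PEM_cert_to_DER_cert(DER_cert_to_PEM_cert(der)) == der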
def get_server_certificate(addr, ssl_version=PROTOCOL_TLS, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if ca_certs is not None:
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
context = _create_stdlib_context(ssl_version,
cert_reqs=cert_reqs,
cafile=ca_certs)
with create_connection(addr) as sock:
with context.wrap_socket(sock) as sslsock:
dercert = sslsock.getpeercert(True)
return DER_cert_to_PEM_cert(dercert)
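# Illustrative usage (the host is hypothetical): fetch a server certificate
# as a PEM string, optionally validating it against a CA bundle:
#
#   pem = get_server_certificate(('example.com', 443))
#   pem = get_server_certificate(('example.com', 443),
#                                ca_certs='/path/to/ca-bundle.pem')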
def get_protocol_name(protocol_code):
return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
| 44,793 | 1,238 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/tracemalloc.py | from collections.abc import Sequence, Iterable
from functools import total_ordering
import fnmatch
import linecache
import os.path
import pickle
# Import types and functions implemented in C
from _tracemalloc import *
from _tracemalloc import _get_object_traceback, _get_traces
def _format_size(size, sign):
for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
if abs(size) < 100 and unit != 'B':
# 3 digits (xx.x UNIT)
if sign:
return "%+.1f %s" % (size, unit)
else:
return "%.1f %s" % (size, unit)
if abs(size) < 10 * 1024 or unit == 'TiB':
# 4 or 5 digits (xxxx UNIT)
if sign:
return "%+.0f %s" % (size, unit)
else:
return "%.0f %s" % (size, unit)
size /= 1024
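# Behavior sketch (illustrative): sizes scale through binary units, using one
# decimal below 100 of a unit and whole numbers up to 10240 of a unit:
#
#   _format_size(512, False)          # '512 B'
#   _format_size(512, True)           # '+512 B'
#   _format_size(50 * 1024, False)    # '50.0 KiB'
#   _format_size(110 * 1024, False)   # '110 KiB'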
class Statistic:
"""
    Statistic on memory allocations: the total size and count of memory
    blocks that share the same traceback.
"""
__slots__ = ('traceback', 'size', 'count')
def __init__(self, traceback, size, count):
self.traceback = traceback
self.size = size
self.count = count
def __hash__(self):
return hash((self.traceback, self.size, self.count))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.count == other.count)
def __str__(self):
text = ("%s: size=%s, count=%i"
% (self.traceback,
_format_size(self.size, False),
self.count))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<Statistic traceback=%r size=%i count=%i>'
% (self.traceback, self.size, self.count))
def _sort_key(self):
return (self.size, self.count, self.traceback)
class StatisticDiff:
"""
Statistic difference on memory allocations between an old and a new
Snapshot instance.
"""
__slots__ = ('traceback', 'size', 'size_diff', 'count', 'count_diff')
def __init__(self, traceback, size, size_diff, count, count_diff):
self.traceback = traceback
self.size = size
self.size_diff = size_diff
self.count = count
self.count_diff = count_diff
def __hash__(self):
return hash((self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def __eq__(self, other):
return (self.traceback == other.traceback
and self.size == other.size
and self.size_diff == other.size_diff
and self.count == other.count
and self.count_diff == other.count_diff)
def __str__(self):
text = ("%s: size=%s (%s), count=%i (%+i)"
% (self.traceback,
_format_size(self.size, False),
_format_size(self.size_diff, True),
self.count,
self.count_diff))
if self.count:
average = self.size / self.count
text += ", average=%s" % _format_size(average, False)
return text
def __repr__(self):
return ('<StatisticDiff traceback=%r size=%i (%+i) count=%i (%+i)>'
% (self.traceback, self.size, self.size_diff,
self.count, self.count_diff))
def _sort_key(self):
return (abs(self.size_diff), self.size,
abs(self.count_diff), self.count,
self.traceback)
def _compare_grouped_stats(old_group, new_group):
statistics = []
for traceback, stat in new_group.items():
previous = old_group.pop(traceback, None)
if previous is not None:
stat = StatisticDiff(traceback,
stat.size, stat.size - previous.size,
stat.count, stat.count - previous.count)
else:
stat = StatisticDiff(traceback,
stat.size, stat.size,
stat.count, stat.count)
statistics.append(stat)
for traceback, stat in old_group.items():
stat = StatisticDiff(traceback, 0, -stat.size, 0, -stat.count)
statistics.append(stat)
return statistics
@total_ordering
class Frame:
"""
Frame of a traceback.
"""
__slots__ = ("_frame",)
def __init__(self, frame):
# frame is a tuple: (filename: str, lineno: int)
self._frame = frame
@property
def filename(self):
return self._frame[0]
@property
def lineno(self):
return self._frame[1]
def __eq__(self, other):
return (self._frame == other._frame)
def __lt__(self, other):
return (self._frame < other._frame)
def __hash__(self):
return hash(self._frame)
def __str__(self):
return "%s:%s" % (self.filename, self.lineno)
def __repr__(self):
return "<Frame filename=%r lineno=%r>" % (self.filename, self.lineno)
@total_ordering
class Traceback(Sequence):
"""
Sequence of Frame instances sorted from the most recent frame
to the oldest frame.
"""
__slots__ = ("_frames",)
def __init__(self, frames):
Sequence.__init__(self)
# frames is a tuple of frame tuples: see Frame constructor for the
# format of a frame tuple
self._frames = frames
def __len__(self):
return len(self._frames)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(Frame(trace) for trace in self._frames[index])
else:
return Frame(self._frames[index])
def __contains__(self, frame):
return frame._frame in self._frames
def __hash__(self):
return hash(self._frames)
def __eq__(self, other):
return (self._frames == other._frames)
def __lt__(self, other):
return (self._frames < other._frames)
def __str__(self):
return str(self[0])
def __repr__(self):
return "<Traceback %r>" % (tuple(self),)
def format(self, limit=None):
lines = []
if limit is not None and limit < 0:
return lines
for frame in self[:limit]:
lines.append(' File "%s", line %s'
% (frame.filename, frame.lineno))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
lines.append(' %s' % line)
return lines
def get_object_traceback(obj):
"""
Get the traceback where the Python object *obj* was allocated.
Return a Traceback instance.
Return None if the tracemalloc module is not tracing memory allocations or
did not trace the allocation of the object.
"""
frames = _get_object_traceback(obj)
if frames is not None:
return Traceback(frames)
else:
return None
class Trace:
"""
Trace of a memory block.
"""
__slots__ = ("_trace",)
def __init__(self, trace):
# trace is a tuple: (domain: int, size: int, traceback: tuple).
# See Traceback constructor for the format of the traceback tuple.
self._trace = trace
@property
def domain(self):
return self._trace[0]
@property
def size(self):
return self._trace[1]
@property
def traceback(self):
return Traceback(self._trace[2])
def __eq__(self, other):
return (self._trace == other._trace)
def __hash__(self):
return hash(self._trace)
def __str__(self):
return "%s: %s" % (self.traceback, _format_size(self.size, False))
def __repr__(self):
return ("<Trace domain=%s size=%s, traceback=%r>"
% (self.domain, _format_size(self.size, False), self.traceback))
class _Traces(Sequence):
def __init__(self, traces):
Sequence.__init__(self)
# traces is a tuple of trace tuples: see Trace constructor
self._traces = traces
def __len__(self):
return len(self._traces)
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(Trace(trace) for trace in self._traces[index])
else:
return Trace(self._traces[index])
def __contains__(self, trace):
return trace._trace in self._traces
def __eq__(self, other):
return (self._traces == other._traces)
def __repr__(self):
return "<Traces len=%s>" % len(self)
def _normalize_filename(filename):
filename = os.path.normcase(filename)
if filename.endswith('.pyc'):
filename = filename[:-1]
return filename
class BaseFilter:
def __init__(self, inclusive):
self.inclusive = inclusive
def _match(self, trace):
raise NotImplementedError
class Filter(BaseFilter):
def __init__(self, inclusive, filename_pattern,
lineno=None, all_frames=False, domain=None):
super().__init__(inclusive)
self.inclusive = inclusive
self._filename_pattern = _normalize_filename(filename_pattern)
self.lineno = lineno
self.all_frames = all_frames
self.domain = domain
@property
def filename_pattern(self):
return self._filename_pattern
def _match_frame_impl(self, filename, lineno):
filename = _normalize_filename(filename)
if not fnmatch.fnmatch(filename, self._filename_pattern):
return False
if self.lineno is None:
return True
else:
return (lineno == self.lineno)
def _match_frame(self, filename, lineno):
return self._match_frame_impl(filename, lineno) ^ (not self.inclusive)
def _match_traceback(self, traceback):
if self.all_frames:
if any(self._match_frame_impl(filename, lineno)
for filename, lineno in traceback):
return self.inclusive
else:
return (not self.inclusive)
else:
filename, lineno = traceback[0]
return self._match_frame(filename, lineno)
def _match(self, trace):
domain, size, traceback = trace
res = self._match_traceback(traceback)
if self.domain is not None:
if self.inclusive:
return res and (domain == self.domain)
else:
return res or (domain != self.domain)
return res
class DomainFilter(BaseFilter):
def __init__(self, inclusive, domain):
super().__init__(inclusive)
self._domain = domain
@property
def domain(self):
return self._domain
def _match(self, trace):
domain, size, traceback = trace
return (domain == self.domain) ^ (not self.inclusive)
class Snapshot:
"""
Snapshot of traces of memory blocks allocated by Python.
"""
def __init__(self, traces, traceback_limit):
# traces is a tuple of trace tuples: see _Traces constructor for
# the exact format
self.traces = _Traces(traces)
self.traceback_limit = traceback_limit
def dump(self, filename):
"""
Write the snapshot into a file.
"""
with open(filename, "wb") as fp:
pickle.dump(self, fp, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(filename):
"""
Load a snapshot from a file.
"""
with open(filename, "rb") as fp:
return pickle.load(fp)
def _filter_trace(self, include_filters, exclude_filters, trace):
if include_filters:
if not any(trace_filter._match(trace)
for trace_filter in include_filters):
return False
if exclude_filters:
if any(not trace_filter._match(trace)
for trace_filter in exclude_filters):
return False
return True
def filter_traces(self, filters):
"""
        Create a new Snapshot instance with a filtered traces sequence.
        filters is a list of Filter or DomainFilter instances.  If filters
        is an empty list, return a new Snapshot instance with a copy of
        the traces.
"""
if not isinstance(filters, Iterable):
raise TypeError("filters must be a list of filters, not %s"
% type(filters).__name__)
if filters:
include_filters = []
exclude_filters = []
for trace_filter in filters:
if trace_filter.inclusive:
include_filters.append(trace_filter)
else:
exclude_filters.append(trace_filter)
new_traces = [trace for trace in self.traces._traces
if self._filter_trace(include_filters,
exclude_filters,
trace)]
else:
new_traces = self.traces._traces.copy()
return Snapshot(new_traces, self.traceback_limit)
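    # Filtering sketch (illustrative; the project path is hypothetical):
    #
    #   snapshot = snapshot.filter_traces([
    #       Filter(False, '<frozen importlib._bootstrap>'),
    #       Filter(True, '*/myproject/*'),
    #   ])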
def _group_by(self, key_type, cumulative):
if key_type not in ('traceback', 'filename', 'lineno'):
raise ValueError("unknown key_type: %r" % (key_type,))
if cumulative and key_type not in ('lineno', 'filename'):
raise ValueError("cumulative mode cannot by used "
"with key type %r" % key_type)
stats = {}
tracebacks = {}
if not cumulative:
for trace in self.traces._traces:
domain, size, trace_traceback = trace
try:
traceback = tracebacks[trace_traceback]
except KeyError:
if key_type == 'traceback':
frames = trace_traceback
elif key_type == 'lineno':
frames = trace_traceback[:1]
else: # key_type == 'filename':
frames = ((trace_traceback[0][0], 0),)
traceback = Traceback(frames)
tracebacks[trace_traceback] = traceback
try:
stat = stats[traceback]
stat.size += size
stat.count += 1
except KeyError:
stats[traceback] = Statistic(traceback, size, 1)
else:
# cumulative statistics
for trace in self.traces._traces:
domain, size, trace_traceback = trace
for frame in trace_traceback:
try:
traceback = tracebacks[frame]
except KeyError:
if key_type == 'lineno':
frames = (frame,)
else: # key_type == 'filename':
frames = ((frame[0], 0),)
traceback = Traceback(frames)
tracebacks[frame] = traceback
try:
stat = stats[traceback]
stat.size += size
stat.count += 1
except KeyError:
stats[traceback] = Statistic(traceback, size, 1)
return stats
def statistics(self, key_type, cumulative=False):
"""
Group statistics by key_type. Return a sorted list of Statistic
instances.
"""
grouped = self._group_by(key_type, cumulative)
statistics = list(grouped.values())
statistics.sort(reverse=True, key=Statistic._sort_key)
return statistics
def compare_to(self, old_snapshot, key_type, cumulative=False):
"""
Compute the differences with an old snapshot old_snapshot. Get
statistics as a sorted list of StatisticDiff instances, grouped by
        key_type.
"""
new_group = self._group_by(key_type, cumulative)
old_group = old_snapshot._group_by(key_type, cumulative)
statistics = _compare_grouped_stats(old_group, new_group)
statistics.sort(reverse=True, key=StatisticDiff._sort_key)
return statistics
def take_snapshot():
"""
Take a snapshot of traces of memory blocks allocated by Python.
"""
if not is_tracing():
raise RuntimeError("the tracemalloc module must be tracing memory "
"allocations to take a snapshot")
traces = _get_traces()
traceback_limit = get_traceback_limit()
return Snapshot(traces, traceback_limit)
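# Typical workflow (illustrative sketch):
#
#   import tracemalloc
#   tracemalloc.start()
#   before = tracemalloc.take_snapshot()
#   data = [bytes(1000) for _ in range(100)]    # hypothetical workload
#   after = tracemalloc.take_snapshot()
#   for stat in after.compare_to(before, 'lineno')[:3]:
#       print(stat)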
| 16,662 | 525 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/cProfile.py | #! /usr/bin/env python3
"""Python interface for the 'lsprof' profiler.
Compatible with the 'profile' module.
"""
__all__ = ["run", "runctx", "Profile"]
import _lsprof
import profile as _pyprofile
# ____________________________________________________________
# Simple interface
def run(statement, filename=None, sort=-1):
return _pyprofile._Utils(Profile).run(statement, filename, sort)
def runctx(statement, globals, locals, filename=None, sort=-1):
return _pyprofile._Utils(Profile).runctx(statement, globals, locals,
filename, sort)
run.__doc__ = _pyprofile.run.__doc__
runctx.__doc__ = _pyprofile.runctx.__doc__
# ____________________________________________________________
class Profile(_lsprof.Profiler):
"""Profile(timer=None, timeunit=None, subcalls=True, builtins=True)
Builds a profiler object using the specified timer function.
The default timer is a fast built-in one based on real time.
For custom timer functions returning integers, timeunit can
be a float specifying a scale (i.e. how long each integer unit
is, in seconds).
"""
# Most of the functionality is in the base class.
# This subclass only adds convenient and backward-compatible methods.
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()
def dump_stats(self, file):
import marshal
with open(file, 'wb') as f:
self.create_stats()
marshal.dump(self.stats, f)
def create_stats(self):
self.disable()
self.snapshot_stats()
def snapshot_stats(self):
entries = self.getstats()
self.stats = {}
callersdicts = {}
# call information
for entry in entries:
func = label(entry.code)
nc = entry.callcount # ncalls column of pstats (before '/')
cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
tt = entry.inlinetime # tottime column of pstats
ct = entry.totaltime # cumtime column of pstats
callers = {}
callersdicts[id(entry.code)] = callers
self.stats[func] = cc, nc, tt, ct, callers
# subcall information
for entry in entries:
if entry.calls:
func = label(entry.code)
for subentry in entry.calls:
try:
callers = callersdicts[id(subentry.code)]
except KeyError:
continue
nc = subentry.callcount
cc = nc - subentry.reccallcount
tt = subentry.inlinetime
ct = subentry.totaltime
if func in callers:
prev = callers[func]
nc += prev[0]
cc += prev[1]
tt += prev[2]
ct += prev[3]
callers[func] = nc, cc, tt, ct
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.enable()
try:
exec(cmd, globals, locals)
finally:
self.disable()
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.enable()
try:
return func(*args, **kw)
finally:
self.disable()
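# Usage sketch (illustrative): the methods above cover the common cases:
#
#   p = Profile()
#   p.runcall(sorted, range(1000), reverse=True)   # profile a single call
#   p.print_stats('cumulative')                    # report via pstats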
# ____________________________________________________________
def label(code):
if isinstance(code, str):
return ('~', 0, code) # built-in functions ('~' sorts at the end)
else:
return (code.co_filename, code.co_firstlineno, code.co_name)
# ____________________________________________________________
def main():
import os, sys, pstats
from optparse import OptionParser
usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1,
choices=sorted(pstats.Stats.sort_arg_dict_default))
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
| 5,372 | 162 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/token.py | """Token constants (from "token.h")."""
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
ATEQUAL = 50
RARROW = 51
ELLIPSIS = 52
OP = 53
AWAIT = 54
ASYNC = 55
ERRORTOKEN = 56
N_TOKENS = 57
NT_OFFSET = 256
#--end constants--
tok_name = {value: name
for name, value in globals().items()
if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
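# Quick sketch of the helpers above (illustrative):
#
#   >>> tok_name[NAME]
#   'NAME'
#   >>> ISTERMINAL(NAME), ISNONTERMINAL(NT_OFFSET + 1), ISEOF(ENDMARKER)
#   (True, True, True)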
def _main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except OSError as err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
with fp:
lines = fp.read().split("\n")
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = sorted(tokens.keys())
# load the output skeleton from the target:
try:
fp = open(outFileName)
except OSError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
with fp:
format = fp.read().split("\n")
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except OSError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
with fp:
fp.write("\n".join(format))
if __name__ == "__main__":
_main()
| 3,075 | 144 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/opcode.py |
"""
opcode module - potentially shared between dis and other modules which
operate on bytecodes (e.g. peephole optimizers).
"""
__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
"haslocal", "hascompare", "hasfree", "opname", "opmap",
"HAVE_ARGUMENT", "EXTENDED_ARG", "hasnargs", 'stack_effect']
# It's a chicken-and-egg I'm afraid:
# We're imported before _opcode's made.
# With exception unheeded
# (stack_effect is not needed)
# Both our chickens and eggs are allayed.
# --Larry Hastings, 2013/11/23
from _opcode import stack_effect
cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
'is not', 'exception match', 'BAD')
hasconst = []
hasname = []
hasjrel = []
hasjabs = []
haslocal = []
hascompare = []
hasfree = []
hasnargs = [] # unused
opmap = {}
opname = ['<%r>' % (op,) for op in range(256)]
def def_op(name, op):
opname[op] = name
opmap[name] = op
def name_op(name, op):
def_op(name, op)
hasname.append(op)
def jrel_op(name, op):
def_op(name, op)
hasjrel.append(op)
def jabs_op(name, op):
def_op(name, op)
hasjabs.append(op)
# Instruction opcodes for compiled code
# Blank lines correspond to available opcodes
def_op('POP_TOP', 1)
def_op('ROT_TWO', 2)
def_op('ROT_THREE', 3)
def_op('DUP_TOP', 4)
def_op('DUP_TOP_TWO', 5)
def_op('NOP', 9)
def_op('UNARY_POSITIVE', 10)
def_op('UNARY_NEGATIVE', 11)
def_op('UNARY_NOT', 12)
def_op('UNARY_INVERT', 15)
def_op('BINARY_MATRIX_MULTIPLY', 16)
def_op('INPLACE_MATRIX_MULTIPLY', 17)
def_op('BINARY_POWER', 19)
def_op('BINARY_MULTIPLY', 20)
def_op('BINARY_MODULO', 22)
def_op('BINARY_ADD', 23)
def_op('BINARY_SUBTRACT', 24)
def_op('BINARY_SUBSCR', 25)
def_op('BINARY_FLOOR_DIVIDE', 26)
def_op('BINARY_TRUE_DIVIDE', 27)
def_op('INPLACE_FLOOR_DIVIDE', 28)
def_op('INPLACE_TRUE_DIVIDE', 29)
def_op('GET_AITER', 50)
def_op('GET_ANEXT', 51)
def_op('BEFORE_ASYNC_WITH', 52)
def_op('INPLACE_ADD', 55)
def_op('INPLACE_SUBTRACT', 56)
def_op('INPLACE_MULTIPLY', 57)
def_op('INPLACE_MODULO', 59)
def_op('STORE_SUBSCR', 60)
def_op('DELETE_SUBSCR', 61)
def_op('BINARY_LSHIFT', 62)
def_op('BINARY_RSHIFT', 63)
def_op('BINARY_AND', 64)
def_op('BINARY_XOR', 65)
def_op('BINARY_OR', 66)
def_op('INPLACE_POWER', 67)
def_op('GET_ITER', 68)
def_op('GET_YIELD_FROM_ITER', 69)
def_op('PRINT_EXPR', 70)
def_op('LOAD_BUILD_CLASS', 71)
def_op('YIELD_FROM', 72)
def_op('GET_AWAITABLE', 73)
def_op('INPLACE_LSHIFT', 75)
def_op('INPLACE_RSHIFT', 76)
def_op('INPLACE_AND', 77)
def_op('INPLACE_XOR', 78)
def_op('INPLACE_OR', 79)
def_op('BREAK_LOOP', 80)
def_op('WITH_CLEANUP_START', 81)
def_op('WITH_CLEANUP_FINISH', 82)
def_op('RETURN_VALUE', 83)
def_op('IMPORT_STAR', 84)
def_op('SETUP_ANNOTATIONS', 85)
def_op('YIELD_VALUE', 86)
def_op('POP_BLOCK', 87)
def_op('END_FINALLY', 88)
def_op('POP_EXCEPT', 89)
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
name_op('STORE_NAME', 90) # Index in name list
name_op('DELETE_NAME', 91) # ""
def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
jrel_op('FOR_ITER', 93)
def_op('UNPACK_EX', 94)
name_op('STORE_ATTR', 95) # Index in name list
name_op('DELETE_ATTR', 96) # ""
name_op('STORE_GLOBAL', 97) # ""
name_op('DELETE_GLOBAL', 98) # ""
def_op('LOAD_CONST', 100) # Index in const list
hasconst.append(100)
name_op('LOAD_NAME', 101) # Index in name list
def_op('BUILD_TUPLE', 102) # Number of tuple items
def_op('BUILD_LIST', 103) # Number of list items
def_op('BUILD_SET', 104) # Number of set items
def_op('BUILD_MAP', 105) # Number of dict entries
name_op('LOAD_ATTR', 106) # Index in name list
def_op('COMPARE_OP', 107) # Comparison operator
hascompare.append(107)
name_op('IMPORT_NAME', 108) # Index in name list
name_op('IMPORT_FROM', 109) # Index in name list
jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
jabs_op('JUMP_IF_FALSE_OR_POP', 111) # Target byte offset from beginning of code
jabs_op('JUMP_IF_TRUE_OR_POP', 112) # ""
jabs_op('JUMP_ABSOLUTE', 113) # ""
jabs_op('POP_JUMP_IF_FALSE', 114) # ""
jabs_op('POP_JUMP_IF_TRUE', 115) # ""
name_op('LOAD_GLOBAL', 116) # Index in name list
jabs_op('CONTINUE_LOOP', 119) # Target address
jrel_op('SETUP_LOOP', 120) # Distance to target address
jrel_op('SETUP_EXCEPT', 121) # ""
jrel_op('SETUP_FINALLY', 122) # ""
def_op('LOAD_FAST', 124) # Local variable number
haslocal.append(124)
def_op('STORE_FAST', 125) # Local variable number
haslocal.append(125)
def_op('DELETE_FAST', 126) # Local variable number
haslocal.append(126)
name_op('STORE_ANNOTATION', 127) # Index in name list
def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
def_op('CALL_FUNCTION', 131) # #args
def_op('MAKE_FUNCTION', 132) # Flags
def_op('BUILD_SLICE', 133) # Number of items
def_op('LOAD_CLOSURE', 135)
hasfree.append(135)
def_op('LOAD_DEREF', 136)
hasfree.append(136)
def_op('STORE_DEREF', 137)
hasfree.append(137)
def_op('DELETE_DEREF', 138)
hasfree.append(138)
def_op('CALL_FUNCTION_KW', 141) # #args + #kwargs
def_op('CALL_FUNCTION_EX', 142) # Flags
jrel_op('SETUP_WITH', 143)
def_op('LIST_APPEND', 145)
def_op('SET_ADD', 146)
def_op('MAP_ADD', 147)
def_op('LOAD_CLASSDEREF', 148)
hasfree.append(148)
def_op('EXTENDED_ARG', 144)
EXTENDED_ARG = 144
def_op('BUILD_LIST_UNPACK', 149)
def_op('BUILD_MAP_UNPACK', 150)
def_op('BUILD_MAP_UNPACK_WITH_CALL', 151)
def_op('BUILD_TUPLE_UNPACK', 152)
def_op('BUILD_SET_UNPACK', 153)
jrel_op('SETUP_ASYNC_WITH', 154)
def_op('FORMAT_VALUE', 155)
def_op('BUILD_CONST_KEY_MAP', 156)
def_op('BUILD_STRING', 157)
def_op('BUILD_TUPLE_UNPACK_WITH_CALL', 158)
name_op('LOAD_METHOD', 160)
def_op('CALL_METHOD', 161)
del def_op, name_op, jrel_op, jabs_op
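# Lookup sketch (illustrative): opmap maps names to opcodes, opname is the
# reverse table, and stack_effect() gives an instruction's net effect on the
# value stack:
#
#   >>> opmap['BINARY_ADD']
#   23
#   >>> opname[100]
#   'LOAD_CONST'
#   >>> stack_effect(opmap['BINARY_ADD'])
#   -1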
| 5,823 | 215 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/pprint.py | # Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import collections as _collections
import re
import sys as _sys
import types as _types
from io import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
def pprint(object, stream=None, indent=1, width=80, depth=None, *,
compact=False):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth,
compact=compact)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None, *, compact=False):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth,
compact=compact).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
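# A minimal usage sketch (illustrative only, not part of the public API);
# the sample data below is hypothetical.
def _example_usage():
    data = {'key': ['a' * 10, 'b' * 10], 'nested': {'x': (1, 2, 3)}}
    pprint(data, width=30)           # pretty-print to sys.stdout
    text = pformat(data, width=30)   # same output, returned as a string
    data['self'] = data              # introduce a reference cycle
    print(saferepr(data))            # repr() variant that survives the cycle
    return text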
class _safe_key:
"""Helper function for key functions when sorting unorderable objects.
    The wrapped object will fall back to a Py2.x style comparison for
    unorderable types (sorting first by the type name and then by the
    object ids).  Does not work recursively, so dict.items() must have
    _safe_key applied to both the key and the value.
"""
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
return self.obj < other.obj
except TypeError:
return ((str(type(self.obj)), id(self.obj)) < \
(str(type(other.obj)), id(other.obj)))
def _safe_tuple(t):
"Helper function for comparing 2-tuples"
return _safe_key(t[0]), _safe_key(t[1])
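# A comment-only sketch of why this matters in Python 3, where mixed types
# are not orderable:
#
#   >>> sorted([3, 'a', (1,)])                  # raises TypeError
#   >>> sorted([3, 'a', (1,)], key=_safe_key)   # falls back to type names
#   [3, 'a', (1,)]
#
# so dict displays with mixed key types can still be sorted deterministically.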
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None, *,
compact=False):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
compact
If true, several items will be combined in one line.
"""
indent = int(indent)
width = int(width)
if indent < 0:
raise ValueError('indent must be >= 0')
if depth is not None and depth <= 0:
raise ValueError('depth must be > 0')
if not width:
raise ValueError('width must be != 0')
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
self._compact = bool(compact)
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
objid = id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level)
max_width = self._width - indent - allowance
if len(rep) > max_width:
p = self._dispatch.get(type(object).__repr__, None)
if p is not None:
context[objid] = 1
p(self, object, stream, indent, allowance, context, level + 1)
del context[objid]
return
elif isinstance(object, dict):
context[objid] = 1
self._pprint_dict(object, stream, indent, allowance,
context, level + 1)
del context[objid]
return
stream.write(rep)
_dispatch = {}
def _pprint_dict(self, object, stream, indent, allowance, context, level):
write = stream.write
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = len(object)
if length:
items = sorted(object.items(), key=_safe_tuple)
self._format_dict_items(items, stream, indent, allowance + 1,
context, level)
write('}')
_dispatch[dict.__repr__] = _pprint_dict
def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
self._format(list(object.items()), stream,
indent + len(cls.__name__) + 1, allowance + 1,
context, level)
stream.write(')')
_dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict
def _pprint_list(self, object, stream, indent, allowance, context, level):
stream.write('[')
self._format_items(object, stream, indent, allowance + 1,
context, level)
stream.write(']')
_dispatch[list.__repr__] = _pprint_list
def _pprint_tuple(self, object, stream, indent, allowance, context, level):
stream.write('(')
endchar = ',)' if len(object) == 1 else ')'
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[tuple.__repr__] = _pprint_tuple
def _pprint_set(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
typ = object.__class__
if typ is set:
stream.write('{')
endchar = '}'
else:
stream.write(typ.__name__ + '({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
self._format_items(object, stream, indent, allowance + len(endchar),
context, level)
stream.write(endchar)
_dispatch[set.__repr__] = _pprint_set
_dispatch[frozenset.__repr__] = _pprint_set
def _pprint_str(self, object, stream, indent, allowance, context, level):
write = stream.write
if not len(object):
write(repr(object))
return
chunks = []
lines = object.splitlines(True)
if level == 1:
indent += 1
allowance += 1
max_width1 = max_width = self._width - indent
for i, line in enumerate(lines):
rep = repr(line)
if i == len(lines) - 1:
max_width1 -= allowance
if len(rep) <= max_width1:
chunks.append(rep)
else:
# A list of alternating (non-space, space) strings
parts = re.findall(r'\S*\s*', line)
assert parts
assert not parts[-1]
parts.pop() # drop empty last part
max_width2 = max_width
current = ''
for j, part in enumerate(parts):
candidate = current + part
if j == len(parts) - 1 and i == len(lines) - 1:
max_width2 -= allowance
if len(repr(candidate)) > max_width2:
if current:
chunks.append(repr(current))
current = part
else:
current = candidate
if current:
chunks.append(repr(current))
if len(chunks) == 1:
write(rep)
return
if level == 1:
write('(')
for i, rep in enumerate(chunks):
if i > 0:
write('\n' + ' '*indent)
write(rep)
if level == 1:
write(')')
_dispatch[str.__repr__] = _pprint_str
def _pprint_bytes(self, object, stream, indent, allowance, context, level):
write = stream.write
if len(object) <= 4:
write(repr(object))
return
parens = level == 1
if parens:
indent += 1
allowance += 1
write('(')
delim = ''
for rep in _wrap_bytes_repr(object, self._width - indent, allowance):
write(delim)
write(rep)
if not delim:
delim = '\n' + ' '*indent
if parens:
write(')')
_dispatch[bytes.__repr__] = _pprint_bytes
def _pprint_bytearray(self, object, stream, indent, allowance, context, level):
write = stream.write
write('bytearray(')
self._pprint_bytes(bytes(object), stream, indent + 10,
allowance + 1, context, level + 1)
write(')')
_dispatch[bytearray.__repr__] = _pprint_bytearray
def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level):
stream.write('mappingproxy(')
self._format(object.copy(), stream, indent + 13, allowance + 1,
context, level)
stream.write(')')
_dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
def _format_dict_items(self, items, stream, indent, allowance, context,
level):
write = stream.write
indent += self._indent_per_level
delimnl = ',\n' + ' ' * indent
last_index = len(items) - 1
for i, (key, ent) in enumerate(items):
last = i == last_index
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + len(rep) + 2,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format_items(self, items, stream, indent, allowance, context, level):
write = stream.write
indent += self._indent_per_level
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
delimnl = ',\n' + ' ' * indent
delim = ''
width = max_width = self._width - indent + 1
it = iter(items)
try:
next_ent = next(it)
except StopIteration:
return
last = False
while not last:
ent = next_ent
try:
next_ent = next(it)
except StopIteration:
last = True
max_width -= allowance
width -= allowance
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ', '
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent,
allowance if last else 1,
context, level)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
rdf = self._repr(object.default_factory, context, level)
cls = object.__class__
indent += len(cls.__name__) + 1
stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent))
self._pprint_dict(object, stream, indent, allowance + 1, context, level)
stream.write(')')
_dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict
def _pprint_counter(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '({')
if self._indent_per_level > 1:
stream.write((self._indent_per_level - 1) * ' ')
items = object.most_common()
self._format_dict_items(items, stream,
indent + len(cls.__name__) + 1, allowance + 2,
context, level)
stream.write('})')
_dispatch[_collections.Counter.__repr__] = _pprint_counter
def _pprint_chain_map(self, object, stream, indent, allowance, context, level):
if not len(object.maps):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
for i, m in enumerate(object.maps):
if i == len(object.maps) - 1:
self._format(m, stream, indent, allowance + 1, context, level)
stream.write(')')
else:
self._format(m, stream, indent, 1, context, level)
stream.write(',\n' + ' ' * indent)
_dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map
def _pprint_deque(self, object, stream, indent, allowance, context, level):
if not len(object):
stream.write(repr(object))
return
cls = object.__class__
stream.write(cls.__name__ + '(')
indent += len(cls.__name__) + 1
stream.write('[')
if object.maxlen is None:
self._format_items(object, stream, indent, allowance + 2,
context, level)
stream.write('])')
else:
self._format_items(object, stream, indent, 2,
context, level)
rml = self._repr(object.maxlen, context, level)
stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))
_dispatch[_collections.deque.__repr__] = _pprint_deque
def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
def _pprint_user_list(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserList.__repr__] = _pprint_user_list
def _pprint_user_string(self, object, stream, indent, allowance, context, level):
self._format(object.data, stream, indent, allowance, context, level - 1)
_dispatch[_collections.UserString.__repr__] = _pprint_user_string
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = type(object)
if typ in _builtin_scalars:
return repr(object), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=_safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
_builtin_scalars = frozenset({str, bytes, bytearray, int, float, complex,
bool, type(None)})
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (type(object).__name__, id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print("_safe_repr:", t2 - t1)
print("pformat:", t3 - t2)
def _wrap_bytes_repr(object, width, allowance):
current = b''
last = len(object) // 4 * 4
for i in range(0, len(object), 4):
part = object[i: i+4]
candidate = current + part
if i == last:
width -= allowance
if len(repr(candidate)) > width:
if current:
yield repr(current)
current = part
else:
current = candidate
if current:
yield repr(current)
if __name__ == "__main__":
_perfcheck()
| 20,860 | 598 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/netrc.py | """An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex, stat
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
"""Exception raised on syntax errors in the .netrc file."""
def __init__(self, msg, filename=None, lineno=None):
self.filename = filename
self.lineno = lineno
self.msg = msg
Exception.__init__(self, msg)
def __str__(self):
return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
class netrc:
def __init__(self, file=None):
default_netrc = file is None
if file is None:
try:
file = os.path.join(os.environ['HOME'], ".netrc")
except KeyError:
raise OSError("Could not find .netrc: $HOME is not set")
self.hosts = {}
self.macros = {}
with open(file) as fp:
self._parse(file, fp, default_netrc)
def _parse(self, file, fp, default_netrc):
lexer = shlex.shlex(fp)
lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
lexer.commenters = lexer.commenters.replace('#', '')
while 1:
# Look for a machine, default, or macdef top-level keyword
saved_lineno = lexer.lineno
toplevel = tt = lexer.get_token()
if not tt:
break
elif tt[0] == '#':
if lexer.lineno == saved_lineno and len(tt) == 1:
lexer.instream.readline()
continue
elif tt == 'machine':
entryname = lexer.get_token()
elif tt == 'default':
entryname = 'default'
elif tt == 'macdef': # Just skip to end of macdefs
entryname = lexer.get_token()
self.macros[entryname] = []
lexer.whitespace = ' \t'
while 1:
line = lexer.instream.readline()
if not line or line == '\012':
lexer.whitespace = ' \t\r\n'
break
self.macros[entryname].append(line)
continue
else:
raise NetrcParseError(
"bad toplevel token %r" % tt, file, lexer.lineno)
            # We're looking at the start of an entry for a named machine or default.
login = ''
account = password = None
self.hosts[entryname] = {}
while 1:
tt = lexer.get_token()
if (tt.startswith('#') or
tt in {'', 'machine', 'default', 'macdef'}):
if password:
self.hosts[entryname] = (login, account, password)
lexer.push_token(tt)
break
else:
raise NetrcParseError(
"malformed %s entry %s terminated by %s"
% (toplevel, entryname, repr(tt)),
file, lexer.lineno)
elif tt == 'login' or tt == 'user':
login = lexer.get_token()
elif tt == 'account':
account = lexer.get_token()
elif tt == 'password':
if os.name == 'posix' and default_netrc:
prop = os.fstat(fp.fileno())
if prop.st_uid != os.getuid():
import pwd
try:
fowner = pwd.getpwuid(prop.st_uid)[0]
except KeyError:
fowner = 'uid %s' % prop.st_uid
try:
user = pwd.getpwuid(os.getuid())[0]
except KeyError:
user = 'uid %s' % os.getuid()
raise NetrcParseError(
("~/.netrc file owner (%s) does not match"
" current user (%s)") % (fowner, user),
file, lexer.lineno)
if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
raise NetrcParseError(
"~/.netrc access too permissive: access"
" permissions must restrict access to only"
" the owner", file, lexer.lineno)
password = lexer.get_token()
else:
raise NetrcParseError("bad follower token %r" % tt,
file, lexer.lineno)
def authenticators(self, host):
"""Return a (user, account, password) tuple for given host."""
if host in self.hosts:
return self.hosts[host]
elif 'default' in self.hosts:
return self.hosts['default']
else:
return None
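    # A minimal usage sketch (comment only; the host name and the file
    # contents are hypothetical):
    #
    #   >>> auth = netrc().authenticators('ftp.example.com')
    #   >>> if auth is not None:
    #   ...     login, account, password = auth
    #
    # Lookups fall back to the 'default' entry when the host is not listed.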
def __repr__(self):
"""Dump the class data in the format of a .netrc file."""
rep = ""
for host in self.hosts.keys():
attrs = self.hosts[host]
rep += f"machine {host}\n\tlogin {attrs[0]}\n"
if attrs[1]:
rep += f"\taccount {attrs[1]}\n"
rep += f"\tpassword {attrs[2]}\n"
for macro in self.macros.keys():
rep += f"macdef {macro}\n"
for line in self.macros[macro]:
rep += line
rep += "\n"
return rep
if __name__ == '__main__':
print(netrc())
| 5,684 | 143 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/signal.py | import _signal
from _signal import *
from functools import wraps as _wraps
from enum import IntEnum as _IntEnum
_globals = globals()
_IntEnum._convert(
'Signals', __name__,
lambda name:
name.isupper()
and (name.startswith('SIG') and not name.startswith('SIG_'))
or name.startswith('CTRL_'))
_IntEnum._convert(
'Handlers', __name__,
lambda name: name in ('SIG_DFL', 'SIG_IGN'))
if 'pthread_sigmask' in _globals:
_IntEnum._convert(
'Sigmasks', __name__,
lambda name: name in ('SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'))
def _int_to_enum(value, enum_klass):
"""Convert a numeric value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
def _enum_to_int(value):
"""Convert an IntEnum member to a numeric value.
If it's not an IntEnum member return the value itself.
"""
try:
return int(value)
except (ValueError, TypeError):
return value
@_wraps(_signal.signal)
def signal(signalnum, handler):
handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler))
return _int_to_enum(handler, Handlers)
@_wraps(_signal.getsignal)
def getsignal(signalnum):
handler = _signal.getsignal(signalnum)
return _int_to_enum(handler, Handlers)
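# A comment-only sketch of the enum round-tripping these wrappers provide
# (the handler value shown is illustrative):
#
#   >>> old = signal.signal(signal.SIGINT, signal.SIG_IGN)
#   >>> signal.getsignal(signal.SIGINT)
#   <Handlers.SIG_IGN: 1>
#
# Plain ints are still accepted; known values come back as Signals/Handlers
# members, and unknown ones are returned unchanged.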
if 'pthread_sigmask' in _globals:
@_wraps(_signal.pthread_sigmask)
def pthread_sigmask(how, mask):
sigs_set = _signal.pthread_sigmask(how, mask)
return set(_int_to_enum(x, Signals) for x in sigs_set)
pthread_sigmask.__doc__ = _signal.pthread_sigmask.__doc__
if 'sigpending' in _globals:
@_wraps(_signal.sigpending)
def sigpending():
sigs = _signal.sigpending()
return set(_int_to_enum(x, Signals) for x in sigs)
if 'sigwait' in _globals:
@_wraps(_signal.sigwait)
def sigwait(sigset):
retsig = _signal.sigwait(sigset)
return _int_to_enum(retsig, Signals)
    sigwait.__doc__ = _signal.sigwait.__doc__
del _globals, _wraps
if __name__ == 'PYOBJ.COM':
ITIMER_PROF = 0
ITIMER_REAL = 0
ITIMER_VIRTUAL = 0
NSIG = 0
SIGABRT = 0
SIGALRM = 0
SIGBUS = 0
SIGCHLD = 0
SIGCONT = 0
SIGEMT = 0
SIGFPE = 0
SIGHUP = 0
SIGILL = 0
SIGINFO = 0
SIGINT = 0
SIGIO = 0
SIGIOT = 0
SIGKILL = 0
SIGPIPE = 0
SIGPOLL = 0
SIGPROF = 0
SIGPWR = 0
SIGQUIT = 0
SIGRTMAX = 0
SIGRTMIN = 0
SIGSEGV = 0
SIGSTOP = 0
SIGSYS = 0
SIGTERM = 0
SIGTRAP = 0
SIGTSTP = 0
SIGTTIN = 0
SIGTTOU = 0
SIGURG = 0
SIGUSR1 = 0
SIGUSR2 = 0
SIGVTALRM = 0
SIGWINCH = 0
SIGXCPU = 0
SIGXFSZ = 0
SIG_BLOCK = 0
SIG_DFL = 0
SIG_IGN = 0
SIG_SETMASK = 0
SIG_UNBLOCK = 0
| 2,887 | 127 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/quopri.py | #! /usr/bin/env python3
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = b'='
MAXLINESIZE = 76
HEX = b'0123456789ABCDEF'
EMPTYSTRING = b''
from encodings import quopri_codec, aliases
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular byte ordinal needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
assert isinstance(c, bytes)
if c in b' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == b'_':
return header
return c == ESCAPE or not (b' ' <= c <= b'~')
def quote(c):
"""Quote a single character."""
assert isinstance(c, bytes) and len(c)==1
c = ord(c)
return ESCAPE + bytes((HEX[c//16], HEX[c%16]))
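# For example (comment only): quote(b'=') == b'=3D' and quote(b'\xe9') ==
# b'=E9' -- the escape byte followed by two uppercase hex digits.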
def encode(input, output, quotetabs, header=False):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are binary file objects. The 'quotetabs' flag
indicates whether embedded tabs and spaces should be quoted. Note that
line-ending tabs and spaces are always encoded, as per RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per RFC
1522."""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs=quotetabs, header=header)
output.write(odata)
return
def write(s, output=output, lineEnd=b'\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in b' \t':
output.write(s[:-1] + quote(s[-1:]) + lineEnd)
elif s == b'.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = b''
if line[-1:] == b'\n':
line = line[:-1]
stripped = b'\n'
# Calculate the un-length-limited encoded line
for c in line:
c = bytes((c,))
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == b' ':
outline.append(b'_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs=False, header=False):
if b2a_qp is not None:
return b2a_qp(s, quotetabs=quotetabs, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
def decode(input, output, header=False):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are binary file objects.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header=header)
output.write(odata)
return
new = b''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1:n] == b'\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1:n] in b" \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i:i+1]
if c == b'_' and header:
new = new + b' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1:i+2] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + b'\n')
new = b''
if new:
output.write(new)
def decodestring(s, header=False):
if a2b_qp is not None:
return a2b_qp(s, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
decode(infp, outfp, header=header)
return outfp.getvalue()
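# A minimal round-trip sketch (comment only; both the binascii fast path and
# the pure-Python fallback should produce the same result here):
#
#   >>> encodestring(b'caf\xe9 au lait')
#   b'caf=E9 au lait'
#   >>> decodestring(b'caf=E9 au lait')
#   b'caf\xe9 au lait'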
# Other helper functions
def ishex(c):
"""Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII."""
assert isinstance(c, bytes)
return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
c = bytes((c,))
if b'0' <= c <= b'9':
            i = ord(b'0')
        elif b'a' <= c <= b'f':
            i = ord(b'a')-10
        elif b'A' <= c <= b'F':
            i = ord(b'A')-10
else:
assert False, "non-hex digit "+repr(c)
bits = bits*16 + (ord(c) - i)
return bits
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("usage: quopri [-t | -d] [file] ...")
print("-t: quote tabs")
print("-d: decode; default encode")
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print("-t and -d are mutually exclusive")
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin.buffer
else:
try:
fp = open(file, "rb")
except OSError as msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
try:
if deco:
decode(fp, sys.stdout.buffer)
else:
encode(fp, sys.stdout.buffer, tabs)
finally:
if file != '-':
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
| 7,299 | 245 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/sndhdr.py | """Routines to help recognizing sound files.
Function whathdr() recognizes various types of sound file headers.
It understands almost all headers that SOX can decode.
The return tuple contains the following items, in this order:
- file type (as SOX understands it)
- sampling rate (0 if unknown or hard to decode)
- number of channels (0 if unknown or hard to decode)
- number of frames in the file (-1 if unknown or hard to decode)
- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
If the file doesn't have a recognizable type, it returns None.
If the file can't be opened, OSError is raised.
To compute the total time, divide the number of frames by the
sampling rate (a frame contains a sample for each channel).
Function what() calls whathdr(). (It used to also use some
heuristics for raw data, but this doesn't work very well.)
Finally, the function test() is a simple main program that calls
what() for all files mentioned on the argument list. For directory
arguments it calls what() for all files in that directory. Default
argument is "." (testing all files in the current directory). The
option -r tells it to recurse down directories found inside
explicitly given directories.
"""
# The file structure is top-down except that the test program and its
# subroutine come last.
__all__ = ['what', 'whathdr']
from collections import namedtuple
SndHeaders = namedtuple('SndHeaders',
'filetype framerate nchannels nframes sampwidth')
SndHeaders.filetype.__doc__ = ("""The value for type indicates the data type
    and will be one of the strings 'aifc', 'aiff', 'au', 'hcom',
'sndr', 'sndt', 'voc', 'wav', '8svx', 'sb', 'ub', or 'ul'.""")
SndHeaders.framerate.__doc__ = ("""The sampling_rate will be either the actual
value or 0 if unknown or difficult to decode.""")
SndHeaders.nchannels.__doc__ = ("""The number of channels or 0 if it cannot be
determined or if the value is difficult to decode.""")
SndHeaders.nframes.__doc__ = ("""The value for frames will be either the number
of frames or -1.""")
SndHeaders.sampwidth.__doc__ = ("""Either the sample size in bits or
'A' for A-LAW or 'U' for u-LAW.""")
def what(filename):
"""Guess the type of a sound file."""
res = whathdr(filename)
return res
def whathdr(filename):
"""Recognize sound headers."""
with open(filename, 'rb') as f:
h = f.read(512)
for tf in tests:
res = tf(h, f)
if res:
return SndHeaders(*res)
return None
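# A comment-only usage sketch (the filename and all numbers are hypothetical):
#
#   >>> import sndhdr
#   >>> sndhdr.what('example.wav')
#   SndHeaders(filetype='wav', framerate=44100, nchannels=2, nframes=100000, sampwidth=16)
#
# Fields can be read positionally or by attribute, e.g. hdr.framerate.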
#-----------------------------------#
# Subroutines per sound header type #
#-----------------------------------#
tests = []
def test_aifc(h, f):
import aifc
if not h.startswith(b'FORM'):
return None
if h[8:12] == b'AIFC':
fmt = 'aifc'
elif h[8:12] == b'AIFF':
fmt = 'aiff'
else:
return None
f.seek(0)
try:
a = aifc.open(f, 'r')
except (EOFError, aifc.Error):
return None
return (fmt, a.getframerate(), a.getnchannels(),
a.getnframes(), 8 * a.getsampwidth())
tests.append(test_aifc)
def test_au(h, f):
if h.startswith(b'.snd'):
func = get_long_be
elif h[:4] in (b'\0ds.', b'dns.'):
func = get_long_le
else:
return None
filetype = 'au'
hdr_size = func(h[4:8])
data_size = func(h[8:12])
encoding = func(h[12:16])
rate = func(h[16:20])
nchannels = func(h[20:24])
sample_size = 1 # default
if encoding == 1:
sample_bits = 'U'
elif encoding == 2:
sample_bits = 8
elif encoding == 3:
sample_bits = 16
sample_size = 2
else:
sample_bits = '?'
frame_size = sample_size * nchannels
if frame_size:
nframe = data_size / frame_size
else:
nframe = -1
return filetype, rate, nchannels, nframe, sample_bits
tests.append(test_au)
def test_hcom(h, f):
if h[65:69] != b'FSSD' or h[128:132] != b'HCOM':
return None
divisor = get_long_be(h[144:148])
if divisor:
rate = 22050 / divisor
else:
rate = 0
return 'hcom', rate, 1, -1, 8
tests.append(test_hcom)
def test_voc(h, f):
if not h.startswith(b'Creative Voice File\032'):
return None
sbseek = get_short_le(h[20:22])
rate = 0
if 0 <= sbseek < 500 and h[sbseek] == 1:
ratecode = 256 - h[sbseek+4]
if ratecode:
rate = int(1000000.0 / ratecode)
return 'voc', rate, 1, -1, 8
tests.append(test_voc)
def test_wav(h, f):
import wave
# 'RIFF' <len> 'WAVE' 'fmt ' <len>
if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ':
return None
f.seek(0)
try:
w = wave.openfp(f, 'r')
except (EOFError, wave.Error):
return None
return ('wav', w.getframerate(), w.getnchannels(),
w.getnframes(), 8*w.getsampwidth())
tests.append(test_wav)
def test_8svx(h, f):
if not h.startswith(b'FORM') or h[8:12] != b'8SVX':
return None
# Should decode it to get #channels -- assume always 1
return '8svx', 0, 1, 0, 8
tests.append(test_8svx)
def test_sndt(h, f):
if h.startswith(b'SOUND'):
nsamples = get_long_le(h[8:12])
rate = get_short_le(h[20:22])
return 'sndt', rate, 1, nsamples, 8
tests.append(test_sndt)
def test_sndr(h, f):
if h.startswith(b'\0\0'):
rate = get_short_le(h[2:4])
if 4000 <= rate <= 25000:
return 'sndr', rate, 1, -1, 8
tests.append(test_sndr)
#-------------------------------------------#
# Subroutines to extract numbers from bytes #
#-------------------------------------------#
def get_long_be(b):
return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3]
def get_long_le(b):
return (b[3] << 24) | (b[2] << 16) | (b[1] << 8) | b[0]
def get_short_be(b):
return (b[0] << 8) | b[1]
def get_short_le(b):
return (b[1] << 8) | b[0]
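# A comment-only note: these helpers match struct unpacking with an explicit
# byte order, e.g. get_long_be(b) == struct.unpack('>I', b[:4])[0] and
# get_short_le(b) == struct.unpack('<H', b[:2])[0].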
#--------------------#
# Small test program #
#--------------------#
def test():
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print(filename + '/:', end=' ')
if recursive or toplevel:
print('recursing down:')
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print('*** directory (use -r) ***')
else:
print(filename + ':', end=' ')
sys.stdout.flush()
try:
print(what(filename))
except OSError:
print('*** not found ***')
if __name__ == '__main__':
test()
| 7,088 | 258 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/textwrap.py | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
import re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that
# some Unicode spaces (like \u00a0) are non-breaking whitespaces.
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 0 .. 'tabsize' spaces, depending on its position
in its line. If false, each tab is treated as a single character.
tabsize (default: 8)
Expand tabs in input text to 0 .. 'tabsize' spaces, unless
'expand_tabs' is false.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
max_lines (default: None)
Truncate wrapped lines.
placeholder (default: ' [...]')
Append to the last line of truncated text.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
word_punct = r'[\w!"\'&.,?]'
letter = r'[^\d\W]'
whitespace = r'[%s]' % re.escape(_whitespace)
nowhitespace = '[^' + whitespace[1:]
wordsep_re = re.compile(r'''
( # any whitespace
%(ws)s+
| # em-dash between words
(?<=%(wp)s) -{2,} (?=\w)
| # word, possibly hyphenated
%(nws)s+? (?:
# hyphenated word
-(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-))
(?= %(lt)s -? %(lt)s)
| # end of word
(?=%(ws)s|\Z)
| # em-dash
(?<=%(wp)s) (?=-{2,}\w)
)
)''' % {'wp': word_punct, 'lt': letter,
'ws': whitespace, 'nws': nowhitespace},
re.VERBOSE)
del word_punct, letter, nowhitespace
    # This less funky little regex just splits on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(%s+)' % whitespace)
del whitespace
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True,
tabsize=8,
*,
max_lines=None,
placeholder=' [...]'):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
self.tabsize = tabsize
self.max_lines = max_lines
self.placeholder = placeholder
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs(self.tabsize)
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
            'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
if self.max_lines is not None:
if self.max_lines > 1:
indent = self.subsequent_indent
else:
indent = self.initial_indent
if len(indent) + len(self.placeholder.lstrip()) > self.width:
raise ValueError("placeholder too large for max width")
# Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
cur_len = sum(map(len, cur_line))
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
cur_len -= len(cur_line[-1])
del cur_line[-1]
if cur_line:
if (self.max_lines is None or
len(lines) + 1 < self.max_lines or
(not chunks or
self.drop_whitespace and
len(chunks) == 1 and
not chunks[0].strip()) and cur_len <= width):
# Convert current line back to a string and store it in
# list of all lines (return value).
lines.append(indent + ''.join(cur_line))
else:
while cur_line:
if (cur_line[-1].strip() and
cur_len + len(self.placeholder) <= width):
cur_line.append(self.placeholder)
lines.append(indent + ''.join(cur_line))
break
cur_len -= len(cur_line[-1])
del cur_line[-1]
else:
if lines:
prev_line = lines[-1].rstrip()
if (len(prev_line) + len(self.placeholder) <=
self.width):
lines[-1] = prev_line + self.placeholder
break
lines.append(indent + self.placeholder.lstrip())
break
return lines
def _split_chunks(self, text):
text = self._munge_whitespace(text)
return self._split(text)
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
chunks = self._split_chunks(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
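# For example (comment-only sketch, using the default wrapper settings):
#
#   >>> wrap("The quick brown fox jumps over the lazy dog", width=15)
#   ['The quick brown', 'fox jumps over', 'the lazy dog']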
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
def shorten(text, width, **kwargs):
"""Collapse and truncate the given text to fit in the given width.
The text first has its whitespace collapsed. If it then fits in
the *width*, it is returned as is. Otherwise, as many words
as possible are joined and then the placeholder is appended::
>>> textwrap.shorten("Hello world!", width=12)
'Hello world!'
>>> textwrap.shorten("Hello world!", width=11)
'Hello [...]'
"""
w = TextWrapper(width=width, max_lines=1, **kwargs)
return w.fill(' '.join(text.strip().split()))
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Find the largest common whitespace between current line and previous
# winner.
else:
for i, (x, y) in enumerate(zip(margin, indent)):
if x != y:
margin = margin[:i]
break
else:
margin = margin[:len(indent)]
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
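# For example (comment only): dedent("    hello\n      world\n") returns
# "hello\n  world\n" -- the four-space margin common to both lines is
# removed and the extra two spaces on the second line are kept.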
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
| 19,558 | 489 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/__phello__.foo.py | # This file exists as a helper for the test.test_frozen module.
| 64 | 2 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/optparse.py | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <[email protected]>
Originally distributed as Optik.
For support, use the [email protected] mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError',
'check_choice']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.help_position = self.max_help_position = \
min(max_help_position, max(width - 20, indent_increment * 2))
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = max(self.width - self.current_indent, 11)
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
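    # For example (comment only): with help="write to FILE [default: %default]"
    # and a parser default of "out.txt" for the option's dest, the rendered
    # help text reads "write to FILE [default: out.txt]".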
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = max(self.width - self.help_position, 11)
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
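# Editor's illustration (not part of the original module): how the radix
# detection above plays out for a few invented literals.
#
#     _parse_num("0x1f", int)  # -> 31 (int() accepts the 0x prefix itself)
#     _parse_num("0b101", int) # -> 5  (the 0b prefix is stripped first)
#     _parse_num("017", int)   # -> 15 (a leading 0 selects octal)
#     _parse_num("42", int)    # -> 42 (plain decimal)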
def _parse_int(val):
return _parse_num(val, int)
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names.
if isinstance(self.type, type):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
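# --- Editor's illustrative sketch (not part of the original module) --------
# The TYPES and TYPE_CHECKER class attributes above are designed to be
# extended by subclasses. A minimal, hypothetical subclass adding a
# "duration" type might look like this; DurationOption and check_duration
# are invented names.
#
#     def check_duration(option, opt, value):
#         # accept plain seconds, or minutes with an "m" suffix
#         try:
#             if value.endswith("m"):
#                 return int(value[:-1]) * 60
#             return int(value)
#         except ValueError:
#             raise OptionValueError(
#                 "option %s: invalid duration value: %r" % (opt, value))
#
#     class DurationOption(Option):
#         TYPES = Option.TYPES + ("duration",)
#         TYPE_CHECKER = dict(Option.TYPE_CHECKER, duration=check_duration)
#
#     # used via OptionParser(option_class=DurationOption)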
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
exec(open(filename).read(), vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
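# --- Editor's illustrative sketch (not part of the original module) --------
# How a Values instance behaves on its own; every attribute name and
# literal below is invented for the example.
#
#     v = Values({"verbose": False})
#     v.ensure_value("paths", []).append("a.txt")  # attr missing: create it
#     v.ensure_value("paths", []).append("b.txt")  # attr present: reuse it
#     v._update_careful({"verbose": True, "unknown": 1})
#     # Now v.verbose is True and v.paths == ["a.txt", "b.txt"], while
#     # "unknown" was ignored because v had no such attribute beforehand.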
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
            if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
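    # --- Editor's illustrative note (not part of the original module) ------
    # The effect of process_default_values: a *string* default is run
    # through the option's type checker, so "42" below comes back as the
    # int 42. The option is invented for the example.
    #
    #     parser = OptionParser()
    #     parser.add_option("-n", type="int", dest="num", default="42")
    #     assert parser.get_default_values().num == 42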
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
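    # --- Editor's illustrative trace (not part of the original module) -----
    # Assuming a hypothetical -x that takes no value and -f that takes one,
    # the cluster "-xfFILE" is consumed by the loop above as follows:
    #   ch == 'x': "-x" is processed with value None, and i becomes 2;
    #   ch == 'f': "-f" takes a value, so arg[3:] == "FILE" is pushed back
    #              onto rargs, popped again as the value, and stop ends
    #              the loop.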
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
    abbreviation. If 's' is found to be ambiguous or doesn't match any of
    the keys in 'wordmap', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
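# --- Editor's illustrative sketch (not part of the original module) --------
# Abbreviation matching over a made-up option table:
#
#     opts = {"--verbose": None, "--version": None, "--help": None}
#     _match_abbrev("--help", opts)   # -> "--help" (exact match)
#     _match_abbrev("--verb", opts)   # -> "--verbose" (unique prefix)
#     _match_abbrev("--ver", opts)    # raises AmbiguousOptionError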
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
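# --- Editor's illustrative sketch (not part of the original module) --------
# End-to-end use of the classes above; every option name and argument
# below is invented for the example.
#
#     parser = OptionParser(usage="%prog [options] FILE")
#     parser.add_option("-v", "--verbose", action="store_true",
#                       dest="verbose", default=False,
#                       help="print status messages")
#     parser.add_option("-o", "--output", dest="outfile", metavar="FILE",
#                       help="write result to FILE")
#     options, args = parser.parse_args(["-v", "-o", "out.txt", "in.txt"])
#     # options.verbose == True, options.outfile == "out.txt",
#     # args == ["in.txt"]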
cosmopolitan/third_party/python/Lib/cmd.py
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
   with defined help_ functions, broken into up to three categories: documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
   arguments text, line, begidx, endidx. text is the string we are matching
   against; all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
"""
import string, sys
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
"""A simple framework for writing line-oriented command interpreters.
These are often useful for test harnesses, administrative tools, and
prototypes that will later be wrapped in a more sophisticated interface.
A Cmd instance or subclass instance is a line-oriented interpreter
framework. There is no good reason to instantiate Cmd itself; rather,
it's useful as a superclass of an interpreter class you define yourself
in order to inherit Cmd's methods and encapsulate action methods.
"""
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = 1
def __init__(self, completekey='tab', stdin=None, stdout=None):
"""Instantiate a line-oriented interpreter framework.
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically. The optional arguments stdin and stdout
specify alternate input and output file objects; if not specified,
sys.stdin and sys.stdout are used.
"""
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = input(self.prompt)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
def precmd(self, line):
"""Hook method executed just before the command line is
interpreted, but after the input prompt is generated and issued.
"""
return line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
return stop
def preloop(self):
"""Hook method executed once when the cmdloop() method is called."""
pass
def postloop(self):
"""Hook method executed once when the cmdloop() method is about to
return.
"""
pass
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
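    # Editor's illustration (not part of the original module): with the
    # default identchars,
    #     parseline("?greet")      -> ("help", "greet", "help greet")
    #     parseline("greet Alice") -> ("greet", "Alice", "greet Alice")
    #     parseline("")            -> (None, None, "")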
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF' :
self.lastcmd = ''
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.stdout.write('*** Unknown syntax: %s\n'%line)
def completedefault(self, *ignored):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
return []
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if state == 0:
try:
import readline
except ImportError:
return None
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def get_names(self):
        # This method used to pull in base class attributes
        # at a time when dir() didn't do it yet.
return dir(self.__class__)
def complete_help(self, *args):
commands = set(self.completenames(*args))
topics = set(a[5:] for a in self.get_names()
if a.startswith('help_' + args[0]))
return list(commands | topics)
def do_help(self, arg):
'List available commands with "help" or detailed help with "help cmd".'
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
            # There can be duplicates if routines are overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, list(help.keys()),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
def columnize(self, list, displaywidth=80):
"""Display a list of strings as a compact set of columns.
Each column is only as wide as necessary.
Columns are separated by two spaces (one was not legible enough).
"""
if not list:
self.stdout.write("<empty>\n")
return
nonstrings = [i for i in range(len(list))
if not isinstance(list[i], str)]
if nonstrings:
raise TypeError("list[i] not a string for i in %s"
% ", ".join(map(str, nonstrings)))
size = len(list)
if size == 1:
self.stdout.write('%s\n'%str(list[0]))
return
# Try every row count from 1 upwards
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > displaywidth:
break
if totwidth <= displaywidth:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
else:
x = list[i]
texts.append(x)
while texts and not texts[-1]:
del texts[-1]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
self.stdout.write("%s\n"%str(" ".join(texts)))
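# --- Editor's illustrative sketch (not part of the original module) --------
# A minimal interpreter built on Cmd; the command names are invented.
#
#     class Greeter(Cmd):
#         prompt = "(greet) "
#         def do_hello(self, arg):
#             'hello NAME -- greet NAME'
#             self.stdout.write("Hello, %s!\n" % (arg or "world"))
#         def do_EOF(self, arg):
#             'Exit at end-of-file.'
#             return True   # a true result from onecmd() stops cmdloop()
#
#     # Greeter().cmdloop() then accepts "hello Alice", "help", "?", ...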
cosmopolitan/third_party/python/Lib/copyreg.py
"""Helper to provide extensibility for pickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
    # The constructor_ob function is a vestige of the old "safe for
    # unpickling" protocol. There is no reason for the caller to pass it
    # anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is self.__class__:
raise TypeError("can't pickle %s objects" % base.__name__)
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __newobj_ex__(cls, args, kwargs):
"""Used by pickle protocol 4, instead of __newobj__ to allow classes with
keyword-only arguments to be pickled correctly.
"""
return cls.__new__(cls, *args, **kwargs)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, str):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
stripped = c.__name__.lstrip('_')
if stripped:
names.append('_%s%s' % (stripped, name))
else:
names.append(name)
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
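# Editor's illustration (not part of the original module): for a
# hypothetical
#     class A: __slots__ = ("x", "__y")
# _slotnames(A) returns ["x", "_A__y"] -- private slot names come back
# mangled, matching the descriptors the class actually defines.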
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError("code out of range")
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
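# --- Editor's illustrative sketch (not part of the original module) --------
# Registering a reduction function with pickle() above; the class C and
# helper below are invented, and C is assumed to live at module level so
# the pickle module can reference it by name.
#
#     class C:
#         def __init__(self, a, b):
#             self.a, self.b = a, b
#
#     def reduce_C(obj):
#         # (callable, args) -- enough for pickle to rebuild the object
#         return C, (obj.a, obj.b)
#
#     pickle(C, reduce_C)
#     # pickling a C instance now goes through reduce_C, and unpickling
#     # calls C(obj.a, obj.b) to reconstruct it.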
cosmopolitan/third_party/python/Lib/weakref.py
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType,
_remove_dead_weakref)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
import sys
import itertools
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet", "WeakMethod", "finalize"]
class WeakMethod(ref):
"""
A custom `weakref.ref` subclass which simulates a weak reference to
a bound method, working around the lifetime problem of bound methods.
"""
__slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__"
def __new__(cls, meth, callback=None):
try:
obj = meth.__self__
func = meth.__func__
except AttributeError:
raise TypeError("argument should be a bound method, not {}"
.format(type(meth))) from None
def _cb(arg):
# The self-weakref trick is needed to avoid creating a reference
# cycle.
self = self_wr()
if self._alive:
self._alive = False
if callback is not None:
callback(self)
self = ref.__new__(cls, obj, _cb)
self._func_ref = ref(func, _cb)
self._meth_type = type(meth)
self._alive = True
self_wr = ref(self)
return self
def __call__(self):
obj = super().__call__()
func = self._func_ref()
if obj is None or func is None:
return None
return self._meth_type(func, obj)
def __eq__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is other
return ref.__eq__(self, other) and self._func_ref == other._func_ref
return False
def __ne__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is not other
return ref.__ne__(self, other) or self._func_ref != other._func_ref
return True
__hash__ = ref.__hash__
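# Illustrative sketch (not part of the original module): unlike a plain
# ref(obj.method), a WeakMethod stays resolvable while the object lives and
# returns None once it is gone. "Counter" is a hypothetical example type.
#
#   class Counter:
#       def bump(self):
#           return 1
#   c = Counter()
#   wm = WeakMethod(c.bump)
#   assert wm()() == 1     # __call__ re-binds the function to the object
#   del c                  # CPython's refcounting collects it immediately
#   assert wm() is None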
class WeakValueDictionary(collections.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(*args, **kw):
if not args:
raise TypeError("descriptor '__init__' of 'WeakValueDictionary' "
"object needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
def remove(wr, selfref=ref(self), _atomic_removal=_remove_dead_weakref):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
# Atomic removal is necessary since this function
# can be called asynchronously by the GC
_atomic_removal(d, wr.key)
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
key = l.pop()
_remove_dead_weakref(d, key)
def __getitem__(self, key):
if self._pending_removals:
self._commit_removals()
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
if self._pending_removals:
self._commit_removals()
return len(self.data)
def __contains__(self, key):
if self._pending_removals:
self._commit_removals()
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<%s at %#x>" % (self.__class__.__name__, id(self))
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
if self._pending_removals:
self._commit_removals()
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
if self._pending_removals:
self._commit_removals()
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
if self._pending_removals:
self._commit_removals()
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen while the referent is being
                # collected and our remove() callback has not run yet.
return default
else:
return o
def items(self):
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
yield from self.data.values()
def values(self):
if self._pending_removals:
self._commit_removals()
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
o = None
if o is None:
if args:
return args[0]
else:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
o = self.data[key]()
except KeyError:
o = None
if o is None:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return o
def update(*args, **kwargs):
if not args:
raise TypeError("descriptor 'update' of 'WeakValueDictionary' "
"object needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
dict = args[0] if args else None
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
if self._pending_removals:
self._commit_removals()
return list(self.data.values())
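# Illustrative sketch (not part of the original module): entries disappear
# as soon as the last strong reference to a value dies. "Obj" is a
# hypothetical value type (values must be weakly referenceable).
#
#   class Obj: pass
#   d = WeakValueDictionary()
#   v = Obj(); d['k'] = v
#   assert 'k' in d
#   del v                  # CPython's refcounting collects Obj() here
#   assert 'k' not in d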
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
class WeakKeyDictionary(collections.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
self._dirty_len = False
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def _scrub_removals(self):
d = self.data
self._pending_removals = [k for k in self._pending_removals if k in d]
self._dirty_len = False
def __delitem__(self, key):
self._dirty_len = True
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
if self._dirty_len and self._pending_removals:
# self._pending_removals may still contain keys which were
# explicitly removed, we have to scrub them (see issue #21173).
self._scrub_removals()
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<%s at %#x>" % (self.__class__.__name__, id(self))
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
self._dirty_len = True
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
self._dirty_len = True
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
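# Illustrative sketch (not part of the original module): associate metadata
# with objects without extending their lifetime. "Node" is hypothetical.
#
#   class Node: pass
#   meta = WeakKeyDictionary()
#   n = Node(); meta[n] = 'visited'
#   assert meta[n] == 'visited'
#   del n                  # the entry goes away with its key
#   assert len(meta) == 0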
class finalize:
"""Class for finalization of weakrefable objects
finalize(obj, func, *args, **kwargs) returns a callable finalizer
object which will be called when obj is garbage collected. The
first time the finalizer is called it evaluates func(*arg, **kwargs)
and returns the result. After this the finalizer is dead, and
calling it just returns None.
When the program exits any remaining finalizers for which the
atexit attribute is true will be run in reverse order of creation.
By default atexit is true.
"""
# Finalizer objects don't have any state of their own. They are
# just used as keys to lookup _Info objects in the registry. This
# ensures that they cannot be part of a ref-cycle.
__slots__ = ()
_registry = {}
_shutdown = False
_index_iter = itertools.count()
_dirty = False
_registered_with_atexit = False
class _Info:
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
def __init__(self, obj, func, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
import atexit
atexit.register(self._exitfunc)
finalize._registered_with_atexit = True
info = self._Info()
info.weakref = ref(obj, self)
info.func = func
info.args = args
info.kwargs = kwargs or None
info.atexit = True
info.index = next(self._index_iter)
self._registry[self] = info
finalize._dirty = True
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
otherwise return None"""
info = self._registry.pop(self, None)
if info and not self._shutdown:
return info.func(*info.args, **(info.kwargs or {}))
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {})
def peek(self):
"""If alive then return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None:
return (obj, info.func, info.args, info.kwargs or {})
@property
def alive(self):
"""Whether finalizer is alive"""
return self in self._registry
@property
def atexit(self):
"""Whether finalizer should be called at exit"""
info = self._registry.get(self)
return bool(info) and info.atexit
@atexit.setter
def atexit(self, value):
info = self._registry.get(self)
if info:
info.atexit = bool(value)
def __repr__(self):
info = self._registry.get(self)
obj = info and info.weakref()
if obj is None:
return '<%s object at %#x; dead>' % (type(self).__name__, id(self))
else:
return '<%s object at %#x; for %r at %#x>' % \
(type(self).__name__, id(self), type(obj).__name__, id(obj))
@classmethod
def _select_for_exit(cls):
# Return live finalizers marked for exit, oldest first
L = [(f,i) for (f,i) in cls._registry.items() if i.atexit]
L.sort(key=lambda item:item[1].index)
return [f for (f,i) in L]
@classmethod
def _exitfunc(cls):
# At shutdown invoke finalizers for which atexit is true.
# This is called once all other non-daemonic threads have been
# joined.
reenable_gc = False
try:
if cls._registry:
import gc
if gc.isenabled():
reenable_gc = True
gc.disable()
pending = None
while True:
if pending is None or finalize._dirty:
pending = cls._select_for_exit()
finalize._dirty = False
if not pending:
break
f = pending.pop()
try:
# gc is disabled, so (assuming no daemonic
# threads) the following is the only line in
# this function which might trigger creation
# of a new finalizer
f()
except Exception:
sys.excepthook(*sys.exc_info())
assert f not in cls._registry
finally:
# prevent any more finalizers from executing during shutdown
finalize._shutdown = True
if reenable_gc:
gc.enable()
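# Illustrative sketch (not part of the original module): a finalizer fires
# exactly once, either when its referent is collected or when it is called
# explicitly. "Resource" is a hypothetical type.
#
#   class Resource: pass
#   r = Resource()
#   fin = finalize(r, print, 'cleaning up')
#   assert fin.alive
#   del r                  # prints 'cleaning up' under CPython refcounting
#   assert not fin.alive and fin() is None   # dead finalizers return None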
cosmopolitan/third_party/python/Lib/stringprep.py
# This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
if unicodedata.category(code) != 'Cn': return False
c = ord(code)
if 0xFDD0 <= c < 0xFDF0: return False
return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + list(range(65024,65040)))
def in_table_b1(code):
return ord(code) in b1_set
b3_exceptions = {
0xb5:'\u03bc', 0xdf:'ss', 0x130:'i\u0307', 0x149:'\u02bcn',
0x17f:'s', 0x1f0:'j\u030c', 0x345:'\u03b9', 0x37a:' \u03b9',
0x390:'\u03b9\u0308\u0301', 0x3b0:'\u03c5\u0308\u0301', 0x3c2:'\u03c3', 0x3d0:'\u03b2',
0x3d1:'\u03b8', 0x3d2:'\u03c5', 0x3d3:'\u03cd', 0x3d4:'\u03cb',
0x3d5:'\u03c6', 0x3d6:'\u03c0', 0x3f0:'\u03ba', 0x3f1:'\u03c1',
0x3f2:'\u03c3', 0x3f5:'\u03b5', 0x587:'\u0565\u0582', 0x1e96:'h\u0331',
0x1e97:'t\u0308', 0x1e98:'w\u030a', 0x1e99:'y\u030a', 0x1e9a:'a\u02be',
0x1e9b:'\u1e61', 0x1f50:'\u03c5\u0313', 0x1f52:'\u03c5\u0313\u0300', 0x1f54:'\u03c5\u0313\u0301',
0x1f56:'\u03c5\u0313\u0342', 0x1f80:'\u1f00\u03b9', 0x1f81:'\u1f01\u03b9', 0x1f82:'\u1f02\u03b9',
0x1f83:'\u1f03\u03b9', 0x1f84:'\u1f04\u03b9', 0x1f85:'\u1f05\u03b9', 0x1f86:'\u1f06\u03b9',
0x1f87:'\u1f07\u03b9', 0x1f88:'\u1f00\u03b9', 0x1f89:'\u1f01\u03b9', 0x1f8a:'\u1f02\u03b9',
0x1f8b:'\u1f03\u03b9', 0x1f8c:'\u1f04\u03b9', 0x1f8d:'\u1f05\u03b9', 0x1f8e:'\u1f06\u03b9',
0x1f8f:'\u1f07\u03b9', 0x1f90:'\u1f20\u03b9', 0x1f91:'\u1f21\u03b9', 0x1f92:'\u1f22\u03b9',
0x1f93:'\u1f23\u03b9', 0x1f94:'\u1f24\u03b9', 0x1f95:'\u1f25\u03b9', 0x1f96:'\u1f26\u03b9',
0x1f97:'\u1f27\u03b9', 0x1f98:'\u1f20\u03b9', 0x1f99:'\u1f21\u03b9', 0x1f9a:'\u1f22\u03b9',
0x1f9b:'\u1f23\u03b9', 0x1f9c:'\u1f24\u03b9', 0x1f9d:'\u1f25\u03b9', 0x1f9e:'\u1f26\u03b9',
0x1f9f:'\u1f27\u03b9', 0x1fa0:'\u1f60\u03b9', 0x1fa1:'\u1f61\u03b9', 0x1fa2:'\u1f62\u03b9',
0x1fa3:'\u1f63\u03b9', 0x1fa4:'\u1f64\u03b9', 0x1fa5:'\u1f65\u03b9', 0x1fa6:'\u1f66\u03b9',
0x1fa7:'\u1f67\u03b9', 0x1fa8:'\u1f60\u03b9', 0x1fa9:'\u1f61\u03b9', 0x1faa:'\u1f62\u03b9',
0x1fab:'\u1f63\u03b9', 0x1fac:'\u1f64\u03b9', 0x1fad:'\u1f65\u03b9', 0x1fae:'\u1f66\u03b9',
0x1faf:'\u1f67\u03b9', 0x1fb2:'\u1f70\u03b9', 0x1fb3:'\u03b1\u03b9', 0x1fb4:'\u03ac\u03b9',
0x1fb6:'\u03b1\u0342', 0x1fb7:'\u03b1\u0342\u03b9', 0x1fbc:'\u03b1\u03b9', 0x1fbe:'\u03b9',
0x1fc2:'\u1f74\u03b9', 0x1fc3:'\u03b7\u03b9', 0x1fc4:'\u03ae\u03b9', 0x1fc6:'\u03b7\u0342',
0x1fc7:'\u03b7\u0342\u03b9', 0x1fcc:'\u03b7\u03b9', 0x1fd2:'\u03b9\u0308\u0300', 0x1fd3:'\u03b9\u0308\u0301',
0x1fd6:'\u03b9\u0342', 0x1fd7:'\u03b9\u0308\u0342', 0x1fe2:'\u03c5\u0308\u0300', 0x1fe3:'\u03c5\u0308\u0301',
0x1fe4:'\u03c1\u0313', 0x1fe6:'\u03c5\u0342', 0x1fe7:'\u03c5\u0308\u0342', 0x1ff2:'\u1f7c\u03b9',
0x1ff3:'\u03c9\u03b9', 0x1ff4:'\u03ce\u03b9', 0x1ff6:'\u03c9\u0342', 0x1ff7:'\u03c9\u0342\u03b9',
0x1ffc:'\u03c9\u03b9', 0x20a8:'rs', 0x2102:'c', 0x2103:'\xb0c',
0x2107:'\u025b', 0x2109:'\xb0f', 0x210b:'h', 0x210c:'h',
0x210d:'h', 0x2110:'i', 0x2111:'i', 0x2112:'l',
0x2115:'n', 0x2116:'no', 0x2119:'p', 0x211a:'q',
0x211b:'r', 0x211c:'r', 0x211d:'r', 0x2120:'sm',
0x2121:'tel', 0x2122:'tm', 0x2124:'z', 0x2128:'z',
0x212c:'b', 0x212d:'c', 0x2130:'e', 0x2131:'f',
0x2133:'m', 0x213e:'\u03b3', 0x213f:'\u03c0', 0x2145:'d',
0x3371:'hpa', 0x3373:'au', 0x3375:'ov', 0x3380:'pa',
0x3381:'na', 0x3382:'\u03bca', 0x3383:'ma', 0x3384:'ka',
0x3385:'kb', 0x3386:'mb', 0x3387:'gb', 0x338a:'pf',
0x338b:'nf', 0x338c:'\u03bcf', 0x3390:'hz', 0x3391:'khz',
0x3392:'mhz', 0x3393:'ghz', 0x3394:'thz', 0x33a9:'pa',
0x33aa:'kpa', 0x33ab:'mpa', 0x33ac:'gpa', 0x33b4:'pv',
0x33b5:'nv', 0x33b6:'\u03bcv', 0x33b7:'mv', 0x33b8:'kv',
0x33b9:'mv', 0x33ba:'pw', 0x33bb:'nw', 0x33bc:'\u03bcw',
0x33bd:'mw', 0x33be:'kw', 0x33bf:'mw', 0x33c0:'k\u03c9',
0x33c1:'m\u03c9', 0x33c3:'bq', 0x33c6:'c\u2215kg', 0x33c7:'co.',
0x33c8:'db', 0x33c9:'gy', 0x33cb:'hp', 0x33cd:'kk',
0x33ce:'km', 0x33d7:'ph', 0x33d9:'ppm', 0x33da:'pr',
0x33dc:'sv', 0x33dd:'wb', 0xfb00:'ff', 0xfb01:'fi',
0xfb02:'fl', 0xfb03:'ffi', 0xfb04:'ffl', 0xfb05:'st',
0xfb06:'st', 0xfb13:'\u0574\u0576', 0xfb14:'\u0574\u0565', 0xfb15:'\u0574\u056b',
0xfb16:'\u057e\u0576', 0xfb17:'\u0574\u056d', 0x1d400:'a', 0x1d401:'b',
0x1d402:'c', 0x1d403:'d', 0x1d404:'e', 0x1d405:'f',
0x1d406:'g', 0x1d407:'h', 0x1d408:'i', 0x1d409:'j',
0x1d40a:'k', 0x1d40b:'l', 0x1d40c:'m', 0x1d40d:'n',
0x1d40e:'o', 0x1d40f:'p', 0x1d410:'q', 0x1d411:'r',
0x1d412:'s', 0x1d413:'t', 0x1d414:'u', 0x1d415:'v',
0x1d416:'w', 0x1d417:'x', 0x1d418:'y', 0x1d419:'z',
0x1d434:'a', 0x1d435:'b', 0x1d436:'c', 0x1d437:'d',
0x1d438:'e', 0x1d439:'f', 0x1d43a:'g', 0x1d43b:'h',
0x1d43c:'i', 0x1d43d:'j', 0x1d43e:'k', 0x1d43f:'l',
0x1d440:'m', 0x1d441:'n', 0x1d442:'o', 0x1d443:'p',
0x1d444:'q', 0x1d445:'r', 0x1d446:'s', 0x1d447:'t',
0x1d448:'u', 0x1d449:'v', 0x1d44a:'w', 0x1d44b:'x',
0x1d44c:'y', 0x1d44d:'z', 0x1d468:'a', 0x1d469:'b',
0x1d46a:'c', 0x1d46b:'d', 0x1d46c:'e', 0x1d46d:'f',
0x1d46e:'g', 0x1d46f:'h', 0x1d470:'i', 0x1d471:'j',
0x1d472:'k', 0x1d473:'l', 0x1d474:'m', 0x1d475:'n',
0x1d476:'o', 0x1d477:'p', 0x1d478:'q', 0x1d479:'r',
0x1d47a:'s', 0x1d47b:'t', 0x1d47c:'u', 0x1d47d:'v',
0x1d47e:'w', 0x1d47f:'x', 0x1d480:'y', 0x1d481:'z',
0x1d49c:'a', 0x1d49e:'c', 0x1d49f:'d', 0x1d4a2:'g',
0x1d4a5:'j', 0x1d4a6:'k', 0x1d4a9:'n', 0x1d4aa:'o',
0x1d4ab:'p', 0x1d4ac:'q', 0x1d4ae:'s', 0x1d4af:'t',
0x1d4b0:'u', 0x1d4b1:'v', 0x1d4b2:'w', 0x1d4b3:'x',
0x1d4b4:'y', 0x1d4b5:'z', 0x1d4d0:'a', 0x1d4d1:'b',
0x1d4d2:'c', 0x1d4d3:'d', 0x1d4d4:'e', 0x1d4d5:'f',
0x1d4d6:'g', 0x1d4d7:'h', 0x1d4d8:'i', 0x1d4d9:'j',
0x1d4da:'k', 0x1d4db:'l', 0x1d4dc:'m', 0x1d4dd:'n',
0x1d4de:'o', 0x1d4df:'p', 0x1d4e0:'q', 0x1d4e1:'r',
0x1d4e2:'s', 0x1d4e3:'t', 0x1d4e4:'u', 0x1d4e5:'v',
0x1d4e6:'w', 0x1d4e7:'x', 0x1d4e8:'y', 0x1d4e9:'z',
0x1d504:'a', 0x1d505:'b', 0x1d507:'d', 0x1d508:'e',
0x1d509:'f', 0x1d50a:'g', 0x1d50d:'j', 0x1d50e:'k',
0x1d50f:'l', 0x1d510:'m', 0x1d511:'n', 0x1d512:'o',
0x1d513:'p', 0x1d514:'q', 0x1d516:'s', 0x1d517:'t',
0x1d518:'u', 0x1d519:'v', 0x1d51a:'w', 0x1d51b:'x',
0x1d51c:'y', 0x1d538:'a', 0x1d539:'b', 0x1d53b:'d',
0x1d53c:'e', 0x1d53d:'f', 0x1d53e:'g', 0x1d540:'i',
0x1d541:'j', 0x1d542:'k', 0x1d543:'l', 0x1d544:'m',
0x1d546:'o', 0x1d54a:'s', 0x1d54b:'t', 0x1d54c:'u',
0x1d54d:'v', 0x1d54e:'w', 0x1d54f:'x', 0x1d550:'y',
0x1d56c:'a', 0x1d56d:'b', 0x1d56e:'c', 0x1d56f:'d',
0x1d570:'e', 0x1d571:'f', 0x1d572:'g', 0x1d573:'h',
0x1d574:'i', 0x1d575:'j', 0x1d576:'k', 0x1d577:'l',
0x1d578:'m', 0x1d579:'n', 0x1d57a:'o', 0x1d57b:'p',
0x1d57c:'q', 0x1d57d:'r', 0x1d57e:'s', 0x1d57f:'t',
0x1d580:'u', 0x1d581:'v', 0x1d582:'w', 0x1d583:'x',
0x1d584:'y', 0x1d585:'z', 0x1d5a0:'a', 0x1d5a1:'b',
0x1d5a2:'c', 0x1d5a3:'d', 0x1d5a4:'e', 0x1d5a5:'f',
0x1d5a6:'g', 0x1d5a7:'h', 0x1d5a8:'i', 0x1d5a9:'j',
0x1d5aa:'k', 0x1d5ab:'l', 0x1d5ac:'m', 0x1d5ad:'n',
0x1d5ae:'o', 0x1d5af:'p', 0x1d5b0:'q', 0x1d5b1:'r',
0x1d5b2:'s', 0x1d5b3:'t', 0x1d5b4:'u', 0x1d5b5:'v',
0x1d5b6:'w', 0x1d5b7:'x', 0x1d5b8:'y', 0x1d5b9:'z',
0x1d5d4:'a', 0x1d5d5:'b', 0x1d5d6:'c', 0x1d5d7:'d',
0x1d5d8:'e', 0x1d5d9:'f', 0x1d5da:'g', 0x1d5db:'h',
0x1d5dc:'i', 0x1d5dd:'j', 0x1d5de:'k', 0x1d5df:'l',
0x1d5e0:'m', 0x1d5e1:'n', 0x1d5e2:'o', 0x1d5e3:'p',
0x1d5e4:'q', 0x1d5e5:'r', 0x1d5e6:'s', 0x1d5e7:'t',
0x1d5e8:'u', 0x1d5e9:'v', 0x1d5ea:'w', 0x1d5eb:'x',
0x1d5ec:'y', 0x1d5ed:'z', 0x1d608:'a', 0x1d609:'b',
0x1d60a:'c', 0x1d60b:'d', 0x1d60c:'e', 0x1d60d:'f',
0x1d60e:'g', 0x1d60f:'h', 0x1d610:'i', 0x1d611:'j',
0x1d612:'k', 0x1d613:'l', 0x1d614:'m', 0x1d615:'n',
0x1d616:'o', 0x1d617:'p', 0x1d618:'q', 0x1d619:'r',
0x1d61a:'s', 0x1d61b:'t', 0x1d61c:'u', 0x1d61d:'v',
0x1d61e:'w', 0x1d61f:'x', 0x1d620:'y', 0x1d621:'z',
0x1d63c:'a', 0x1d63d:'b', 0x1d63e:'c', 0x1d63f:'d',
0x1d640:'e', 0x1d641:'f', 0x1d642:'g', 0x1d643:'h',
0x1d644:'i', 0x1d645:'j', 0x1d646:'k', 0x1d647:'l',
0x1d648:'m', 0x1d649:'n', 0x1d64a:'o', 0x1d64b:'p',
0x1d64c:'q', 0x1d64d:'r', 0x1d64e:'s', 0x1d64f:'t',
0x1d650:'u', 0x1d651:'v', 0x1d652:'w', 0x1d653:'x',
0x1d654:'y', 0x1d655:'z', 0x1d670:'a', 0x1d671:'b',
0x1d672:'c', 0x1d673:'d', 0x1d674:'e', 0x1d675:'f',
0x1d676:'g', 0x1d677:'h', 0x1d678:'i', 0x1d679:'j',
0x1d67a:'k', 0x1d67b:'l', 0x1d67c:'m', 0x1d67d:'n',
0x1d67e:'o', 0x1d67f:'p', 0x1d680:'q', 0x1d681:'r',
0x1d682:'s', 0x1d683:'t', 0x1d684:'u', 0x1d685:'v',
0x1d686:'w', 0x1d687:'x', 0x1d688:'y', 0x1d689:'z',
0x1d6a8:'\u03b1', 0x1d6a9:'\u03b2', 0x1d6aa:'\u03b3', 0x1d6ab:'\u03b4',
0x1d6ac:'\u03b5', 0x1d6ad:'\u03b6', 0x1d6ae:'\u03b7', 0x1d6af:'\u03b8',
0x1d6b0:'\u03b9', 0x1d6b1:'\u03ba', 0x1d6b2:'\u03bb', 0x1d6b3:'\u03bc',
0x1d6b4:'\u03bd', 0x1d6b5:'\u03be', 0x1d6b6:'\u03bf', 0x1d6b7:'\u03c0',
0x1d6b8:'\u03c1', 0x1d6b9:'\u03b8', 0x1d6ba:'\u03c3', 0x1d6bb:'\u03c4',
0x1d6bc:'\u03c5', 0x1d6bd:'\u03c6', 0x1d6be:'\u03c7', 0x1d6bf:'\u03c8',
0x1d6c0:'\u03c9', 0x1d6d3:'\u03c3', 0x1d6e2:'\u03b1', 0x1d6e3:'\u03b2',
0x1d6e4:'\u03b3', 0x1d6e5:'\u03b4', 0x1d6e6:'\u03b5', 0x1d6e7:'\u03b6',
0x1d6e8:'\u03b7', 0x1d6e9:'\u03b8', 0x1d6ea:'\u03b9', 0x1d6eb:'\u03ba',
0x1d6ec:'\u03bb', 0x1d6ed:'\u03bc', 0x1d6ee:'\u03bd', 0x1d6ef:'\u03be',
0x1d6f0:'\u03bf', 0x1d6f1:'\u03c0', 0x1d6f2:'\u03c1', 0x1d6f3:'\u03b8',
0x1d6f4:'\u03c3', 0x1d6f5:'\u03c4', 0x1d6f6:'\u03c5', 0x1d6f7:'\u03c6',
0x1d6f8:'\u03c7', 0x1d6f9:'\u03c8', 0x1d6fa:'\u03c9', 0x1d70d:'\u03c3',
0x1d71c:'\u03b1', 0x1d71d:'\u03b2', 0x1d71e:'\u03b3', 0x1d71f:'\u03b4',
0x1d720:'\u03b5', 0x1d721:'\u03b6', 0x1d722:'\u03b7', 0x1d723:'\u03b8',
0x1d724:'\u03b9', 0x1d725:'\u03ba', 0x1d726:'\u03bb', 0x1d727:'\u03bc',
0x1d728:'\u03bd', 0x1d729:'\u03be', 0x1d72a:'\u03bf', 0x1d72b:'\u03c0',
0x1d72c:'\u03c1', 0x1d72d:'\u03b8', 0x1d72e:'\u03c3', 0x1d72f:'\u03c4',
0x1d730:'\u03c5', 0x1d731:'\u03c6', 0x1d732:'\u03c7', 0x1d733:'\u03c8',
0x1d734:'\u03c9', 0x1d747:'\u03c3', 0x1d756:'\u03b1', 0x1d757:'\u03b2',
0x1d758:'\u03b3', 0x1d759:'\u03b4', 0x1d75a:'\u03b5', 0x1d75b:'\u03b6',
0x1d75c:'\u03b7', 0x1d75d:'\u03b8', 0x1d75e:'\u03b9', 0x1d75f:'\u03ba',
0x1d760:'\u03bb', 0x1d761:'\u03bc', 0x1d762:'\u03bd', 0x1d763:'\u03be',
0x1d764:'\u03bf', 0x1d765:'\u03c0', 0x1d766:'\u03c1', 0x1d767:'\u03b8',
0x1d768:'\u03c3', 0x1d769:'\u03c4', 0x1d76a:'\u03c5', 0x1d76b:'\u03c6',
0x1d76c:'\u03c7', 0x1d76d:'\u03c8', 0x1d76e:'\u03c9', 0x1d781:'\u03c3',
0x1d790:'\u03b1', 0x1d791:'\u03b2', 0x1d792:'\u03b3', 0x1d793:'\u03b4',
0x1d794:'\u03b5', 0x1d795:'\u03b6', 0x1d796:'\u03b7', 0x1d797:'\u03b8',
0x1d798:'\u03b9', 0x1d799:'\u03ba', 0x1d79a:'\u03bb', 0x1d79b:'\u03bc',
0x1d79c:'\u03bd', 0x1d79d:'\u03be', 0x1d79e:'\u03bf', 0x1d79f:'\u03c0',
0x1d7a0:'\u03c1', 0x1d7a1:'\u03b8', 0x1d7a2:'\u03c3', 0x1d7a3:'\u03c4',
0x1d7a4:'\u03c5', 0x1d7a5:'\u03c6', 0x1d7a6:'\u03c7', 0x1d7a7:'\u03c8',
0x1d7a8:'\u03c9', 0x1d7bb:'\u03c3', }
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = "".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
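# Illustrative sketch (not part of the original module): table B.3 folds
# case (with the exceptions above), and table B.2 additionally applies
# NFKC normalization.
#
#   assert map_table_b3('\u00df') == 'ss'    # LATIN SMALL LETTER SHARP S
#   assert map_table_b2('\ufb01') == 'fi'    # LATIN SMALL LIGATURE FI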
def in_table_c11(code):
return code == " "
def in_table_c12(code):
return unicodedata.category(code) == "Zs" and code != " "
def in_table_c11_c12(code):
return unicodedata.category(code) == "Zs"
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == "Cc"
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + list(range(8288,8292)) + list(range(8298,8304)) + list(range(65529,65533)) + list(range(119155,119163)))
def in_table_c22(code):
c = ord(code)
if c < 128: return False
if unicodedata.category(code) == "Cc": return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == "Cc" or \
ord(code) in c22_specials
def in_table_c3(code):
return unicodedata.category(code) == "Co"
def in_table_c4(code):
c = ord(code)
if c < 0xFDD0: return False
if c < 0xFDF0: return True
    return (c & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
return unicodedata.category(code) == "Cs"
c6_set = set(range(65529,65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272,12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + list(range(8234,8239)) + list(range(8298,8304)))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + list(range(917536,917632)))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ("R","AL")
def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"
cosmopolitan/third_party/python/Lib/profile.py
#! /usr/bin/env python3
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Written by James Roskind
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
"""Class for profiling Python code."""
# Copyright Disney Enterprises, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
import time
import marshal
from optparse import OptionParser
__all__ = ["run", "runctx", "Profile"]
# Sample timer for use with an integer-based event counter:
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
class _Utils:
"""Support class for utility functions which are shared by
profile.py and cProfile.py modules.
Not supposed to be used directly.
"""
def __init__(self, profiler):
self.profiler = profiler
def run(self, statement, filename, sort):
prof = self.profiler()
try:
prof.run(statement)
except SystemExit:
pass
finally:
self._show(prof, filename, sort)
def runctx(self, statement, globals, locals, filename, sort):
prof = self.profiler()
try:
prof.runctx(statement, globals, locals)
except SystemExit:
pass
finally:
self._show(prof, filename, sort)
def _show(self, prof, filename, sort):
if filename is not None:
prof.dump_stats(filename)
else:
prof.print_stats(sort)
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
return _Utils(Profile).run(statement, filename, sort)
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
return _Utils(Profile).runctx(statement, globals, locals, filename, sort)
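# Illustrative usage sketch (not part of the original module):
#
#   import profile
#   profile.run('sum(range(100000))')              # report to stdout
#   profile.run('sum(range(100000))', 'out.prof')  # or dump marshal stats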
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frames local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact (frame and previous tuple). In case an internal error is
detected, the -3 element is used as the function name.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions (this latter is tallied in cur[2]).
[ 2] = Total time spent in subfunctions, excluding time executing the
frame's function (this latter is tallied in cur[1]).
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling).
[-1] = Our parent 6-tuple (corresponds to frame.f_back).
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
bias = 0 # calibration constant
def __init__(self, timer=None, bias=None):
self.timings = {}
self.cur = None
self.cmd = ""
self.c_func_name = ""
if bias is None:
bias = self.bias
self.bias = bias # Materialize in local dict for lookup speed.
if not timer:
self.timer = self.get_time = time.process_time
self.dispatcher = self.trace_dispatch_i
else:
self.timer = timer
t = self.timer() # test out timer function
try:
length = len(t)
except TypeError:
self.get_time = timer
self.dispatcher = self.trace_dispatch_i
else:
if length == 2:
self.dispatcher = self.trace_dispatch
else:
self.dispatcher = self.trace_dispatch_l
# This get_time() implementation needs to be defined
# here to capture the passed-in timer in the parameter
# list (for performance). Note that we can't assume
# the timer() result contains two values in all
# cases.
def get_time_timer(timer=timer, sum=sum):
return sum(timer())
self.get_time = get_time_timer
self.t = self.get_time()
self.simulate_call('profiler')
# Heavily optimized dispatch routine for os.times() timer
def trace_dispatch(self, frame, event, arg):
timer = self.timer
t = timer()
t = t[0] + t[1] - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame,t):
t = timer()
self.t = t[0] + t[1]
else:
r = timer()
self.t = r[0] + r[1] - t # put back unrecorded delta
# Dispatch routine for best timer program (return = scalar, fastest if
# an integer but float works too -- and time.clock() relies on that).
def trace_dispatch_i(self, frame, event, arg):
timer = self.timer
t = timer() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()
else:
self.t = timer() - t # put back unrecorded delta
# Dispatch routine for macintosh (timer returns time in ticks of
# 1/60th second)
def trace_dispatch_mac(self, frame, event, arg):
timer = self.timer
t = timer()/60.0 - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()/60.0
else:
self.t = timer()/60.0 - t # put back unrecorded delta
# SLOW generic dispatch routine for timer returning lists of numbers
def trace_dispatch_l(self, frame, event, arg):
get_time = self.get_time
t = get_time() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = get_time()
else:
self.t = get_time() - t # put back unrecorded delta
# In the event handlers, the first 3 elements of self.cur are unpacked
# into vrbls w/ 3-letter names. The last two characters are meant to be
# mnemonic:
# _pt self.cur[0] "parent time" time to be charged to parent frame
# _it self.cur[1] "internal time" time spent directly in the function
# _et self.cur[2] "external time" time spent in subfunctions
def trace_dispatch_exception(self, frame, t):
rpt, rit, ret, rfn, rframe, rcur = self.cur
if (rframe is not frame) and rcur:
return self.trace_dispatch_return(rframe, t)
self.cur = rpt, rit+t, ret, rfn, rframe, rcur
return 1
def trace_dispatch_call(self, frame, t):
if self.cur and frame.f_back is not self.cur[-2]:
rpt, rit, ret, rfn, rframe, rcur = self.cur
if not isinstance(rframe, Profile.fake_frame):
assert rframe.f_back is frame.f_back, ("Bad call", rfn,
rframe, rframe.f_back,
frame, frame.f_back)
self.trace_dispatch_return(rframe, 0)
assert (self.cur is None or \
frame.f_back is self.cur[-2]), ("Bad call",
self.cur[-3])
fcode = frame.f_code
fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns + 1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_c_call (self, frame, t):
fn = ("", 0, self.c_func_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns+1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
if frame is not self.cur[-2]:
assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
self.trace_dispatch_return(self.cur[-2], 0)
# Prefix "r" means part of the Returning or exiting frame.
# Prefix "p" means part of the Previous or Parent or older frame.
rpt, rit, ret, rfn, frame, rcur = self.cur
rit = rit + t
frame_total = rit + ret
ppt, pit, pet, pfn, pframe, pcur = rcur
self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
timings = self.timings
cc, ns, tt, ct, callers = timings[rfn]
if not ns:
# This is the only occurrence of the function on the stack.
# Else this is a (directly or indirectly) recursive call, and
# its cumulative time will get updated when the topmost call to
# it returns.
ct = ct + frame_total
cc = cc + 1
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
# stats such as the amount of time added to ct courtesy
# of this specific call, and the contribution to cc
# courtesy of this call.
else:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
return 1
dispatch = {
"call": trace_dispatch_call,
"exception": trace_dispatch_exception,
"return": trace_dispatch_return,
"c_call": trace_dispatch_c_call,
"c_exception": trace_dispatch_return, # the C function returned
"c_return": trace_dispatch_return,
}
# The next few functions play with self.cmd. By carefully preloading
# our parallel stack, we can force the profiled result to include
# an arbitrary string as the name of the calling function.
# We use self.cmd as that string, and the resulting stats look
# very nice :-).
def set_cmd(self, cmd):
if self.cur[-1]: return # already set
self.cmd = cmd
self.simulate_call(cmd)
class fake_code:
def __init__(self, filename, line, name):
self.co_filename = filename
self.co_line = line
self.co_name = name
self.co_firstlineno = 0
def __repr__(self):
return repr((self.co_filename, self.co_line, self.co_name))
class fake_frame:
def __init__(self, code, prior):
self.f_code = code
self.f_back = prior
def simulate_call(self, name):
code = self.fake_code('profile', 0, name)
if self.cur:
pframe = self.cur[-2]
else:
pframe = None
frame = self.fake_frame(code, pframe)
self.dispatch['call'](self, frame, 0)
# collect stats from pending stack, including getting final
# timings for self.cmd frame.
def simulate_cmd_complete(self):
get_time = self.get_time
t = get_time() - self.t
while self.cur[-1]:
# We *can* cause assertion errors here if
# dispatch_trace_return checks for a frame match!
self.dispatch['return'](self, self.cur[-2], t)
t = 0
self.t = get_time() - t
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort). \
print_stats()
def dump_stats(self, file):
with open(file, 'wb') as f:
self.create_stats()
marshal.dump(self.stats, f)
def create_stats(self):
self.simulate_cmd_complete()
self.snapshot_stats()
def snapshot_stats(self):
self.stats = {}
for func, (cc, ns, tt, ct, callers) in self.timings.items():
callers = callers.copy()
nc = 0
for callcnt in callers.values():
nc += callcnt
self.stats[func] = cc, nc, tt, ct, callers
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.set_cmd(cmd)
sys.setprofile(self.dispatcher)
try:
exec(cmd, globals, locals)
finally:
sys.setprofile(None)
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.set_cmd(repr(func))
sys.setprofile(self.dispatcher)
try:
return func(*args, **kw)
finally:
sys.setprofile(None)
#******************************************************************
# The following calculates the overhead for using a profiler. The
# problem is that it takes a fair amount of time for the profiler
# to stop the stopwatch (from the time it receives an event).
# Similarly, there is a delay from the time that the profiler
# re-starts the stopwatch before the user's code really gets to
# continue. The following code tries to measure the difference on
# a per-event basis.
#
# Note that this difference is only significant if there are a lot of
# events, and relatively little user code per event. For example,
# code with small functions will typically benefit from having the
# profiler calibrated for the current platform. This *could* be
# done on the fly during init() time, but it is not worth the
    # effort. Also note that if too large a value is specified, then
# execution time on some functions will actually appear as a
# negative number. It is *normal* for some functions (with very
# low call counts) to have such negative stats, even if the
# calibration figure is "correct."
#
# One alternative to profile-time calibration adjustments (i.e.,
# adding in the magic little delta during each event) is to track
# more carefully the number of events (and cumulatively, the number
# of events during sub functions) that are seen. If this were
# done, then the arithmetic could be done after the fact (i.e., at
# display time). Currently, we track only call/return events.
# These values can be deduced by examining the callees and callers
    # vectors for each function. Hence we *can* almost correct the
# internal time figure at print time (note that we currently don't
# track exception event processing counts). Unfortunately, there
# is currently no similar information for cumulative sub-function
# time. It would not be hard to "get all this info" at profiler
# time. Specifically, we would have to extend the tuples to keep
# counts of this in each frame, and then extend the defs of timing
# tuples to include the significant two figures. I'm a bit fearful
# that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
# low "value added" feature.)
#**************************************************************
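    # Illustrative usage sketch (not part of the original module): measure
    # the per-event overhead once, then install it as the class-wide bias.
    #
    #   import profile
    #   pr = profile.Profile()
    #   profile.Profile.bias = pr.calibrate(10000)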
def calibrate(self, m, verbose=0):
if self.__class__ is not Profile:
raise TypeError("Subclasses must override .calibrate().")
saved_bias = self.bias
self.bias = 0
try:
return self._calibrate_inner(m, verbose)
finally:
self.bias = saved_bias
def _calibrate_inner(self, m, verbose):
get_time = self.get_time
# Set up a test case to be run with and without profiling. Include
# lots of calls, because we're trying to quantify stopwatch overhead.
# Do not raise any exceptions, though, because we want to know
# exactly how many profile events are generated (one call event, +
# one return event, per Python-level call).
def f1(n):
for i in range(n):
x = 1
def f(m, f1=f1):
for i in range(m):
f1(100)
f(m) # warm up the cache
# elapsed_noprofile <- time f(m) takes without profiling.
t0 = get_time()
f(m)
t1 = get_time()
elapsed_noprofile = t1 - t0
if verbose:
print("elapsed time without profiling =", elapsed_noprofile)
# elapsed_profile <- time f(m) takes with profiling. The difference
# is profiling overhead, only some of which the profiler subtracts
# out on its own.
p = Profile()
t0 = get_time()
p.runctx('f(m)', globals(), locals())
t1 = get_time()
elapsed_profile = t1 - t0
if verbose:
print("elapsed time with profiling =", elapsed_profile)
# reported_time <- "CPU seconds" the profiler charged to f and f1.
total_calls = 0.0
reported_time = 0.0
for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
p.timings.items():
if funcname in ("f", "f1"):
total_calls += cc
reported_time += tt
if verbose:
print("'CPU seconds' profiler reported =", reported_time)
print("total # calls =", total_calls)
if total_calls != m + 1:
raise ValueError("internal error: total calls = %d" % total_calls)
# reported_time - elapsed_noprofile = overhead the profiler wasn't
# able to measure. Divide by twice the number of calls (since there
# are two profiler events per call in this test) to get the hidden
# overhead per event.
mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
if verbose:
print("mean stopwatch overhead per profile event =", mean)
return mean
#****************************************************************************
def main():
usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
cosmopolitan/third_party/python/Lib/sunau.py
"""Stuff to parse Sun and NeXT audio files.
An audio file consists of a header followed by the data. The structure
of the header is as follows.
+---------------+
| magic word |
+---------------+
| header size |
+---------------+
| data size |
+---------------+
| encoding |
+---------------+
| sample rate |
+---------------+
| # of channels |
+---------------+
| info |
| |
+---------------+
The magic word consists of the 4 characters '.snd'. Apart from the
info field, all header fields are 4 bytes in size. They are all
32-bit unsigned integers encoded in big-endian byte order.
The header size field gives the offset at which the data starts.
The data size is the physical size of the data. From the other
parameters the number of frames can be calculated.
The encoding gives the way in which audio samples are encoded.
Possible values are listed below.
The info field currently consists of an ASCII string giving a
human-readable description of the audio file. The info field is
padded with NUL bytes to the header size.
Usage.
Reading audio files:
f = sunau.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' or 'ULAW')
getcompname() -- returns human-readable version of
compression type ('not compressed' matches 'NONE')
getparams() -- returns a namedtuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing audio files:
f = sunau.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                  -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, including the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes(b'') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
from collections import namedtuple
_sunau_params = namedtuple('_sunau_params',
'nchannels sampwidth framerate nframes comptype compname')
# from <multimedia/audio_filehdr.h>
AUDIO_FILE_MAGIC = 0x2e736e64
AUDIO_FILE_ENCODING_MULAW_8 = 1
AUDIO_FILE_ENCODING_LINEAR_8 = 2
AUDIO_FILE_ENCODING_LINEAR_16 = 3
AUDIO_FILE_ENCODING_LINEAR_24 = 4
AUDIO_FILE_ENCODING_LINEAR_32 = 5
AUDIO_FILE_ENCODING_FLOAT = 6
AUDIO_FILE_ENCODING_DOUBLE = 7
AUDIO_FILE_ENCODING_ADPCM_G721 = 23
AUDIO_FILE_ENCODING_ADPCM_G722 = 24
AUDIO_FILE_ENCODING_ADPCM_G723_3 = 25
AUDIO_FILE_ENCODING_ADPCM_G723_5 = 26
AUDIO_FILE_ENCODING_ALAW_8 = 27
# from <multimedia/audio_hdr.h>
AUDIO_UNKNOWN_SIZE = 0xFFFFFFFF # ((unsigned)(~0))
_simple_encodings = [AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_LINEAR_8,
AUDIO_FILE_ENCODING_LINEAR_16,
AUDIO_FILE_ENCODING_LINEAR_24,
AUDIO_FILE_ENCODING_LINEAR_32,
AUDIO_FILE_ENCODING_ALAW_8]
class Error(Exception):
pass
def _read_u32(file):
x = 0
for i in range(4):
byte = file.read(1)
if not byte:
raise EOFError
x = x*256 + ord(byte)
return x
def _write_u32(file, x):
data = []
for i in range(4):
d, m = divmod(x, 256)
data.insert(0, int(m))
x = d
file.write(bytes(data))
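# Illustrative sketch (not part of the original module): the two helpers
# above implement a big-endian unsigned 32-bit integer, equivalent to the
# struct format '>L'.
#
#   import io, struct
#   buf = io.BytesIO()
#   _write_u32(buf, AUDIO_FILE_MAGIC)
#   assert buf.getvalue() == struct.pack('>L', AUDIO_FILE_MAGIC) == b'.snd'
#   buf.seek(0)
#   assert _read_u32(buf) == AUDIO_FILE_MAGIC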
class Au_read:
def __init__(self, f):
if type(f) == type(''):
import builtins
f = builtins.open(f, 'rb')
self._opened = True
else:
self._opened = False
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def initfp(self, file):
self._file = file
self._soundpos = 0
magic = int(_read_u32(file))
if magic != AUDIO_FILE_MAGIC:
raise Error('bad magic number')
self._hdr_size = int(_read_u32(file))
if self._hdr_size < 24:
raise Error('header size too small')
if self._hdr_size > 100:
raise Error('header size ridiculously large')
self._data_size = _read_u32(file)
if self._data_size != AUDIO_UNKNOWN_SIZE:
self._data_size = int(self._data_size)
self._encoding = int(_read_u32(file))
if self._encoding not in _simple_encodings:
raise Error('encoding not (yet) supported')
if self._encoding in (AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_ALAW_8):
self._sampwidth = 2
self._framesize = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_8:
self._framesize = self._sampwidth = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_16:
self._framesize = self._sampwidth = 2
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_24:
self._framesize = self._sampwidth = 3
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_32:
self._framesize = self._sampwidth = 4
else:
raise Error('unknown encoding')
self._framerate = int(_read_u32(file))
self._nchannels = int(_read_u32(file))
self._framesize = self._framesize * self._nchannels
if self._hdr_size > 24:
self._info = file.read(self._hdr_size - 24)
self._info, _, _ = self._info.partition(b'\0')
else:
self._info = b''
try:
self._data_pos = file.tell()
except (AttributeError, OSError):
self._data_pos = None
def getfp(self):
return self._file
def getnchannels(self):
return self._nchannels
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getnframes(self):
if self._data_size == AUDIO_UNKNOWN_SIZE:
return AUDIO_UNKNOWN_SIZE
if self._encoding in _simple_encodings:
return self._data_size // self._framesize
return 0 # XXX--must do some arithmetic here
def getcomptype(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'ULAW'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'ALAW'
else:
return 'NONE'
def getcompname(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'CCITT G.711 u-law'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def getparams(self):
return _sunau_params(self.getnchannels(), self.getsampwidth(),
self.getframerate(), self.getnframes(),
self.getcomptype(), self.getcompname())
def getmarkers(self):
return None
def getmark(self, id):
raise Error('no marks')
def readframes(self, nframes):
if self._encoding in _simple_encodings:
if nframes == AUDIO_UNKNOWN_SIZE:
data = self._file.read()
else:
data = self._file.read(nframes * self._framesize)
self._soundpos += len(data) // self._framesize
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
import audioop
data = audioop.ulaw2lin(data, self._sampwidth)
return data
return None # XXX--not implemented yet
def rewind(self):
if self._data_pos is None:
raise OSError('cannot seek')
self._file.seek(self._data_pos)
self._soundpos = 0
def tell(self):
return self._soundpos
def setpos(self, pos):
if pos < 0 or pos > self.getnframes():
raise Error('position not in range')
if self._data_pos is None:
raise OSError('cannot seek')
self._file.seek(self._data_pos + pos * self._framesize)
self._soundpos = pos
def close(self):
file = self._file
if file:
self._file = None
if self._opened:
file.close()
class Au_write:
def __init__(self, f):
if type(f) == type(''):
import builtins
f = builtins.open(f, 'wb')
self._opened = True
else:
self._opened = False
self.initfp(f)
def __del__(self):
if self._file:
self.close()
self._file = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def initfp(self, file):
self._file = file
self._framerate = 0
self._nchannels = 0
self._sampwidth = 0
self._framesize = 0
self._nframes = AUDIO_UNKNOWN_SIZE
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._info = b''
self._comptype = 'ULAW' # default is U-law
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if nchannels not in (1, 2, 4):
raise Error('only 1, 2, or 4 channels supported')
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error('number of channels not set')
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if sampwidth not in (1, 2, 3, 4):
raise Error('bad sample width')
self._sampwidth = sampwidth
def getsampwidth(self):
        if not self._sampwidth:
raise Error('sample width not specified')
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error('frame rate not set')
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error('cannot change parameters after starting to write')
if nframes < 0:
raise Error('# of frames cannot be negative')
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, type, name):
if type in ('NONE', 'ULAW'):
self._comptype = type
else:
raise Error('unknown compression type')
def getcomptype(self):
return self._comptype
def getcompname(self):
if self._comptype == 'ULAW':
return 'CCITT G.711 u-law'
elif self._comptype == 'ALAW':
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
return _sunau_params(self.getnchannels(), self.getsampwidth(),
self.getframerate(), self.getnframes(),
self.getcomptype(), self.getcompname())
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
if not isinstance(data, (bytes, bytearray)):
data = memoryview(data).cast('B')
self._ensure_header_written()
if self._comptype == 'ULAW':
import audioop
data = audioop.lin2ulaw(data, self._sampwidth)
nframes = len(data) // self._framesize
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
if self._file:
try:
self._ensure_header_written()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
finally:
file = self._file
self._file = None
if self._opened:
file.close()
#
# private methods
#
def _ensure_header_written(self):
if not self._nframeswritten:
if not self._nchannels:
raise Error('# of channels not specified')
if not self._sampwidth:
raise Error('sample width not specified')
if not self._framerate:
raise Error('frame rate not specified')
self._write_header()
def _write_header(self):
if self._comptype == 'NONE':
if self._sampwidth == 1:
encoding = AUDIO_FILE_ENCODING_LINEAR_8
self._framesize = 1
elif self._sampwidth == 2:
encoding = AUDIO_FILE_ENCODING_LINEAR_16
self._framesize = 2
elif self._sampwidth == 3:
encoding = AUDIO_FILE_ENCODING_LINEAR_24
self._framesize = 3
elif self._sampwidth == 4:
encoding = AUDIO_FILE_ENCODING_LINEAR_32
self._framesize = 4
else:
raise Error('internal error')
elif self._comptype == 'ULAW':
encoding = AUDIO_FILE_ENCODING_MULAW_8
self._framesize = 1
else:
raise Error('internal error')
self._framesize = self._framesize * self._nchannels
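        # Layout note (editorial): a .au header is six big-endian u32 words
        # (24 bytes) followed by the info string; the '25' below reserves at
        # least one NUL byte after the info, and rounding up to a multiple
        # of 8 keeps the data offset 8-byte aligned.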
_write_u32(self._file, AUDIO_FILE_MAGIC)
header_size = 25 + len(self._info)
header_size = (header_size + 7) & ~7
_write_u32(self._file, header_size)
if self._nframes == AUDIO_UNKNOWN_SIZE:
length = AUDIO_UNKNOWN_SIZE
else:
length = self._nframes * self._framesize
try:
self._form_length_pos = self._file.tell()
except (AttributeError, OSError):
self._form_length_pos = None
_write_u32(self._file, length)
self._datalength = length
_write_u32(self._file, encoding)
_write_u32(self._file, self._framerate)
_write_u32(self._file, self._nchannels)
self._file.write(self._info)
self._file.write(b'\0'*(header_size - len(self._info) - 24))
def _patchheader(self):
if self._form_length_pos is None:
raise OSError('cannot seek')
self._file.seek(self._form_length_pos)
_write_u32(self._file, self._datawritten)
self._datalength = self._datawritten
self._file.seek(0, 2)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Au_read(f)
elif mode in ('w', 'wb'):
return Au_write(f)
else:
raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open
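if __name__ == '__main__':
    # Editorial smoke test, not part of the original module: write one
    # second of 16-bit mono silence with Au_write, then read the header
    # back with Au_read. The output path is a placeholder.
    import os, tempfile
    path = os.path.join(tempfile.gettempdir(), 'sunau_demo.au')
    with open(path, 'wb') as f:          # module-level open() -> Au_write
        f.setnchannels(1)
        f.setsampwidth(2)
        f.setframerate(8000)
        f.setcomptype('NONE', 'not compressed')
        f.writeframes(b'\x00\x00' * 8000)
    with open(path, 'rb') as f:          # module-level open() -> Au_read
        print(f.getparams())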
cosmopolitan/third_party/python/Lib/_osx_support.py | """Shared OS X support functions."""
import os
import re
import sys
__all__ = [
'compiler_fixup',
'customize_config_vars',
'customize_compiler',
'get_platform_osx',
]
# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS')
# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
# prefix added to original configuration variable names
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
"""Find a build tool on current path or using xcrun"""
return (_find_executable(toolname)
or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
or ''
)
_SYSTEM_VERSION = None
def _get_system_version():
"""Return the OS X system version as a string"""
# Reading this plist is a documented way to get the system
# version (see the documentation for the Gestalt Manager)
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
# the build of Python itself (distutils is used to build standard library
# extensions).
global _SYSTEM_VERSION
if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = ''
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except OSError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
return _SYSTEM_VERSION
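# e.g. (illustrative): on a 10.14.6 system the plist yields '10.14' here,
# while a plain Darwin box without the plist yields ''.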
def _remove_original_values(_config_vars):
"""Remove original unmodified values for testing"""
# This is needed for higher-level cross-platform tests of get_platform.
for k in list(_config_vars):
if k.startswith(_INITPRE):
del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
"""Save modified and original unmodified value of configuration var"""
oldvalue = _config_vars.get(cv, '')
if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
_config_vars[_INITPRE + cv] = oldvalue
_config_vars[cv] = newvalue
def _supports_universal_builds():
"""Returns True if universal builds are supported on this system"""
# As an approximation, we assume that if we are running on 10.4 or above,
# then we are running with an Xcode environment that supports universal
# builds, in particular -isysroot and -arch arguments to the compiler. This
# is in support of allowing 10.4 universal builds to run on 10.3.x systems.
osx_version = _get_system_version()
if osx_version:
try:
osx_version = tuple(int(i) for i in osx_version.split('.'))
except ValueError:
osx_version = ''
return bool(osx_version >= (10, 4)) if osx_version else False
def _find_appropriate_compiler(_config_vars):
"""Find appropriate C compiler for extension module builds"""
# Issue #13590:
# The OSX location for the compiler varies between OSX
# (or rather Xcode) releases. With older releases (up-to 10.5)
# the compiler is in /usr/bin, with newer releases the compiler
# can only be found inside Xcode.app if the "Command Line Tools"
# are not installed.
#
# Furthermore, the compiler that can be used varies between
# Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
# as the compiler, after that 'clang' should be used because
# gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
# miscompiles Python.
# skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
# The CC config var might contain additional arguments.
# Ignore them while searching.
cc = oldcc = _config_vars['CC'].split()[0]
if not _find_executable(cc):
# Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command Line
# Tools have been installed in / or if the user has provided
# another location via CC). If not found, try using xcrun
# to find an uninstalled clang (within a selected Xcode).
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself (and os.popen is
# implemented on top of subprocess and is therefore not
# usable as well)
cc = _find_build_tool('clang')
elif os.path.basename(cc).startswith('gcc'):
# Compiler is GCC, check if it is LLVM-GCC
data = _read_output("'%s' --version"
% (cc.replace("'", "'\"'\"'"),))
if data and 'llvm-gcc' in data:
# Found LLVM-GCC, fall back to clang
cc = _find_build_tool('clang')
if not cc:
raise SystemError(
"Cannot locate working compiler")
if cc != oldcc:
# Found a replacement compiler.
# Modify config vars using new compiler, if not already explicitly
# overridden by an env variable, preserving additional arguments.
for cv in _COMPILER_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
cv_split = _config_vars[cv].split()
cv_split[0] = cc if cv != 'CXX' else cc + '++'
_save_modified_value(_config_vars, cv, ' '.join(cv_split))
return _config_vars
def _remove_universal_flags(_config_vars):
"""Remove all universal build arguments from config vars"""
for cv in _UNIVERSAL_CONFIG_VARS:
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _remove_unsupported_archs(_config_vars):
"""Remove any unsupported archs from config vars"""
# Different Xcode releases support different sets for '-arch'
# flags. In particular, Xcode 4.x no longer supports the
# PPC architectures.
#
# This code automatically removes '-arch ppc' and '-arch ppc64'
# when these are not supported. That makes it possible to
# build extensions on OSX 10.7 and later with the prebuilt
# 32-bit installer on the python.org website.
# skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself
status = os.system(
"""echo 'int main{};' | """
"""'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
%(_config_vars['CC'].replace("'", "'\"'\"'"),))
if status:
# The compile failed for some reason. Because of differences
# across Xcode and compiler versions, there is no reliable way
# to be sure why it failed. Assume here it was due to lack of
# PPC support and remove the related '-arch' flags from each
# config variables not explicitly overridden by an environment
# variable. If the error was for some other reason, we hope the
# failure will show up again when trying to compile an extension
# module.
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _override_all_archs(_config_vars):
"""Allow override of all archs with ARCHFLAGS env var"""
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and '-arch' in _config_vars[cv]:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _check_for_unavailable_sdk(_config_vars):
"""Remove references to any SDKs not available"""
# If we're on OSX 10.5 or later and the user tries to
# compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail. This is particularly important with
# the standalone Command Line Tools alternative to a
# full-blown Xcode install since the CLT packages do not
# provide SDKs. If the SDK is not present, it is assumed
# that the header files and dev libs have been installed
# to /usr and /System/Library by either a standalone CLT
# package or the CLT component within Xcode.
cflags = _config_vars.get('CFLAGS', '')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for cv in _UNIVERSAL_CONFIG_VARS:
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def compiler_fixup(compiler_so, cc_args):
"""
This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one of them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
"""
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
        # OSX before 10.4.0 doesn't support -arch and -isysroot at all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = '-isysroot' in cc_args
if stripArch or 'ARCHFLAGS' in os.environ:
while True:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
while True:
try:
index = compiler_so.index('-isysroot')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
if '-isysroot' in cc_args:
idx = cc_args.index('-isysroot')
sysroot = cc_args[idx+1]
elif '-isysroot' in compiler_so:
idx = compiler_so.index('-isysroot')
sysroot = compiler_so[idx+1]
if sysroot and not os.path.isdir(sysroot):
from distutils import log
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
sysroot)
log.warn("Please check your Xcode installation")
return compiler_so
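# Worked example (editorial; values are made up): when cc_args carries its
# own '-arch', every '-arch X' pair is stripped from compiler_so:
#
#   compiler_fixup(['clang', '-arch', 'ppc', '-arch', 'i386', '-g'],
#                  ['-arch', 'x86_64'])
#   -> ['clang', '-g']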
def customize_config_vars(_config_vars):
"""Customize Python build configuration variables.
Called internally from sysconfig with a mutable mapping
containing name/value pairs parsed from the configured
makefile used to build this interpreter. Returns
the mapping updated as needed to reflect the environment
in which the interpreter is running; in the case of
a Python from a binary installer, the installed
environment may be very different from the build
environment, i.e. different OS levels, different
    build tools, different available CPU architectures.
This customization is performed whenever
distutils.sysconfig.get_config_vars() is first
called. It may be used in environments where no
compilers are present, i.e. when installing pure
Python dists. Customization of compiler paths
and detection of unavailable archs is deferred
until the first extension module build is
requested (in distutils.sysconfig.customize_compiler).
Currently called from distutils.sysconfig
"""
if not _supports_universal_builds():
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
_remove_universal_flags(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
# Remove references to sdks that are not found
_check_for_unavailable_sdk(_config_vars)
return _config_vars
def customize_compiler(_config_vars):
"""Customize compiler path and configuration variables.
This customization is performed when the first
extension module build is requested
    (in distutils.sysconfig.customize_compiler).
"""
# Find a compiler to use for extension module builds
_find_appropriate_compiler(_config_vars)
# Remove ppc arch flags if not supported here
_remove_unsupported_archs(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
"""Filter values for get_platform()"""
# called from get_platform() in sysconfig and distutils.util
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
macrelease = _get_system_version() or macver
macver = macver or macrelease
if macver:
release = macver
osname = "macosx"
# Use the original CFLAGS value, if available, so that we
# return the same machine type for the platform string.
# Otherwise, distutils may consider this a cross-compiling
# case and disallow installs.
cflags = _config_vars.get(_INITPRE+'CFLAGS',
_config_vars.get('CFLAGS', ''))
if macrelease:
try:
macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
except ValueError:
macrelease = (10, 0)
else:
# assume no universal support
macrelease = (10, 0)
if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
machine = 'fat'
archs = re.findall(r'-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return (osname, release, machine)
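if __name__ == '__main__':
    # Editorial smoke test, not part of the original module: feed a fake
    # config-vars mapping through the filter (values are made up).
    _cfg = {'MACOSX_DEPLOYMENT_TARGET': '10.9',
            'CFLAGS': '-arch i386 -arch x86_64 -g'}
    print(get_platform_osx(_cfg, 'darwin', '13.4.0', 'x86_64'))
    # expected on these assumptions: ('macosx', '10.9', 'intel')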
cosmopolitan/third_party/python/Lib/types.py | """
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)
def _g():
yield 1
GeneratorType = type(_g())
async def _c(): pass
_c = _c()
CoroutineType = type(_c)
_c.close() # Prevent ResourceWarning
async def _ag():
yield
_ag = _ag()
AsyncGeneratorType = type(_ag)
class _C:
def _m(self): pass
MethodType = type(_C()._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
tb = None; del tb
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)
del sys, _f, _g, _C, _c, _ag  # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
"""Create a class object dynamically using the appropriate metaclass."""
meta, ns, kwds = prepare_class(name, bases, kwds)
if exec_body is not None:
exec_body(ns)
return meta(name, bases, ns, **kwds)
def prepare_class(name, bases=(), kwds=None):
"""Call the __prepare__ method of the appropriate metaclass.
Returns (metaclass, namespace, kwds) as a 3-tuple
*metaclass* is the appropriate metaclass
*namespace* is the prepared class namespace
*kwds* is an updated copy of the passed in kwds argument with any
'metaclass' entry removed. If no kwds argument is passed in, this will
be an empty dict.
"""
if kwds is None:
kwds = {}
else:
kwds = dict(kwds) # Don't alter the provided mapping
if 'metaclass' in kwds:
meta = kwds.pop('metaclass')
else:
if bases:
meta = type(bases[0])
else:
meta = type
if isinstance(meta, type):
# when meta is a type, we first determine the most-derived metaclass
# instead of invoking the initial candidate directly
meta = _calculate_meta(meta, bases)
if hasattr(meta, '__prepare__'):
ns = meta.__prepare__(name, bases, **kwds)
else:
ns = {}
return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
class DynamicClassAttribute:
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
This allows one to have properties active on an instance, and have virtual
attributes on the class with the same name (see Enum for an example).
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
# next two lines make DynamicClassAttribute act the same as property
self.__doc__ = doc or fget.__doc__
self.overwrite_doc = doc is None
# support for abstract methods
self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False))
def __get__(self, instance, ownerclass=None):
if instance is None:
if self.__isabstractmethod__:
return self
raise AttributeError()
elif self.fget is None:
raise AttributeError("unreadable attribute")
return self.fget(instance)
def __set__(self, instance, value):
if self.fset is None:
raise AttributeError("can't set attribute")
self.fset(instance, value)
def __delete__(self, instance):
if self.fdel is None:
raise AttributeError("can't delete attribute")
self.fdel(instance)
def getter(self, fget):
fdoc = fget.__doc__ if self.overwrite_doc else None
result = type(self)(fget, self.fset, self.fdel, fdoc or self.__doc__)
result.overwrite_doc = self.overwrite_doc
return result
def setter(self, fset):
result = type(self)(self.fget, fset, self.fdel, self.__doc__)
result.overwrite_doc = self.overwrite_doc
return result
def deleter(self, fdel):
result = type(self)(self.fget, self.fset, fdel, self.__doc__)
result.overwrite_doc = self.overwrite_doc
return result
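# Illustrative sketch of the routing described above (hypothetical names):
#
#   class Meta(type):
#       def __getattr__(cls, name):     # reached because the descriptor
#           return 'class ' + name      # raised AttributeError for cls access
#   class C(metaclass=Meta):
#       @DynamicClassAttribute
#       def value(self):
#           return 'instance value'
#
#   C().value  -> 'instance value'  (normal instance access)
#   C.value    -> 'class value'     (routed to Meta.__getattr__)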
import functools as _functools
import collections.abc as _collections_abc
class _GeneratorWrapper:
# TODO: Implement this in C.
def __init__(self, gen):
self.__wrapped = gen
self.__isgen = gen.__class__ is GeneratorType
self.__name__ = getattr(gen, '__name__', None)
self.__qualname__ = getattr(gen, '__qualname__', None)
def send(self, val):
return self.__wrapped.send(val)
def throw(self, tp, *rest):
return self.__wrapped.throw(tp, *rest)
def close(self):
return self.__wrapped.close()
@property
def gi_code(self):
return self.__wrapped.gi_code
@property
def gi_frame(self):
return self.__wrapped.gi_frame
@property
def gi_running(self):
return self.__wrapped.gi_running
@property
def gi_yieldfrom(self):
return self.__wrapped.gi_yieldfrom
cr_code = gi_code
cr_frame = gi_frame
cr_running = gi_running
cr_await = gi_yieldfrom
def __next__(self):
return next(self.__wrapped)
def __iter__(self):
if self.__isgen:
return self.__wrapped
return self
__await__ = __iter__
def coroutine(func):
"""Convert regular generator function to a coroutine."""
if not callable(func):
raise TypeError('types.coroutine() expects a callable')
if (func.__class__ is FunctionType and
getattr(func, '__code__', None).__class__ is CodeType):
co_flags = func.__code__.co_flags
# Check if 'func' is a coroutine function.
# (0x180 == CO_COROUTINE | CO_ITERABLE_COROUTINE)
if co_flags & 0x180:
return func
# Check if 'func' is a generator function.
# (0x20 == CO_GENERATOR)
if co_flags & 0x20:
# TODO: Implement this in C.
co = func.__code__
func.__code__ = CodeType(
co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize,
co.co_flags | 0x100, # 0x100 == CO_ITERABLE_COROUTINE
co.co_code,
co.co_consts, co.co_names, co.co_varnames, co.co_filename,
co.co_name, co.co_firstlineno, co.co_lnotab, co.co_freevars,
co.co_cellvars)
return func
# The following code is primarily to support functions that
# return generator-like objects (for instance generators
# compiled with Cython).
@_functools.wraps(func)
def wrapped(*args, **kwargs):
coro = func(*args, **kwargs)
if (coro.__class__ is CoroutineType or
coro.__class__ is GeneratorType and coro.gi_code.co_flags & 0x100):
# 'coro' is a native coroutine object or an iterable coroutine
return coro
if (isinstance(coro, _collections_abc.Generator) and
not isinstance(coro, _collections_abc.Coroutine)):
# 'coro' is either a pure Python generator iterator, or it
# implements collections.abc.Generator (and does not implement
# collections.abc.Coroutine).
return _GeneratorWrapper(coro)
# 'coro' is either an instance of collections.abc.Coroutine or
# some other object -- pass it through.
return coro
return wrapped
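# Illustrative sketch (names are examples): once wrapped, a generator-based
# coroutine can be awaited from native 'async def' code:
#
#   @coroutine
#   def delegate(fut):
#       return (yield from fut)
#
#   async def caller(fut):
#       return await delegate(fut)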
__all__ = [n for n in globals() if n[:1] != '_']
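if __name__ == '__main__':
    # Editorial smoke test, not part of the original module: build a class
    # dynamically via new_class()/prepare_class() instead of a class
    # statement (names are placeholders).
    Point = new_class('Point', (), exec_body=lambda ns: ns.update(x=0, y=0))
    p = Point()
    print(Point.__name__, p.x, p.y)    # -> Point 0 0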
cosmopolitan/third_party/python/Lib/imaplib.py | """IMAP4 client.
Based on RFC 2060.
Public class: IMAP4
Public variable: Debug
Public functions: Internaldate2tuple
Int2AP
ParseFlags
Time2Internaldate
"""
# Author: Piers Lauder <[email protected]> December 1997.
#
# Authentication code contributed by Donn Cave <[email protected]> June 1998.
# String method conversion by ESR, February 2001.
# GET/SETACL contributed by Anthony Baxter <[email protected]> April 2001.
# IMAP4_SSL contributed by Tino Lange <[email protected]> March 2002.
# GET/SETQUOTA contributed by Andreas Zeidler <[email protected]> June 2002.
# PROXYAUTH contributed by Rick Holbert <[email protected]> November 2002.
# GET/SETANNOTATION contributed by Tomas Lindroos <[email protected]> June 2005.
__version__ = "2.58"
import binascii, errno, random, re, socket, subprocess, sys, time, calendar
from datetime import datetime, timezone, timedelta
from io import DEFAULT_BUFFER_SIZE
try:
import ssl
HAVE_SSL = True
except ImportError:
HAVE_SSL = False
__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple",
"Int2AP", "ParseFlags", "Time2Internaldate"]
# Globals
CRLF = b'\r\n'
Debug = 0
IMAP4_PORT = 143
IMAP4_SSL_PORT = 993
AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first
# Maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1)
# don't specify a line length. RFC 2683 suggests limiting client
# command lines to 1000 octets and that servers should be prepared
# to accept command lines up to 8000 octets, so we used to use 10K here.
# In the modern world (eg: gmail) the response to, for example, a
# search command can be quite large, so we now use 1M.
_MAXLINE = 1000000
# Commands
Commands = {
# name valid states
'APPEND': ('AUTH', 'SELECTED'),
'AUTHENTICATE': ('NONAUTH',),
'CAPABILITY': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'CHECK': ('SELECTED',),
'CLOSE': ('SELECTED',),
'COPY': ('SELECTED',),
'CREATE': ('AUTH', 'SELECTED'),
'DELETE': ('AUTH', 'SELECTED'),
'DELETEACL': ('AUTH', 'SELECTED'),
'ENABLE': ('AUTH', ),
'EXAMINE': ('AUTH', 'SELECTED'),
'EXPUNGE': ('SELECTED',),
'FETCH': ('SELECTED',),
'GETACL': ('AUTH', 'SELECTED'),
'GETANNOTATION':('AUTH', 'SELECTED'),
'GETQUOTA': ('AUTH', 'SELECTED'),
'GETQUOTAROOT': ('AUTH', 'SELECTED'),
'MYRIGHTS': ('AUTH', 'SELECTED'),
'LIST': ('AUTH', 'SELECTED'),
'LOGIN': ('NONAUTH',),
'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'LSUB': ('AUTH', 'SELECTED'),
'MOVE': ('SELECTED',),
'NAMESPACE': ('AUTH', 'SELECTED'),
'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'PARTIAL': ('SELECTED',), # NB: obsolete
'PROXYAUTH': ('AUTH',),
'RENAME': ('AUTH', 'SELECTED'),
'SEARCH': ('SELECTED',),
'SELECT': ('AUTH', 'SELECTED'),
'SETACL': ('AUTH', 'SELECTED'),
'SETANNOTATION':('AUTH', 'SELECTED'),
'SETQUOTA': ('AUTH', 'SELECTED'),
'SORT': ('SELECTED',),
'STARTTLS': ('NONAUTH',),
'STATUS': ('AUTH', 'SELECTED'),
'STORE': ('SELECTED',),
'SUBSCRIBE': ('AUTH', 'SELECTED'),
'THREAD': ('SELECTED',),
'UID': ('SELECTED',),
'UNSUBSCRIBE': ('AUTH', 'SELECTED'),
}
# Patterns to match server responses
Continuation = re.compile(br'\+( (?P<data>.*))?')
Flags = re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)')
InternalDate = re.compile(br'.*INTERNALDATE "'
br'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
br' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
br' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
br'"')
# Literal is no longer used; kept for backward compatibility.
Literal = re.compile(br'.*{(?P<size>\d+)}$', re.ASCII)
MapCRLF = re.compile(br'\r\n|\r|\n')
# We no longer exclude the ']' character from the data portion of the response
# code, even though it violates the RFC. Popular IMAP servers such as Gmail
# allow flags with ']', and there are programs (including imaplib!) that can
# produce them. The problem with this is if the 'text' portion of the response
# includes a ']' we'll parse the response wrong (which is the point of the RFC
# restriction). However, that seems less likely to be a problem in practice
# than being unable to correctly parse flags that include ']' chars, which
# was reported as a real-world problem in issue #21815.
Response_code = re.compile(br'\[(?P<type>[A-Z-]+)( (?P<data>.*))?\]')
Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
# Untagged_status is no longer used; kept for backward compatibility.
Untagged_status = re.compile(
br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?', re.ASCII)
# We compile these in _mode_xxx.
_Literal = br'.*{(?P<size>\d+)}$'
_Untagged_status = br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?'
class IMAP4:
r"""IMAP4 client class.
Instantiate with: IMAP4([host[, port]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port).
All IMAP4rev1 commands are supported by methods of the same
name (in lower-case).
All arguments to commands are converted to strings, except for
AUTHENTICATE, and the last argument to APPEND which is passed as
an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double quotes) each string is quoted.
However, the 'password' argument to the LOGIN command is always
quoted. If you want to avoid having an argument string quoted
(eg: the 'flags' argument to STORE) then enclose the string in
parentheses (eg: "(\Deleted)").
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data'
is either a string, or a tuple. If a tuple, then the first part
is the header of the response, and the second part contains
the data (ie: 'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"),
which is a sub-class of 'error'. Mailbox status changes
from READ-WRITE to READ-ONLY raise the exception class
<instance>.readonly("<reason>"), which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
Note: to use this module, you must read the RFCs pertaining to the
IMAP4 protocol, as the semantics of the arguments to each IMAP4
command are left to the invoker, not to mention the results. Also,
most IMAP servers implement a sub-set of the commands available here.
"""
class error(Exception): pass # Logical errors - debug required
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
def __init__(self, host='', port=IMAP4_PORT):
self.debug = Debug
self.state = 'LOGOUT'
self.literal = None # A literal argument to a command
self.tagged_commands = {} # Tagged commands awaiting response
self.untagged_responses = {} # {typ: [data, ...], ...}
self.continuation_response = '' # Last continuation response
self.is_readonly = False # READ-ONLY desired state
self.tagnum = 0
self._tls_established = False
self._mode_ascii()
# Open socket to server.
self.open(host, port)
try:
self._connect()
except Exception:
try:
self.shutdown()
except OSError:
pass
raise
def _mode_ascii(self):
self.utf8_enabled = False
self._encoding = 'ascii'
self.Literal = re.compile(_Literal, re.ASCII)
self.Untagged_status = re.compile(_Untagged_status, re.ASCII)
def _mode_utf8(self):
self.utf8_enabled = True
self._encoding = 'utf-8'
self.Literal = re.compile(_Literal)
self.Untagged_status = re.compile(_Untagged_status)
def _connect(self):
# Create unique tag for this session,
# and compile tagged response matcher.
self.tagpre = Int2AP(random.randint(4096, 65535))
self.tagre = re.compile(br'(?P<tag>'
+ self.tagpre
+ br'\d+) (?P<type>[A-Z]+) (?P<data>.*)', re.ASCII)
# Get server welcome message,
# request and store CAPABILITY response.
if __debug__:
self._cmd_log_len = 10
self._cmd_log_idx = 0
self._cmd_log = {} # Last `_cmd_log_len' interactions
if self.debug >= 1:
self._mesg('imaplib version %s' % __version__)
self._mesg('new IMAP4 connection, tag=%s' % self.tagpre)
self.welcome = self._get_response()
if 'PREAUTH' in self.untagged_responses:
self.state = 'AUTH'
elif 'OK' in self.untagged_responses:
self.state = 'NONAUTH'
else:
raise self.error(self.welcome)
self._get_capabilities()
if __debug__:
if self.debug >= 3:
self._mesg('CAPABILITIES: %r' % (self.capabilities,))
for version in AllowedVersions:
if not version in self.capabilities:
continue
self.PROTOCOL_VERSION = version
return
raise self.error('server not IMAP4 compliant')
def __getattr__(self, attr):
# Allow UPPERCASE variants of IMAP4 command methods.
if attr in Commands:
return getattr(self, attr.lower())
raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
def __enter__(self):
return self
def __exit__(self, *args):
try:
self.logout()
except OSError:
pass
# Overridable methods
def _create_socket(self):
# Default value of IMAP4.host is '', but socket.getaddrinfo()
# (which is used by socket.create_connection()) expects None
# as a default value for host.
host = None if not self.host else self.host
return socket.create_connection((host, self.port))
def open(self, host = '', port = IMAP4_PORT):
"""Setup connection to remote server on "host:port"
(default: localhost:standard IMAP4 port).
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = host
self.port = port
self.sock = self._create_socket()
self.file = self.sock.makefile('rb')
def read(self, size):
"""Read 'size' bytes from remote."""
return self.file.read(size)
def readline(self):
"""Read line from remote."""
line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise self.error("got more than %d bytes" % _MAXLINE)
return line
def send(self, data):
"""Send data to remote."""
self.sock.sendall(data)
def shutdown(self):
"""Close I/O established in "open"."""
self.file.close()
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as exc:
# The server might already have closed the connection.
# On Windows, this may result in WSAEINVAL (error 10022):
# An invalid operation was attempted.
if (exc.errno != errno.ENOTCONN
and getattr(exc, 'winerror', 0) != 10022):
raise
finally:
self.sock.close()
def socket(self):
"""Return socket instance used to connect to IMAP4 server.
socket = <instance>.socket()
"""
return self.sock
# Utility methods
def recent(self):
"""Return most recent 'RECENT' responses if any exist,
else prompt server for an update using the 'NOOP' command.
(typ, [data]) = <instance>.recent()
'data' is None if no new messages,
else list of RECENT responses, most recent last.
"""
name = 'RECENT'
typ, dat = self._untagged_response('OK', [None], name)
if dat[-1]:
return typ, dat
typ, dat = self.noop() # Prod server for response
return self._untagged_response(typ, dat, name)
def response(self, code):
"""Return data for response 'code' if received, or None.
Old value for response 'code' is cleared.
(code, [data]) = <instance>.response(code)
"""
return self._untagged_response(code, [None], code.upper())
# IMAP4 commands
def append(self, mailbox, flags, date_time, message):
"""Append message to named mailbox.
(typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
All args except `message' can be None.
"""
name = 'APPEND'
if not mailbox:
mailbox = 'INBOX'
if flags:
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags
else:
flags = None
if date_time:
date_time = Time2Internaldate(date_time)
else:
date_time = None
literal = MapCRLF.sub(CRLF, message)
if self.utf8_enabled:
literal = b'UTF8 (' + literal + b')'
self.literal = literal
return self._simple_command(name, mailbox, flags, date_time)
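    # Illustrative call (flags and date_time may be None; 'message' must be
    # bytes-like):
    #
    #   import time
    #   M.append('INBOX', r'(\Seen)', time.time(),
    #            b'From: me\r\nSubject: hi\r\n\r\nbody\r\n')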
def authenticate(self, mechanism, authobject):
"""Authenticate command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - it must appear in <instance>.capabilities in the
form AUTH=<mechanism>.
'authobject' must be a callable object:
data = authobject(response)
It will be called to process server continuation responses; the
response argument it is passed will be a bytes. It should return bytes
data that will be base64 encoded and sent to the server. It should
return None if the client abort response '*' should be sent instead.
"""
mech = mechanism.upper()
# XXX: shouldn't this code be removed, not commented out?
#cap = 'AUTH=%s' % mech
#if not cap in self.capabilities: # Let the server decide!
# raise self.error("Server doesn't allow %s authentication." % mech)
self.literal = _Authenticator(authobject).process
typ, dat = self._simple_command('AUTHENTICATE', mech)
if typ != 'OK':
raise self.error(dat[-1].decode('utf-8', 'replace'))
self.state = 'AUTH'
return typ, dat
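    # Illustrative authobject for SASL PLAIN (placeholders throughout;
    # whether PLAIN is offered depends on the server):
    #
    #   def plain(challenge):
    #       return b'\0user\0password'
    #   M.authenticate('PLAIN', plain)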
def capability(self):
"""(typ, [data]) = <instance>.capability()
Fetch capabilities list from server."""
name = 'CAPABILITY'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def check(self):
"""Checkpoint mailbox on server.
(typ, [data]) = <instance>.check()
"""
return self._simple_command('CHECK')
def close(self):
"""Close currently selected mailbox.
Deleted messages are removed from writable mailbox.
This is the recommended command before 'LOGOUT'.
(typ, [data]) = <instance>.close()
"""
try:
typ, dat = self._simple_command('CLOSE')
finally:
self.state = 'AUTH'
return typ, dat
def copy(self, message_set, new_mailbox):
"""Copy 'message_set' messages onto end of 'new_mailbox'.
(typ, [data]) = <instance>.copy(message_set, new_mailbox)
"""
return self._simple_command('COPY', message_set, new_mailbox)
def create(self, mailbox):
"""Create new mailbox.
(typ, [data]) = <instance>.create(mailbox)
"""
return self._simple_command('CREATE', mailbox)
def delete(self, mailbox):
"""Delete old mailbox.
(typ, [data]) = <instance>.delete(mailbox)
"""
return self._simple_command('DELETE', mailbox)
def deleteacl(self, mailbox, who):
"""Delete the ACLs (remove any rights) set for who on mailbox.
(typ, [data]) = <instance>.deleteacl(mailbox, who)
"""
return self._simple_command('DELETEACL', mailbox, who)
def enable(self, capability):
"""Send an RFC5161 enable string to the server.
        (typ, [data]) = <instance>.enable(capability)
"""
if 'ENABLE' not in self.capabilities:
raise IMAP4.error("Server does not support ENABLE")
typ, data = self._simple_command('ENABLE', capability)
if typ == 'OK' and 'UTF8=ACCEPT' in capability.upper():
self._mode_utf8()
return typ, data
def expunge(self):
"""Permanently remove deleted items from selected mailbox.
Generates 'EXPUNGE' response for each deleted message.
(typ, [data]) = <instance>.expunge()
'data' is list of 'EXPUNGE'd message numbers in order received.
"""
name = 'EXPUNGE'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def fetch(self, message_set, message_parts):
"""Fetch (parts of) messages.
(typ, [data, ...]) = <instance>.fetch(message_set, message_parts)
'message_parts' should be a string of selected parts
enclosed in parentheses, eg: "(UID BODY[TEXT])".
'data' are tuples of message part envelope and data.
"""
name = 'FETCH'
typ, dat = self._simple_command(name, message_set, message_parts)
return self._untagged_response(typ, dat, name)
def getacl(self, mailbox):
"""Get the ACLs for a mailbox.
(typ, [data]) = <instance>.getacl(mailbox)
"""
typ, dat = self._simple_command('GETACL', mailbox)
return self._untagged_response(typ, dat, 'ACL')
def getannotation(self, mailbox, entry, attribute):
"""(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)
Retrieve ANNOTATIONs."""
typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
return self._untagged_response(typ, dat, 'ANNOTATION')
def getquota(self, root):
"""Get the quota root's resource usage and limits.
Part of the IMAP4 QUOTA extension defined in rfc2087.
(typ, [data]) = <instance>.getquota(root)
"""
typ, dat = self._simple_command('GETQUOTA', root)
return self._untagged_response(typ, dat, 'QUOTA')
def getquotaroot(self, mailbox):
"""Get the list of quota roots for the named mailbox.
(typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox)
"""
typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
typ, quota = self._untagged_response(typ, dat, 'QUOTA')
typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
return typ, [quotaroot, quota]
def list(self, directory='""', pattern='*'):
"""List mailbox names in directory matching pattern.
(typ, [data]) = <instance>.list(directory='""', pattern='*')
'data' is list of LIST responses.
"""
name = 'LIST'
typ, dat = self._simple_command(name, directory, pattern)
return self._untagged_response(typ, dat, name)
def login(self, user, password):
"""Identify client using plaintext password.
(typ, [data]) = <instance>.login(user, password)
NB: 'password' will be quoted.
"""
typ, dat = self._simple_command('LOGIN', user, self._quote(password))
if typ != 'OK':
raise self.error(dat[-1])
self.state = 'AUTH'
return typ, dat
def login_cram_md5(self, user, password):
""" Force use of CRAM-MD5 authentication.
(typ, [data]) = <instance>.login_cram_md5(user, password)
"""
self.user, self.password = user, password
return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH)
def _CRAM_MD5_AUTH(self, challenge):
""" Authobject to use with CRAM-MD5 authentication. """
import hmac
pwd = (self.password.encode('utf-8') if isinstance(self.password, str)
else self.password)
return self.user + " " + hmac.HMAC(pwd, challenge, 'md5').hexdigest()
def logout(self):
"""Shutdown connection to server.
(typ, [data]) = <instance>.logout()
Returns server 'BYE' response.
"""
self.state = 'LOGOUT'
try: typ, dat = self._simple_command('LOGOUT')
except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
self.shutdown()
if 'BYE' in self.untagged_responses:
return 'BYE', self.untagged_responses['BYE']
return typ, dat
def lsub(self, directory='""', pattern='*'):
"""List 'subscribed' mailbox names in directory matching pattern.
(typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')
'data' are tuples of message part envelope and data.
"""
name = 'LSUB'
typ, dat = self._simple_command(name, directory, pattern)
return self._untagged_response(typ, dat, name)
def myrights(self, mailbox):
"""Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).
(typ, [data]) = <instance>.myrights(mailbox)
"""
typ,dat = self._simple_command('MYRIGHTS', mailbox)
return self._untagged_response(typ, dat, 'MYRIGHTS')
def namespace(self):
""" Returns IMAP namespaces ala rfc2342
(typ, [data, ...]) = <instance>.namespace()
"""
name = 'NAMESPACE'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def noop(self):
"""Send NOOP command.
(typ, [data]) = <instance>.noop()
"""
if __debug__:
if self.debug >= 3:
self._dump_ur(self.untagged_responses)
return self._simple_command('NOOP')
def partial(self, message_num, message_part, start, length):
"""Fetch truncated part of a message.
(typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)
'data' is tuple of message part envelope and data.
"""
name = 'PARTIAL'
typ, dat = self._simple_command(name, message_num, message_part, start, length)
return self._untagged_response(typ, dat, 'FETCH')
def proxyauth(self, user):
"""Assume authentication as "user".
Allows an authorised administrator to proxy into any user's
mailbox.
(typ, [data]) = <instance>.proxyauth(user)
"""
        name = 'PROXYAUTH'
        return self._simple_command(name, user)
def rename(self, oldmailbox, newmailbox):
"""Rename old mailbox name to new.
(typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
"""
return self._simple_command('RENAME', oldmailbox, newmailbox)
def search(self, charset, *criteria):
"""Search mailbox for matching messages.
(typ, [data]) = <instance>.search(charset, criterion, ...)
'data' is space separated list of matching message numbers.
If UTF8 is enabled, charset MUST be None.
"""
name = 'SEARCH'
if charset:
if self.utf8_enabled:
raise IMAP4.error("Non-None charset not valid in UTF8 mode")
typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria)
else:
typ, dat = self._simple_command(name, *criteria)
return self._untagged_response(typ, dat, name)
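    # e.g. (criteria strings are examples):
    #
    #   typ, msgnums = M.search(None, 'FROM', '"LDJ"')
    #   typ, msgnums = M.search(None, '(FROM "LDJ")')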
def select(self, mailbox='INBOX', readonly=False):
"""Select a mailbox.
Flush all untagged responses.
(typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)
'data' is count of messages in mailbox ('EXISTS' response).
Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
other responses should be obtained via <instance>.response('FLAGS') etc.
"""
self.untagged_responses = {} # Flush old responses.
self.is_readonly = readonly
if readonly:
name = 'EXAMINE'
else:
name = 'SELECT'
typ, dat = self._simple_command(name, mailbox)
if typ != 'OK':
self.state = 'AUTH' # Might have been 'SELECTED'
return typ, dat
self.state = 'SELECTED'
if 'READ-ONLY' in self.untagged_responses \
and not readonly:
if __debug__:
if self.debug >= 1:
self._dump_ur(self.untagged_responses)
raise self.readonly('%s is not writable' % mailbox)
return typ, self.untagged_responses.get('EXISTS', [None])
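    # e.g. (illustrative): typ, data = M.select('INBOX', readonly=True)
    # leaves data[0] holding the EXISTS message count as bytes (or None).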
def setacl(self, mailbox, who, what):
"""Set a mailbox acl.
(typ, [data]) = <instance>.setacl(mailbox, who, what)
"""
return self._simple_command('SETACL', mailbox, who, what)
def setannotation(self, *args):
"""(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
Set ANNOTATIONs."""
typ, dat = self._simple_command('SETANNOTATION', *args)
return self._untagged_response(typ, dat, 'ANNOTATION')
def setquota(self, root, limits):
"""Set the quota root's resource limits.
(typ, [data]) = <instance>.setquota(root, limits)
"""
typ, dat = self._simple_command('SETQUOTA', root, limits)
return self._untagged_response(typ, dat, 'QUOTA')
def sort(self, sort_criteria, charset, *search_criteria):
"""IMAP4rev1 extension SORT command.
(typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
"""
name = 'SORT'
#if not name in self.capabilities: # Let the server decide!
# raise self.error('unimplemented extension command: %s' % name)
if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
sort_criteria = '(%s)' % sort_criteria
typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria)
return self._untagged_response(typ, dat, name)
def starttls(self, ssl_context=None):
name = 'STARTTLS'
if not HAVE_SSL:
raise self.error('SSL support missing')
if self._tls_established:
raise self.abort('TLS session already established')
if name not in self.capabilities:
raise self.abort('TLS not supported by server')
# Generate a default SSL context if none was passed.
if ssl_context is None:
ssl_context = ssl._create_stdlib_context()
typ, dat = self._simple_command(name)
if typ == 'OK':
self.sock = ssl_context.wrap_socket(self.sock,
server_hostname=self.host)
self.file = self.sock.makefile('rb')
self._tls_established = True
self._get_capabilities()
else:
raise self.error("Couldn't establish TLS session")
return self._untagged_response(typ, dat, name)
def status(self, mailbox, names):
"""Request named status conditions for mailbox.
(typ, [data]) = <instance>.status(mailbox, names)
"""
name = 'STATUS'
#if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide!
# raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
typ, dat = self._simple_command(name, mailbox, names)
return self._untagged_response(typ, dat, name)
def store(self, message_set, command, flags):
"""Alters flag dispositions for messages in mailbox.
(typ, [data]) = <instance>.store(message_set, command, flags)
"""
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags # Avoid quoting the flags
typ, dat = self._simple_command('STORE', message_set, command, flags)
return self._untagged_response(typ, dat, 'FETCH')
def subscribe(self, mailbox):
"""Subscribe to new mailbox.
(typ, [data]) = <instance>.subscribe(mailbox)
"""
return self._simple_command('SUBSCRIBE', mailbox)
def thread(self, threading_algorithm, charset, *search_criteria):
"""IMAPrev1 extension THREAD command.
(type, [data]) = <instance>.thread(threading_algorithm, charset, search_criteria, ...)
"""
name = 'THREAD'
typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria)
return self._untagged_response(typ, dat, name)
def uid(self, command, *args):
"""Execute "command arg ..." with messages identified by UID,
rather than message number.
(typ, [data]) = <instance>.uid(command, arg1, arg2, ...)
Returns response appropriate to 'command'.
"""
command = command.upper()
if not command in Commands:
raise self.error("Unknown IMAP4 UID command: %s" % command)
if self.state not in Commands[command]:
raise self.error("command %s illegal in state %s, "
"only allowed in states %s" %
(command, self.state,
', '.join(Commands[command])))
name = 'UID'
typ, dat = self._simple_command(name, command, *args)
if command in ('SEARCH', 'SORT', 'THREAD'):
name = command
else:
name = 'FETCH'
return self._untagged_response(typ, dat, name)
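    # e.g. (illustrative): search by UID instead of message number:
    #
    #   typ, data = M.uid('SEARCH', None, 'ALL')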
def unsubscribe(self, mailbox):
"""Unsubscribe from old mailbox.
(typ, [data]) = <instance>.unsubscribe(mailbox)
"""
return self._simple_command('UNSUBSCRIBE', mailbox)
def xatom(self, name, *args):
"""Allow simple extension commands
notified by server in CAPABILITY response.
Assumes command is legal in current state.
(typ, [data]) = <instance>.xatom(name, arg, ...)
Returns response appropriate to extension command `name'.
"""
name = name.upper()
#if not name in self.capabilities: # Let the server decide!
# raise self.error('unknown extension command: %s' % name)
if not name in Commands:
Commands[name] = (self.state,)
return self._simple_command(name, *args)
# Private methods
def _append_untagged(self, typ, dat):
if dat is None:
dat = b''
ur = self.untagged_responses
if __debug__:
if self.debug >= 5:
self._mesg('untagged_responses[%s] %s += ["%r"]' %
(typ, len(ur.get(typ,'')), dat))
if typ in ur:
ur[typ].append(dat)
else:
ur[typ] = [dat]
def _check_bye(self):
bye = self.untagged_responses.get('BYE')
if bye:
raise self.abort(bye[-1].decode(self._encoding, 'replace'))
def _command(self, name, *args):
if self.state not in Commands[name]:
self.literal = None
raise self.error("command %s illegal in state %s, "
"only allowed in states %s" %
(name, self.state,
', '.join(Commands[name])))
for typ in ('OK', 'NO', 'BAD'):
if typ in self.untagged_responses:
del self.untagged_responses[typ]
if 'READ-ONLY' in self.untagged_responses \
and not self.is_readonly:
raise self.readonly('mailbox status changed to READ-ONLY')
tag = self._new_tag()
name = bytes(name, self._encoding)
data = tag + b' ' + name
for arg in args:
if arg is None: continue
if isinstance(arg, str):
arg = bytes(arg, self._encoding)
data = data + b' ' + arg
literal = self.literal
if literal is not None:
self.literal = None
if type(literal) is type(self._command):
literator = literal
else:
literator = None
data = data + bytes(' {%s}' % len(literal), self._encoding)
if __debug__:
if self.debug >= 4:
self._mesg('> %r' % data)
else:
self._log('> %r' % data)
try:
self.send(data + CRLF)
except OSError as val:
raise self.abort('socket error: %s' % val)
if literal is None:
return tag
while 1:
# Wait for continuation response
while self._get_response():
if self.tagged_commands[tag]: # BAD/NO?
return tag
# Send literal
if literator:
literal = literator(self.continuation_response)
if __debug__:
if self.debug >= 4:
self._mesg('write literal size %s' % len(literal))
try:
self.send(literal)
self.send(CRLF)
except OSError as val:
raise self.abort('socket error: %s' % val)
if not literator:
break
return tag
def _command_complete(self, name, tag):
# BYE is expected after LOGOUT
if name != 'LOGOUT':
self._check_bye()
try:
typ, data = self._get_tagged_response(tag)
except self.abort as val:
raise self.abort('command: %s => %s' % (name, val))
except self.error as val:
raise self.error('command: %s => %s' % (name, val))
if name != 'LOGOUT':
self._check_bye()
if typ == 'BAD':
raise self.error('%s command error: %s %s' % (name, typ, data))
return typ, data
def _get_capabilities(self):
typ, dat = self.capability()
if dat == [None]:
raise self.error('no CAPABILITY response from server')
dat = str(dat[-1], self._encoding)
dat = dat.upper()
self.capabilities = tuple(dat.split())
def _get_response(self):
# Read response and store.
#
# Returns None for continuation responses,
# otherwise first response line received.
resp = self._get_line()
# Command completion response?
if self._match(self.tagre, resp):
tag = self.mo.group('tag')
if not tag in self.tagged_commands:
raise self.abort('unexpected tagged response: %r' % resp)
typ = self.mo.group('type')
typ = str(typ, self._encoding)
dat = self.mo.group('data')
self.tagged_commands[tag] = (typ, [dat])
else:
dat2 = None
# '*' (untagged) responses?
if not self._match(Untagged_response, resp):
if self._match(self.Untagged_status, resp):
dat2 = self.mo.group('data2')
if self.mo is None:
# Only other possibility is '+' (continuation) response...
if self._match(Continuation, resp):
self.continuation_response = self.mo.group('data')
return None # NB: indicates continuation
raise self.abort("unexpected response: %r" % resp)
typ = self.mo.group('type')
typ = str(typ, self._encoding)
dat = self.mo.group('data')
if dat is None: dat = b'' # Null untagged response
if dat2: dat = dat + b' ' + dat2
# Is there a literal to come?
while self._match(self.Literal, dat):
# Read literal direct from connection.
size = int(self.mo.group('size'))
if __debug__:
if self.debug >= 4:
self._mesg('read literal size %s' % size)
data = self.read(size)
# Store response with literal as tuple
self._append_untagged(typ, (dat, data))
# Read trailer - possibly containing another literal
dat = self._get_line()
self._append_untagged(typ, dat)
# Bracketed response information?
if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat):
typ = self.mo.group('type')
typ = str(typ, self._encoding)
self._append_untagged(typ, self.mo.group('data'))
if __debug__:
if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'):
self._mesg('%s response: %r' % (typ, dat))
return resp
def _get_tagged_response(self, tag):
while 1:
result = self.tagged_commands[tag]
if result is not None:
del self.tagged_commands[tag]
return result
# If we've seen a BYE at this point, the socket will be
# closed, so report the BYE now.
self._check_bye()
# Some have reported "unexpected response" exceptions.
# Note that ignoring them here causes loops.
# Instead, send me details of the unexpected response and
# I'll update the code in `_get_response()'.
try:
self._get_response()
except self.abort as val:
if __debug__:
if self.debug >= 1:
self.print_log()
raise
def _get_line(self):
line = self.readline()
if not line:
raise self.abort('socket error: EOF')
# Protocol mandates all lines terminated by CRLF
if not line.endswith(b'\r\n'):
raise self.abort('socket error: unterminated line: %r' % line)
line = line[:-2]
if __debug__:
if self.debug >= 4:
self._mesg('< %r' % line)
else:
self._log('< %r' % line)
return line
def _match(self, cre, s):
# Run compiled regular expression match method on 's'.
# Save result, return success.
self.mo = cre.match(s)
if __debug__:
if self.mo is not None and self.debug >= 5:
self._mesg("\tmatched r'%r' => %r" % (cre.pattern, self.mo.groups()))
return self.mo is not None
def _new_tag(self):
tag = self.tagpre + bytes(str(self.tagnum), self._encoding)
self.tagnum = self.tagnum + 1
self.tagged_commands[tag] = None
return tag
def _quote(self, arg):
arg = arg.replace('\\', '\\\\')
arg = arg.replace('"', '\\"')
return '"' + arg + '"'
def _simple_command(self, name, *args):
return self._command_complete(name, self._command(name, *args))
def _untagged_response(self, typ, dat, name):
if typ == 'NO':
return typ, dat
if not name in self.untagged_responses:
return typ, [None]
data = self.untagged_responses.pop(name)
if __debug__:
if self.debug >= 5:
self._mesg('untagged_responses[%s] => %s' % (name, data))
return typ, data
if __debug__:
def _mesg(self, s, secs=None):
if secs is None:
secs = time.time()
tm = time.strftime('%M:%S', time.localtime(secs))
sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s))
sys.stderr.flush()
def _dump_ur(self, dict):
# Dump untagged responses (in `dict').
l = dict.items()
if not l: return
t = '\n\t\t'
l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
def _log(self, line):
# Keep log of last `_cmd_log_len' interactions for debugging.
self._cmd_log[self._cmd_log_idx] = (line, time.time())
self._cmd_log_idx += 1
if self._cmd_log_idx >= self._cmd_log_len:
self._cmd_log_idx = 0
def print_log(self):
self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log))
i, n = self._cmd_log_idx, self._cmd_log_len
while n:
try:
self._mesg(*self._cmd_log[i])
except:
pass
i += 1
if i >= self._cmd_log_len:
i = 0
n -= 1
if HAVE_SSL:
class IMAP4_SSL(IMAP4):
"""IMAP4 client class over SSL connection
Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile[, ssl_context]]]]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 SSL port);
keyfile - PEM formatted file that contains your private key (default: None);
certfile - PEM formatted certificate chain file (default: None);
ssl_context - a SSLContext object that contains your certificate chain
and private key (default: None)
Note: if ssl_context is provided, then parameters keyfile or
certfile should not be set otherwise ValueError is raised.
for more documentation see the docstring of the parent class IMAP4.
"""
def __init__(self, host='', port=IMAP4_SSL_PORT, keyfile=None,
certfile=None, ssl_context=None):
if ssl_context is not None and keyfile is not None:
raise ValueError("ssl_context and keyfile arguments are mutually "
"exclusive")
if ssl_context is not None and certfile is not None:
raise ValueError("ssl_context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a "
"custom ssl_context instead", DeprecationWarning, 2)
self.keyfile = keyfile
self.certfile = certfile
if ssl_context is None:
ssl_context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.ssl_context = ssl_context
IMAP4.__init__(self, host, port)
def _create_socket(self):
sock = IMAP4._create_socket(self)
return self.ssl_context.wrap_socket(sock,
server_hostname=self.host)
def open(self, host='', port=IMAP4_SSL_PORT):
"""Setup connection to remote server on "host:port".
(default: localhost:standard IMAP4 SSL port).
This connection will be used by the routines:
read, readline, send, shutdown.
"""
IMAP4.open(self, host, port)
__all__.append("IMAP4_SSL")
class IMAP4_stream(IMAP4):
"""IMAP4 client class over a stream
Instantiate with: IMAP4_stream(command)
"command" - a string that can be passed to subprocess.Popen()
for more documentation see the docstring of the parent class IMAP4.
"""
def __init__(self, command):
self.command = command
IMAP4.__init__(self)
def open(self, host = None, port = None):
"""Setup a stream connection.
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = None # For compatibility with parent class
self.port = None
self.sock = None
self.file = None
self.process = subprocess.Popen(self.command,
bufsize=DEFAULT_BUFFER_SIZE,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
shell=True, close_fds=True)
self.writefile = self.process.stdin
self.readfile = self.process.stdout
def read(self, size):
"""Read 'size' bytes from remote."""
return self.readfile.read(size)
def readline(self):
"""Read line from remote."""
return self.readfile.readline()
def send(self, data):
"""Send data to remote."""
self.writefile.write(data)
self.writefile.flush()
def shutdown(self):
"""Close I/O established in "open"."""
self.readfile.close()
self.writefile.close()
self.process.wait()
class _Authenticator:
"""Private class to provide en/decoding
for base64-based authentication conversation.
"""
def __init__(self, mechinst):
self.mech = mechinst # Callable object to provide/process data
def process(self, data):
ret = self.mech(self.decode(data))
if ret is None:
return b'*' # Abort conversation
return self.encode(ret)
def encode(self, inp):
        #
        # Invoke binascii.b2a_base64 iteratively with
        # short "even" length buffers, strip the trailing
        # line feed from each result and append.  "Even"
        # here means a bit length divisible by both 6 and 8
        # (48 bytes = 384 bits qualifies), so when it gets
        # to the end of the 8-bit input there's no partial
        # 6-bit output.
        #
oup = b''
if isinstance(inp, str):
inp = inp.encode('utf-8')
while inp:
if len(inp) > 48:
t = inp[:48]
inp = inp[48:]
else:
t = inp
inp = b''
e = binascii.b2a_base64(t)
if e:
oup = oup + e[:-1]
return oup
def decode(self, inp):
if not inp:
return b''
return binascii.a2b_base64(inp)
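# Rough illustration of the chunking above (not from the original
# source): encoding 60 bytes emits one 64-character chunk for the
# first 48 bytes plus 16 characters for the remaining 12, with the
# newlines b2a_base64 appends stripped out:
#
#   auth = _Authenticator(lambda challenge: b'x' * 60)
#   auth.process(b'')   # -> 80 bytes of base64 with no b'\n' inside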
Months = ' Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split(' ')
Mon2num = {s.encode():n+1 for n, s in enumerate(Months[1:])}
def Internaldate2tuple(resp):
"""Parse an IMAP4 INTERNALDATE string.
    Return the corresponding local time.  The return value is a
    time.struct_time tuple, or None if the string has the wrong format.
"""
mo = InternalDate.match(resp)
if not mo:
return None
mon = Mon2num[mo.group('mon')]
zonen = mo.group('zonen')
day = int(mo.group('day'))
year = int(mo.group('year'))
hour = int(mo.group('hour'))
min = int(mo.group('min'))
sec = int(mo.group('sec'))
zoneh = int(mo.group('zoneh'))
zonem = int(mo.group('zonem'))
# INTERNALDATE timezone must be subtracted to get UT
zone = (zoneh*60 + zonem)*60
if zonen == b'-':
zone = -zone
tt = (year, mon, day, hour, min, sec, -1, -1, -1)
utc = calendar.timegm(tt) - zone
return time.localtime(utc)
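# Illustrative call (not part of the original source), relying on the
# module-level InternalDate pattern defined earlier in this file:
#
#   Internaldate2tuple(b'25 (INTERNALDATE "02-Apr-2000 02:30:00 +0000")')
#
# returns the time.struct_time for that instant in the local timezone,
# so the exact field values depend on where the code runs.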
def Int2AP(num):
"""Convert integer to A-P string representation."""
    val = b''
    AP = b'ABCDEFGHIJKLMNOP'
num = int(abs(num))
while num:
num, mod = divmod(num, 16)
val = AP[mod:mod+1] + val
return val
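# For instance (illustrative): Int2AP(0) == b'' and Int2AP(4196) ==
# b'BAGE', since 4196 = ((1*16 + 0)*16 + 6)*16 + 4 and the digit
# values 1, 0, 6, 4 map to B, A, G, E.  The module uses this to derive
# the random command-tag prefix.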
def ParseFlags(resp):
"""Convert IMAP4 flags response to python tuple."""
mo = Flags.match(resp)
if not mo:
return ()
return tuple(mo.group('flags').split())
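# Illustrative call, assuming the module-level Flags pattern defined
# earlier in this file:
#
#   ParseFlags(b'* 1 FETCH (FLAGS (\\Seen \\Answered))')
#   # -> (b'\\Seen', b'\\Answered')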
def Time2Internaldate(date_time):
"""Convert date_time to IMAP4 INTERNALDATE representation.
Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'. The
date_time argument can be a number (int or float) representing
seconds since epoch (as returned by time.time()), a 9-tuple
representing local time, an instance of time.struct_time (as
returned by time.localtime()), an aware datetime instance or a
double-quoted string. In the last case, it is assumed to already
be in the correct format.
"""
if isinstance(date_time, (int, float)):
dt = datetime.fromtimestamp(date_time,
timezone.utc).astimezone()
elif isinstance(date_time, tuple):
try:
gmtoff = date_time.tm_gmtoff
except AttributeError:
if time.daylight:
dst = date_time[8]
if dst == -1:
dst = time.localtime(time.mktime(date_time))[8]
gmtoff = -(time.timezone, time.altzone)[dst]
else:
gmtoff = -time.timezone
delta = timedelta(seconds=gmtoff)
dt = datetime(*date_time[:6], tzinfo=timezone(delta))
elif isinstance(date_time, datetime):
if date_time.tzinfo is None:
raise ValueError("date_time must be aware")
dt = date_time
elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
return date_time # Assume in correct format
else:
raise ValueError("date_time not of a known type")
fmt = '"%d-{}-%Y %H:%M:%S %z"'.format(Months[dt.month])
return dt.strftime(fmt)
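# Sketch of the accepted inputs (illustrative only; values are
# arbitrary):
#
#   Time2Internaldate(time.time())                # seconds since epoch
#   Time2Internaldate(time.localtime())           # struct_time / 9-tuple
#   Time2Internaldate(datetime.now(timezone.utc)) # aware datetime
#   Time2Internaldate('"18-May-2033 05:33:20 +0200"')  # passed through
#
# each returns a string shaped like '"DD-Mmm-YYYY HH:MM:SS +HHMM"'.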
if __name__ == '__main__':
# To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]'
# or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"'
# to test the IMAP4_stream class
import getopt, getpass
try:
optlist, args = getopt.getopt(sys.argv[1:], 'd:s:')
except getopt.error as val:
optlist, args = (), ()
stream_command = None
for opt,val in optlist:
if opt == '-d':
Debug = int(val)
elif opt == '-s':
stream_command = val
if not args: args = (stream_command,)
if not args: args = ('',)
host = args[0]
USER = getpass.getuser()
PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost"))
test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'}
test_seq1 = (
('login', (USER, PASSWD)),
('create', ('/tmp/xxx 1',)),
('rename', ('/tmp/xxx 1', '/tmp/yyy')),
('CREATE', ('/tmp/yyz 2',)),
('append', ('/tmp/yyz 2', None, None, test_mesg)),
('list', ('/tmp', 'yy*')),
('select', ('/tmp/yyz 2',)),
('search', (None, 'SUBJECT', 'test')),
('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
('store', ('1', 'FLAGS', r'(\Deleted)')),
('namespace', ()),
('expunge', ()),
('recent', ()),
('close', ()),
)
test_seq2 = (
('select', ()),
('response',('UIDVALIDITY',)),
('uid', ('SEARCH', 'ALL')),
('response', ('EXISTS',)),
('append', (None, None, None, test_mesg)),
('recent', ()),
('logout', ()),
)
def run(cmd, args):
M._mesg('%s %s' % (cmd, args))
typ, dat = getattr(M, cmd)(*args)
M._mesg('%s => %s %s' % (cmd, typ, dat))
if typ == 'NO': raise dat[0]
return dat
try:
if stream_command:
M = IMAP4_stream(stream_command)
else:
M = IMAP4(host)
if M.state == 'AUTH':
test_seq1 = test_seq1[1:] # Login not needed
M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION)
M._mesg('CAPABILITIES = %r' % (M.capabilities,))
for cmd,args in test_seq1:
run(cmd, args)
for ml in run('list', ('/tmp/', 'yy%')):
mo = re.match(r'.*"([^"]+)"$', ml)
if mo: path = mo.group(1)
else: path = ml.split()[-1]
run('delete', (path,))
for cmd,args in test_seq2:
dat = run(cmd, args)
if (cmd,args) != ('uid', ('SEARCH', 'ALL')):
continue
uid = dat[-1].split()
if not uid: continue
run('uid', ('FETCH', '%s' % uid[-1],
'(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)'))
print('\nAll tests OK.')
except:
print('\nTests failed.')
if not Debug:
print('''
If you would like to see debugging output,
try: %s -d5
''' % sys.argv[0])
raise
cosmopolitan/third_party/python/Lib/gzip.py
"""Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import builtins
import io
import _compression
__all__ = ["GzipFile", "open", "compress", "decompress"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def open(filename, mode="rb", compresslevel=9,
encoding=None, errors=None, newline=None):
"""Open a gzip-compressed file in binary or text mode.
The filename argument can be an actual filename (a str or bytes object), or
an existing file object to read from or write to.
The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for
binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is
"rb", and the default compresslevel is 9.
For binary mode, this function is equivalent to the GzipFile constructor:
GzipFile(filename, mode, compresslevel). In this case, the encoding, errors
and newline arguments must not be provided.
For text mode, a GzipFile object is created, and wrapped in an
io.TextIOWrapper instance with the specified encoding, error handling
behavior, and line ending(s).
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if errors is not None:
raise ValueError("Argument 'errors' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
gz_mode = mode.replace("t", "")
if isinstance(filename, (str, bytes, os.PathLike)):
binary_file = GzipFile(filename, gz_mode, compresslevel)
elif hasattr(filename, "read") or hasattr(filename, "write"):
binary_file = GzipFile(None, gz_mode, compresslevel, filename)
else:
raise TypeError("filename must be a str or bytes object, or a file")
if "t" in mode:
return io.TextIOWrapper(binary_file, encoding, errors, newline)
else:
return binary_file
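# Typical round trip through open() (illustrative; the path is
# arbitrary).  Text mode simply layers an io.TextIOWrapper over the
# binary GzipFile:
#
#   with open('/tmp/demo.gz', 'wt', encoding='utf-8') as f:
#       f.write('hello\n')
#   with open('/tmp/demo.gz', 'rt', encoding='utf-8') as f:
#       assert f.read() == 'hello\n'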
def write32u(output, value):
# The L format writes the bit pattern correctly whether signed
# or unsigned.
output.write(struct.pack("<L", value))
class _PaddedFile:
"""Minimal read-only file object that prepends a string to the contents
of an actual file. Shouldn't be used outside of gzip.py, as it lacks
essential functionality."""
def __init__(self, f, prepend=b''):
self._buffer = prepend
self._length = len(prepend)
self.file = f
self._read = 0
def read(self, size):
if self._read is None:
return self.file.read(size)
if self._read + size <= self._length:
read = self._read
self._read += size
return self._buffer[read:self._read]
else:
read = self._read
self._read = None
return self._buffer[read:] + \
self.file.read(size-self._length+read)
def prepend(self, prepend=b''):
if self._read is None:
self._buffer = prepend
else: # Assume data was read since the last prepend() call
self._read -= len(prepend)
return
self._length = len(self._buffer)
self._read = 0
def seek(self, off):
self._read = None
self._buffer = None
return self.file.seek(off)
def seekable(self):
return True # Allows fast-forwarding even in unseekable streams
class GzipFile(_compression.BaseStream):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the truncate() method.
This class only supports opening files in binary mode. If you need to open a
compressed file in text mode, use the gzip.open() function.
"""
# Overridden with internal file object to be closed, if only a filename
# is passed in
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, an io.BytesIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
'xb' depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
'wb', 'a' and 'ab', and 'x' and 'xb'.
The compresslevel argument is an integer from 0 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. 0 is no compression
at all. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the last modification time field in the stream when compressing.
If omitted or None, the current time is used.
"""
if mode and ('t' in mode or 'U' in mode):
raise ValueError("Invalid mode: {!r}".format(mode))
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
if filename is None:
filename = getattr(fileobj, 'name', '')
if not isinstance(filename, (str, bytes)):
filename = ''
else:
filename = os.fspath(filename)
if mode is None:
mode = getattr(fileobj, 'mode', 'rb')
if mode.startswith('r'):
self.mode = READ
raw = _GzipReader(fileobj)
self._buffer = io.BufferedReader(raw)
self.name = filename
elif mode.startswith(('w', 'a', 'x')):
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._write_mtime = mtime
else:
raise ValueError("Invalid mode: {!r}".format(mode))
self.fileobj = fileobj
if self.mode == WRITE:
self._write_gzip_header()
@property
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
@property
def mtime(self):
"""Last modification time read from stream, or None"""
return self._buffer.raw._last_mtime
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32(b"")
self.size = 0
self.writebuf = []
self.bufsize = 0
self.offset = 0 # Current file offset for seek(), tell(), etc
def _write_gzip_header(self):
self.fileobj.write(b'\037\213') # magic header
self.fileobj.write(b'\010') # compression method
try:
# RFC 1952 requires the FNAME field to be Latin-1. Do not
# include filenames that cannot be represented that way.
fname = os.path.basename(self.name)
if not isinstance(fname, bytes):
fname = fname.encode('latin-1')
if fname.endswith(b'.gz'):
fname = fname[:-3]
except UnicodeEncodeError:
fname = b''
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags).encode('latin-1'))
mtime = self._write_mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, int(mtime))
        self.fileobj.write(b'\002')             # XFL: slowest/best compression
        self.fileobj.write(b'\377')             # OS: unknown
if fname:
self.fileobj.write(fname + b'\000')
    def write(self, data):
self._check_not_closed()
if self.mode != WRITE:
import errno
raise OSError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError("write() on closed GzipFile object")
if isinstance(data, bytes):
length = len(data)
else:
# accept any data that supports the buffer protocol
data = memoryview(data)
length = data.nbytes
if length > 0:
self.fileobj.write(self.compress.compress(data))
self.size += length
self.crc = zlib.crc32(data, self.crc)
self.offset += length
return length
def read(self, size=-1):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read() on write-only GzipFile object")
return self._buffer.read(size)
def read1(self, size=-1):
"""Implements BufferedIOBase.read1()
Reads up to a buffer's worth of data is size is negative."""
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read1() on write-only GzipFile object")
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size)
def peek(self, n):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "peek() on write-only GzipFile object")
return self._buffer.peek(n)
@property
def closed(self):
return self.fileobj is None
def close(self):
fileobj = self.fileobj
if fileobj is None:
return
self.fileobj = None
try:
if self.mode == WRITE:
fileobj.write(self.compress.flush())
write32u(fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(fileobj, self.size & 0xffffffff)
elif self.mode == READ:
self._buffer.close()
finally:
myfileobj = self.myfileobj
if myfileobj:
self.myfileobj = None
myfileobj.close()
    def flush(self, zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_not_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise OSError("Can't rewind in write mode")
self._buffer.seek(0)
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=io.SEEK_SET):
if self.mode == WRITE:
if whence != io.SEEK_SET:
if whence == io.SEEK_CUR:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if offset < self.offset:
raise OSError('Negative seek in write mode')
count = offset - self.offset
chunk = b'\0' * 1024
for i in range(count // 1024):
self.write(chunk)
self.write(b'\0' * (count % 1024))
elif self.mode == READ:
self._check_not_closed()
return self._buffer.seek(offset, whence)
return self.offset
def readline(self, size=-1):
self._check_not_closed()
return self._buffer.readline(size)
class _GzipReader(_compression.DecompressReader):
def __init__(self, fp):
super().__init__(_PaddedFile(fp), zlib.decompressobj,
wbits=-zlib.MAX_WBITS)
# Set flag indicating start of a new member
self._new_member = True
self._last_mtime = None
def _init_read(self):
self._crc = zlib.crc32(b"")
self._stream_size = 0 # Decompressed size of unconcatenated stream
def _read_exact(self, n):
'''Read exactly *n* bytes from `self._fp`
This method is required because self._fp may be unbuffered,
i.e. return short reads.
'''
data = self._fp.read(n)
while len(data) < n:
b = self._fp.read(n - len(data))
if not b:
raise EOFError("Compressed file ended before the "
"end-of-stream marker was reached")
data += b
return data
def _read_gzip_header(self):
magic = self._fp.read(2)
if magic == b'':
return False
if magic != b'\037\213':
raise OSError('Not a gzipped file (%r)' % magic)
(method, flag,
self._last_mtime) = struct.unpack("<BBIxx", self._read_exact(8))
if method != 8:
raise OSError('Unknown compression method')
if flag & FEXTRA:
# Read & discard the extra field, if present
extra_len, = struct.unpack("<H", self._read_exact(2))
self._read_exact(extra_len)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
s = self._fp.read(1)
                if not s or s == b'\000':
break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while True:
s = self._fp.read(1)
                if not s or s == b'\000':
break
if flag & FHCRC:
self._read_exact(2) # Read & discard the 16-bit header CRC
return True
def read(self, size=-1):
if size < 0:
return self.readall()
# size=0 is special because decompress(max_length=0) is not supported
if not size:
return b""
# For certain input data, a single
# call to decompress() may not return
# any data. In this case, retry until we get some data or reach EOF.
while True:
if self._decompressor.eof:
# Ending case: we've come to the end of a member in the file,
# so finish up this member, and read a new gzip header.
# Check the CRC and file size, and set the flag so we read
# a new member
self._read_eof()
self._new_member = True
self._decompressor = self._decomp_factory(
**self._decomp_args)
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
self._init_read()
if not self._read_gzip_header():
self._size = self._pos
return b""
self._new_member = False
# Read a chunk of data from the file
buf = self._fp.read(io.DEFAULT_BUFFER_SIZE)
uncompress = self._decompressor.decompress(buf, size)
if self._decompressor.unconsumed_tail != b"":
self._fp.prepend(self._decompressor.unconsumed_tail)
elif self._decompressor.unused_data != b"":
# Prepend the already read bytes to the fileobj so they can
# be seen by _read_eof() and _read_gzip_header()
self._fp.prepend(self._decompressor.unused_data)
if uncompress != b"":
break
if buf == b"":
raise EOFError("Compressed file ended before the "
"end-of-stream marker was reached")
        self._add_read_data(uncompress)
self._pos += len(uncompress)
return uncompress
def _add_read_data(self, data):
self._crc = zlib.crc32(data, self._crc)
self._stream_size = self._stream_size + len(data)
def _read_eof(self):
        # We've read to the end of the file.
        # Check that the computed CRC and size of the uncompressed data
        # match the stored values.  Note that the size stored is the true
        # file size mod 2**32.
crc32, isize = struct.unpack("<II", self._read_exact(8))
if crc32 != self._crc:
raise OSError("CRC check failed %s != %s" % (hex(crc32),
hex(self._crc)))
elif isize != (self._stream_size & 0xffffffff):
raise OSError("Incorrect length of data produced")
        # Gzip files can be padded with zeroes and still be valid archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = b"\x00"
while c == b"\x00":
c = self._fp.read(1)
if c:
self._fp.prepend(c)
def _rewind(self):
super()._rewind()
self._new_member = True
def compress(data, compresslevel=9):
"""Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 0-9.
"""
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) as f:
f.write(data)
return buf.getvalue()
def decompress(data):
"""Decompress a gzip compressed string in one shot.
Return the decompressed string.
"""
with GzipFile(fileobj=io.BytesIO(data)) as f:
return f.read()
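# The two one-shot helpers above are inverses (illustrative):
#
#   payload = b'x' * 10000
#   blob = compress(payload)        # far smaller: the run compresses well
#   assert decompress(blob) == payload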
def _test():
# Act like gzip; with -d, act like gunzip.
# The input file is not deleted, however, nor are any other gzip
# options or features supported.
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
if not args:
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin.buffer)
g = sys.stdout.buffer
else:
if arg[-3:] != ".gz":
print("filename doesn't end in .gz:", repr(arg))
continue
f = open(arg, "rb")
g = builtins.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin.buffer
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout.buffer)
else:
f = builtins.open(arg, "rb")
g = open(arg + ".gz", "wb")
while True:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout.buffer:
g.close()
if f is not sys.stdin.buffer:
f.close()
if __name__ == '__main__':
_test()
cosmopolitan/third_party/python/Lib/sre_parse.py
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
from sre_constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = frozenset("0123456789")
OCTDIGITS = frozenset("01234567")
HEXDIGITS = frozenset("0123456789abcdefABCDEF")
ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
WHITESPACE = frozenset(" \t\n\r\v\f")
_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT})
_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY})
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
GLOBAL_FLAGS = (SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE |
SRE_FLAG_DEBUG | SRE_FLAG_TEMPLATE)
class Verbose(Exception):
pass
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.groupdict = {}
self.groupwidths = [None] # group 0
self.lookbehindgroups = None
@property
def groups(self):
return len(self.groupwidths)
def opengroup(self, name=None):
gid = self.groups
self.groupwidths.append(None)
if self.groups > MAXGROUPS:
raise error("too many groups")
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %r as group %d; "
"was group %d" % (name, gid, ogid))
self.groupdict[name] = gid
return gid
def closegroup(self, gid, p):
self.groupwidths[gid] = p.getwidth()
def checkgroup(self, gid):
return gid < self.groups and self.groupwidths[gid] is not None
def checklookbehindgroup(self, gid, source):
if self.lookbehindgroups is not None:
if not self.checkgroup(gid):
raise source.error('cannot refer to an open group')
if gid >= self.lookbehindgroups:
raise source.error('cannot refer to group defined in the same '
'lookbehind subpattern')
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = True
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + str(op), end='')
if op is IN:
# member sublanguage
print()
for op, a in av:
print((level+1)*" " + str(op), a)
elif op is BRANCH:
print()
for i, a in enumerate(av[1]):
if i:
print(level*" " + "OR")
a.dump(level+1)
elif op is GROUPREF_EXISTS:
condgroup, item_yes, item_no = av
print('', condgroup)
item_yes.dump(level+1)
if item_no:
print(level*" " + "ELSE")
item_no.dump(level+1)
elif isinstance(av, seqtypes):
nl = False
for a in av:
if isinstance(a, SubPattern):
if not nl:
print()
a.dump(level+1)
nl = True
else:
if not nl:
print(' ', end='')
print(a, end='')
nl = False
if not nl:
print()
else:
print('', av)
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width is not None:
return self.width
lo = hi = 0
for op, av in self.data:
if op is BRANCH:
i = MAXREPEAT - 1
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[-1].getwidth()
lo = lo + i
hi = hi + j
elif op in _REPEATCODES:
i, j = av[2].getwidth()
lo = lo + i * av[0]
hi = hi + j * av[1]
elif op in _UNITCODES:
lo = lo + 1
hi = hi + 1
elif op is GROUPREF:
i, j = self.pattern.groupwidths[av]
lo = lo + i
hi = hi + j
elif op is GROUPREF_EXISTS:
i, j = av[1].getwidth()
if av[2] is not None:
l, h = av[2].getwidth()
i = min(i, l)
j = max(j, h)
else:
i = 0
lo = lo + i
hi = hi + j
elif op is SUCCESS:
break
self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
return self.width
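# Width bookkeeping example (illustrative): for the pattern 'a{2,4}b'
# the repeat contributes (2, 4) and the trailing literal (1, 1), so
# parse('a{2,4}b').getwidth() == (3, 5).  The compiler leans on these
# bounds, e.g. to insist that lookbehind patterns have fixed width.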
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
if not self.istext:
string = str(string, 'latin1')
self.decoded_string = string
self.index = 0
self.next = None
self.__next()
def __next(self):
index = self.index
try:
char = self.decoded_string[index]
except IndexError:
self.next = None
return
if char == "\\":
index += 1
try:
char += self.decoded_string[index]
except IndexError:
raise error("bad escape (end of pattern)",
self.string, len(self.string) - 1) from None
self.index = index + 1
self.next = char
def match(self, char):
if char == self.next:
self.__next()
return True
return False
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def getuntil(self, terminator):
result = ''
while True:
c = self.next
self.__next()
if c is None:
if not result:
raise self.error("missing group name")
raise self.error("missing %s, unterminated name" % terminator,
len(result))
if c == terminator:
if not result:
raise self.error("missing group name", 1)
break
result += c
return result
@property
def pos(self):
return self.index - len(self.next or '')
def tell(self):
return self.index - len(self.next or '')
def seek(self, index):
self.index = index
self.__next()
def error(self, msg, offset=0):
return error(msg, self.string, self.tell() - offset)
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] is IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape, len(escape))
return LITERAL, c
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error('bad escape %s' % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8)
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape += source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape += source.get()
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape,
len(escape))
return LITERAL, c
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise source.error("cannot refer to an open group",
len(escape))
state.checklookbehindgroup(group, source)
return GROUPREF, group
raise source.error("invalid group reference %d" % group, len(escape) - 1)
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error("bad escape %s" % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _parse_sub(source, state, verbose, nested):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
start = source.tell()
while True:
itemsappend(_parse(source, state, verbose, nested + 1,
not nested and not items))
if not sourcematch("|"):
break
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while True:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] is not LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
subpatternappend((IN, [item[0] for item in items]))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
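# Illustrative effect of the two rewrites above: parsing 'ab|ac' hoists
# the shared LITERAL 'a' out of the BRANCH, after which the remaining
# one-literal alternatives collapse into a single IN character set --
# i.e. 'ab|ac' parses the same as 'a[bc]'.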
def _parse_sub_cond(source, state, condgroup, verbose, nested):
item_yes = _parse(source, state, verbose, nested + 1)
if source.match("|"):
item_no = _parse(source, state, verbose, nested + 1)
if source.next == "|":
raise source.error("conditional backref with more than two branches")
else:
item_no = None
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
def _parse(source, state, verbose, nested, first=False):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
_ord = ord
while True:
this = source.next
if this is None:
break # end of pattern
if this in "|)":
break # end of subpattern
sourceget()
if verbose:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while True:
this = sourceget()
if this is None or this == "\n":
break
continue
if this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
elif this not in SPECIAL_CHARS:
subpatternappend((LITERAL, _ord(this)))
elif this == "[":
here = source.tell() - 1
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while True:
this = sourceget()
if this is None:
raise source.error("unterminated character set",
source.tell() - here)
if this == "]" and set != start:
break
elif this[0] == "\\":
code1 = _class_escape(source, this)
else:
code1 = LITERAL, _ord(this)
if sourcematch("-"):
# potential range
that = sourceget()
if that is None:
raise source.error("unterminated character set",
source.tell() - here)
if that == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, _ord("-")))
break
if that[0] == "\\":
code2 = _class_escape(source, that)
else:
code2 = LITERAL, _ord(that)
if code1[0] != LITERAL or code2[0] != LITERAL:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
lo = code1[1]
hi = code2[1]
if hi < lo:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
setappend((RANGE, (lo, hi)))
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this in REPEAT_CHARS:
# repeat previous item
here = source.tell()
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, _ord(this)))
continue
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo += sourceget()
if sourcematch(","):
while source.next in DIGITS:
hi += sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, _ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise source.error("min repeat greater than max repeat",
source.tell() - here)
else:
raise AssertionError("unsupported quantifier %r" % (char,))
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] is AT):
raise source.error("nothing to repeat",
source.tell() - here + len(this))
if item[0][0] in _REPEATCODES:
raise source.error("multiple repeat",
source.tell() - here + len(this))
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
start = source.tell() - 1
group = True
name = None
condgroup = None
add_flags = 0
del_flags = 0
if sourcematch("?"):
# options
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char == "P":
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = source.getuntil(">")
if not name.isidentifier():
msg = "bad character in group name %r" % name
raise source.error(msg, len(name) + 1)
elif sourcematch("="):
# named backreference
name = source.getuntil(")")
if not name.isidentifier():
msg = "bad character in group name %r" % name
raise source.error(msg, len(name) + 1)
gid = state.groupdict.get(name)
if gid is None:
msg = "unknown group name %r" % name
raise source.error(msg, len(name) + 1)
if not state.checkgroup(gid):
raise source.error("cannot refer to an open group",
len(name) + 1)
state.checklookbehindgroup(gid, source)
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
raise source.error("unknown extension ?P" + char,
len(char) + 2)
elif char == ":":
# non-capturing group
group = None
elif char == "#":
# comment
while True:
if source.next is None:
raise source.error("missing ), unterminated comment",
source.tell() - start)
if sourceget() == ")":
break
continue
elif char in "=!<":
# lookahead assertions
dir = 1
if char == "<":
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char not in "=!":
raise source.error("unknown extension ?<" + char,
len(char) + 2)
dir = -1 # lookbehind
lookbehindgroups = state.lookbehindgroups
if lookbehindgroups is None:
state.lookbehindgroups = state.groups
p = _parse_sub(source, state, verbose, nested + 1)
if dir < 0:
if lookbehindgroups is None:
state.lookbehindgroups = None
if not sourcematch(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif char == "(":
# conditional backreference group
condname = source.getuntil(")")
group = None
if condname.isidentifier():
condgroup = state.groupdict.get(condname)
if condgroup is None:
msg = "unknown group name %r" % condname
raise source.error(msg, len(condname) + 1)
else:
try:
condgroup = int(condname)
if condgroup < 0:
raise ValueError
except ValueError:
msg = "bad character in group name %r" % condname
raise source.error(msg, len(condname) + 1) from None
if not condgroup:
raise source.error("bad group number",
len(condname) + 1)
if condgroup >= MAXGROUPS:
msg = "invalid group reference %d" % condgroup
raise source.error(msg, len(condname) + 1)
state.checklookbehindgroup(condgroup, source)
elif char in FLAGS or char == "-":
# flags
flags = _parse_flags(source, state, char)
if flags is None: # global flags
if not first or subpattern:
import warnings
warnings.warn(
'Flags not at the start of the expression %r%s' % (
source.string[:20], # truncate long regexes
' (truncated)' if len(source.string) > 20 else '',
),
DeprecationWarning, stacklevel=nested + 6
)
if (state.flags & SRE_FLAG_VERBOSE) and not verbose:
raise Verbose
continue
add_flags, del_flags = flags
group = None
else:
raise source.error("unknown extension ?" + char,
len(char) + 1)
# parse group contents
if group is not None:
try:
group = state.opengroup(name)
except error as err:
raise source.error(err.msg, len(name) + 1) from None
if condgroup:
p = _parse_sub_cond(source, state, condgroup, verbose, nested + 1)
else:
sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and
not (del_flags & SRE_FLAG_VERBOSE))
p = _parse_sub(source, state, sub_verbose, nested + 1)
if not source.match(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if group is not None:
state.closegroup(group, p)
subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p)))
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
else:
raise AssertionError("unsupported special character %r" % (char,))
return subpattern
def _parse_flags(source, state, char):
sourceget = source.get
add_flags = 0
del_flags = 0
if char != "-":
while True:
add_flags |= FLAGS[char]
char = sourceget()
if char is None:
raise source.error("missing -, : or )")
if char in ")-:":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing -, : or )"
raise source.error(msg, len(char))
if char == ")":
state.flags |= add_flags
return None
if add_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn on global flag", 1)
if char == "-":
char = sourceget()
if char is None:
raise source.error("missing flag")
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing flag"
raise source.error(msg, len(char))
while True:
del_flags |= FLAGS[char]
char = sourceget()
if char is None:
raise source.error("missing :")
if char == ":":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing :"
raise source.error(msg, len(char))
assert char == ":"
if del_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn off global flag", 1)
if add_flags & del_flags:
raise source.error("bad inline flags: flag turned on and off", 1)
return add_flags, del_flags
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if flags & SRE_FLAG_LOCALE:
raise ValueError("cannot use LOCALE flag with a str pattern")
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("cannot use UNICODE flag with a bytes pattern")
if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII:
raise ValueError("ASCII and LOCALE flags are incompatible")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
try:
p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)
except Verbose:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
pattern = Pattern()
pattern.flags = flags | SRE_FLAG_VERBOSE
pattern.str = str
source.seek(0)
p = _parse_sub(source, pattern, True, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
if source.next is not None:
assert source.next == ")"
raise source.error("unbalanced parenthesis")
if flags & SRE_FLAG_DEBUG:
p.dump()
return p
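# For a feel of the output (illustrative; the repr shown is approximate):
#
#   parse('a|b')    # -> [(IN, [(LITERAL, 97), (LITERAL, 98)])]
#   parse('(x)+')   # -> [(MAX_REPEAT, (1, MAXREPEAT,
#                   #      [(SUBPATTERN, (1, 0, 0, [(LITERAL, 120)]))]))]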
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
groups = []
literals = []
literal = []
lappend = literal.append
def addgroup(index, pos):
if index > pattern.groups:
raise s.error("invalid group reference %d" % index, pos)
if literal:
literals.append(''.join(literal))
del literal[:]
groups.append((len(literals), index))
literals.append(None)
groupindex = pattern.groupindex
while True:
this = sget()
if this is None:
break # end of replacement string
if this[0] == "\\":
# group
c = this[1]
if c == "g":
name = ""
if not s.match("<"):
raise s.error("missing <")
name = s.getuntil(">")
if name.isidentifier():
try:
index = groupindex[name]
except KeyError:
raise IndexError("unknown group name %r" % name)
else:
try:
index = int(name)
if index < 0:
raise ValueError
except ValueError:
raise s.error("bad character in group name %r" % name,
len(name) + 1) from None
if index >= MAXGROUPS:
raise s.error("invalid group reference %d" % index,
len(name) + 1)
addgroup(index, len(name) + 1)
elif c == "0":
if s.next in OCTDIGITS:
this += sget()
if s.next in OCTDIGITS:
this += sget()
lappend(chr(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this += sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this += sget()
isoctal = True
c = int(this[1:], 8)
if c > 0o377:
raise s.error('octal escape value %s outside of '
'range 0-0o377' % this, len(this))
lappend(chr(c))
if not isoctal:
addgroup(int(this[1:]), len(this) - 1)
else:
try:
this = chr(ESCAPES[this][1])
except KeyError:
if c in ASCIILETTERS:
import warnings
warnings.warn('bad escape %s' % this,
DeprecationWarning, stacklevel=4)
lappend(this)
else:
lappend(this)
if literal:
literals.append(''.join(literal))
if not isinstance(source, str):
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
literals = [None if s is None else s.encode('latin-1') for s in literals]
return groups, literals
def expand_template(template, match):
g = match.group
empty = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = g(group) or empty
except IndexError:
raise error("invalid group reference %d" % index)
return empty.join(literals)
cosmopolitan/third_party/python/Lib/threading.py
"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
from time import monotonic as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
from itertools import islice as _islice, count as _count
try:
import _thread
except ImportError:
import _dummy_thread as _thread
try:
from _collections import deque as _deque
except ImportError:
from collections import deque as _deque
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = [
"get_ident",
"active_count",
"Condition",
"current_thread",
"enumerate",
"main_thread",
"TIMEOUT_MAX",
"Event",
"Lock",
"RLock",
"Semaphore",
"BoundedSemaphore",
"Thread",
"Barrier",
"BrokenBarrierError",
"Timer",
"ThreadError",
"setprofile",
"settrace",
"local",
"stack_size",
]
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_set_sentinel = _thread._set_sentinel
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
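# Minimal reentrancy sketch (illustrative):
#
#   lock = RLock()
#   with lock:          # recursion level 1
#       with lock:      # same thread acquires again: level 2, no deadlock
#           pass
#   # fully released here; other threads may now acquire it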
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s %s.%s object owner=%r count=%d at %s>" % (
"locked" if self._block.locked() else "unlocked",
self.__class__.__module__,
self.__class__.__qualname__,
owner,
self._count,
hex(id(self)),
)
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count += 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = _deque()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if _lock doesn't have _is_owned().
if self._lock.acquire(0):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
gotit = False
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
return gotit
finally:
self._acquire_restore(saved_state)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
        predicate should be a callable whose result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
all_waiters = self._waiters
waiters_to_notify = _deque(_islice(all_waiters, n))
if not waiters_to_notify:
return
for waiter in waiters_to_notify:
waiter.release()
try:
all_waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
notifyAll = notify_all
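# Illustrative producer/consumer sketch (comment only; `items`, `produce`
# and `consume` are hypothetical names). It shows the canonical
# wait()-in-a-loop pattern: the predicate is re-checked after every wakeup,
# because wait() may return before the condition actually holds.
#
#     items = []
#     cond = Condition()
#
#     def consume():
#         with cond:
#             while not items:       # re-check after every wakeup
#                 cond.wait()
#             return items.pop()
#
#     def produce(x):
#         with cond:
#             items.append(x)
#             cond.notify()          # wake one waiting consumer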
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value -= 1
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self._cond:
self._value += 1
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
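# Illustrative sketch (comment only; `worker` and `do_limited_work` are
# hypothetical names): a semaphore initialised to N caps how many threads
# may be inside a section at once.
#
#     pool = Semaphore(3)            # at most 3 concurrent workers
#
#     def worker():
#         with pool:                 # acquire() on entry, release() on exit
#             do_limited_work()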
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
with self._cond:
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += 1
self._cond.notify()
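# Illustrative sketch of the added check (comment only): releasing a bounded
# semaphore more often than it was acquired raises ValueError rather than
# silently growing the counter past its initial value.
#
#     bs = BoundedSemaphore(1)
#     bs.acquire()
#     bs.release()
#     bs.release()                   # raises ValueError here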
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self._cond.__init__(Lock())
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
with self._cond:
self._flag = True
self._cond.notify_all()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
with self._cond:
self._flag = False
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
with self._cond:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
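# Illustrative sketch (comment only; `waiter` and `starter` are hypothetical
# names): threads block in wait() until some other thread calls set(), which
# releases all current and future waiters until clear() is called.
#
#     ready = Event()
#
#     def waiter():
#         ready.wait()               # blocks until the flag becomes true
#
#     def starter():
#         ready.set()                # releases every waiter at once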
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
    points. Threads block on 'wait()' and are simultaneously awoken once they
    have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
        releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
        self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
# see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
# an exception during the _action handler. Break and reraise
self._break()
raise
# Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda: self._state != 0, timeout):
# timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
# resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
# reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
# was broken, set it to reset state
# which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
        # a broken state and all parties are awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
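# Illustrative sketch (comment only; `phase` is a hypothetical function run
# by three threads): each thread blocks in wait() until all three have
# arrived, then all proceed together; wait() returns a distinct index.
#
#     barrier = Barrier(3)
#
#     def phase():
#         index = barrier.wait()     # blocks until 3 threads arrive
#         if index == 0:             # exactly one thread sees index 0
#             print("all threads synchronized")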
# Helper to generate new thread names
_counter = _count().__next__
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
return template % _counter()
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
_dangling = WeakSet()
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
_initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
_exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
# XXX __exc_clear = _sys.exc_clear
def __init__(
self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None
):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
self._target = target
self._name = str(name or _newname())
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
self._tstate_lock = None
self._started = Event()
self._is_stopped = False
self._initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self._stderr = _sys.stderr
# For debugging and _after_fork()
_dangling.add(self)
def _reset_internal_locks(self, is_alive):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
self._started._reset_internal_locks()
if is_alive:
self._set_tstate_lock()
else:
# The thread isn't alive after fork: it doesn't have a tstate
# anymore.
self._is_stopped = True
self._tstate_lock = None
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
self.is_alive() # easy way to get ._is_stopped set when appropriate
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
        # if a non-daemonic thread encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
def _set_tstate_lock(self):
"""
Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted.
"""
self._tstate_lock = _set_sentinel()
self._tstate_lock.acquire()
def _bootstrap_inner(self):
try:
self._set_ident()
self._set_tstate_lock()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
pass
except:
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self._stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys and _sys.stderr is not None:
print(
"Exception in thread %s:\n%s" % (self.name, _format_exc()),
file=_sys.stderr,
)
elif self._stderr is not None:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self._exc_info()
try:
print(
(
"Exception in thread "
+ self.name
+ " (most likely raised during interpreter shutdown):"
),
file=self._stderr,
)
print(("Traceback (most recent call last):"), file=self._stderr)
while exc_tb:
print(
(
' File "%s", line %s, in %s'
% (
exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name,
)
),
file=self._stderr,
)
exc_tb = exc_tb.tb_next
print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
# XXX self._exc_clear()
pass
finally:
with _active_limbo_lock:
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
# After calling ._stop(), .is_alive() returns False and .join() returns
# immediately. ._tstate_lock must be released before calling ._stop().
#
# Normal case: C code at the end of the thread's life
# (release_sentinel in _threadmodule.c) releases ._tstate_lock, and
# that's detected by our ._wait_for_tstate_lock(), called by .join()
# and .is_alive(). Any number of threads _may_ call ._stop()
# simultaneously (for example, if multiple threads are blocked in
# .join() calls), and they're not serialized. That's harmless -
# they'll just make redundant rebindings of ._is_stopped and
# ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the
# "assert self._is_stopped" in ._wait_for_tstate_lock() always works
# (the assert is executed only if ._tstate_lock is None).
#
# Special case: _main_thread releases ._tstate_lock via this
# module's _shutdown() function.
lock = self._tstate_lock
if lock is not None:
assert not lock.locked()
self._is_stopped = True
self._tstate_lock = None
def _delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with _dummy_thread:
#
# Must take care to not raise an exception if _dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). _dummy_thread.get_ident() always returns -1 since
# there is only one thread if _dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the thread (if any) currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from _dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if "dummy_threading" not in _sys.modules:
raise
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if timeout is None:
self._wait_for_tstate_lock()
else:
# the behavior of a negative timeout isn't documented, but
# historically .join(timeout=x) for x<0 has acted as if timeout=0
self._wait_for_tstate_lock(timeout=max(timeout, 0))
def _wait_for_tstate_lock(self, block=True, timeout=-1):
# Issue #18808: wait for the thread state to be gone.
# At the end of the thread's life, after all knowledge of the thread
# is removed from C data structures, C code releases our _tstate_lock.
# This method passes its arguments to _tstate_lock.acquire().
# If the lock is acquired, the C code is done, and self._stop() is
# called. That sets ._is_stopped to True, and ._tstate_lock to None.
lock = self._tstate_lock
if lock is None: # already determined that the C code is done
assert self._is_stopped
elif lock.acquire(block, timeout):
lock.release()
self._stop()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self._initialized, "Thread.__init__() not called"
if self._is_stopped or not self._started.is_set():
return False
self._wait_for_tstate_lock(False)
return not self._is_stopped
isAlive = is_alive
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread")
self._daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
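# Two equivalent usage sketches (comment only; `work` is a hypothetical
# callable): either pass a target to the constructor, or subclass Thread
# and override run().
#
#     t = Thread(target=work, args=(42,))
#     t.start()
#     t.join()                       # wait for the thread to finish
#
#     class Worker(Thread):
#         def run(self):
#             work(42)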
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
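# Illustrative sketch (comment only; `hello` is a hypothetical callback):
# the timer fires exactly once after the interval unless cancel() is called
# while it is still waiting.
#
#     t = Timer(30.0, hello)
#     t.start()                      # hello() runs ~30 seconds from now
#     t.cancel()                     # or stop it before it fires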
# Special thread class to represent the main thread
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._set_tstate_lock()
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
self._started.set()
self._set_ident()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def is_alive(self):
assert not self._is_stopped and self._started.is_set()
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
currentThread = current_thread
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
activeCount = active_count
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_main_thread = _MainThread()
def _shutdown():
# Obscure: other threads may be waiting to join _main_thread. That's
# dubious, but some code does it. We can't wait for C code to release
# the main thread's tstate_lock - that won't happen until the interpreter
# is nearly dead. So we release it here. Note that just calling _stop()
# isn't enough: other threads may already be waiting on _tstate_lock.
tlock = _main_thread._tstate_lock
# The main thread isn't finished yet, so its thread state lock can't have
# been released.
assert tlock is not None
assert tlock.locked()
tlock.release()
_main_thread._stop()
t = _pickSomeNonDaemonThread()
while t:
t.join()
t = _pickSomeNonDaemonThread()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
def main_thread():
"""Return the main thread object.
In normal conditions, the main thread is the thread from which the
Python interpreter was started.
"""
return _main_thread
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock, _main_thread
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
_main_thread = current
with _active_limbo_lock:
# Dangling thread instances must still have their locks reset,
# because someone may join() them.
threads = set(_enumerate())
threads.update(_dangling)
for thread in threads:
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if thread is current:
# There is only one active thread. We reset the ident to
                # its new value since it may have changed.
thread._reset_internal_locks(True)
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._reset_internal_locks(False)
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
cosmopolitan/third_party/python/Lib/abc.py
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
# Note: this counter is private. Use `abc.get_cache_token()` for
# external code.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace, **kwargs):
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__qualname__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__):
if name.startswith("_abc_"):
value = getattr(cls, name)
if isinstance(value, WeakSet):
value = set(value)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
class ABC(metaclass=ABCMeta):
"""Helper class that provides a standard way to create an ABC using
inheritance.
"""
pass
def get_cache_token():
"""Returns the current ABC cache token.
The token is an opaque object (supporting equality testing) identifying the
current version of the ABC cache for virtual subclasses. The token changes
with every call to ``register()`` on any ABC.
"""
return ABCMeta._abc_invalidation_counter
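# Illustrative sketch of virtual subclassing (comment only; `MyABC` and
# `Plain` are hypothetical names): register() makes issubclass() and
# isinstance() accept an unrelated class, without changing its MRO.
#
#     class MyABC(ABC):
#         pass
#
#     class Plain:
#         pass
#
#     MyABC.register(Plain)
#     assert issubclass(Plain, MyABC)      # True, via the registry
#     assert MyABC not in Plain.__mro__    # not via real inheritance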
cosmopolitan/third_party/python/Lib/_strptime.py
"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
# import locale
import calendar
from datetime import (date as datetime_date,
timedelta as datetime_timedelta,
timezone as datetime_timezone)
try:
from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
# Figure out what the current language is set to.
# return locale.getlocale(locale.LC_TIME)
return (None, None)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
The locale language is set at the offset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
Only other possible issue is if someone changed the timezone and did
not call tz.tzset . That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
if time.tzname != self.tzname or time.daylight != self.daylight:
raise ValueError("timezone changed during initialization")
def __pad(self, seq, front):
        # Add '' to seq, at the front if front is true, else at the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (1, 22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == time.tzname[1]
# and time.daylight; handle that in strptime.
try:
time.tzset()
except AttributeError:
pass
self.tzname = time.tzname
self.daylight = time.daylight
no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()})
if self.daylight:
has_saving = frozenset({self.tzname[1].lower()})
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super()
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,6})",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'G': r"(?P<G>\d\d\d\d)",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
'u': r"(?P<u>[1-7])",
'V': r"(?P<V>5[0-3]|0[1-9]|[1-4]\d|\d)",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'z': r"(?P<z>[+-]\d\d[0-5]\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
prevents the possibility of a match occurring for a value that also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
try:
from re import escape as re_escape
except ImportError:
raise ImportError('cosmopolitan _strptime.TimeRE() '
'requires manually yoinking re')
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
try:
from re import compile as re_compile
except ImportError:
raise ImportError('cosmopolitan _strptime.TimeRE() '
'requires manually yoinking re')
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile(r'\s+')
format = whitespace_replacement.sub(r'\\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
try:
from re import compile as re_compile
from re import IGNORECASE
except ImportError:
raise ImportError('cosmopolitan _strptime.TimeRE() '
'requires manually yoinking re')
return re_compile(self.pattern(format), IGNORECASE)
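# Illustrative sketch (comment only) of how TimeRE turns a strftime-style
# format into a regex with one named group per directive; the exact pattern
# text is an implementation detail.
#
#     time_re = TimeRE()
#     regex = time_re.compile("%Y-%m-%d")
#     regex.match("1999-03-17").groupdict()
#     # -> {'Y': '1999', 'm': '03', 'd': '17'}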
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
def _calc_julian_from_V(iso_year, iso_week, iso_weekday):
"""Calculate the Julian day based on the ISO 8601 year, week, and weekday.
ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
ISO week days range from 1 (Monday) to 7 (Sunday).
"""
correction = datetime_date(iso_year, 1, 4).isoweekday() + 3
ordinal = (iso_week * 7) + iso_weekday - correction
# ordinal may be negative or 0 now, which means the date is in the previous
# calendar year
if ordinal < 1:
ordinal += datetime_date(iso_year, 1, 1).toordinal()
iso_year -= 1
ordinal -= datetime_date(iso_year, 1, 1).toordinal()
return iso_year, ordinal
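# Worked example of the formula above: for ISO year 1999, week 11,
# weekday 3 (Wednesday), 4 Jan 1999 is a Monday, so isoweekday() == 1 and
# correction == 4; ordinal = 11*7 + 3 - 4 = 76, i.e. 17 March 1999,
# consistent with the %j == 76 magic date used elsewhere in this module.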
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a 2-tuple consisting of a time struct and an int containing
the number of microseconds based on the input string and the
format string."""
for index, arg in enumerate([data_string, format]):
if not isinstance(arg, str):
msg = "strptime() argument {} must be str, not {}"
raise TypeError(msg.format(index, type(arg)))
global _TimeRE_cache, _regex_cache
with _cache_lock:
locale_time = _TimeRE_cache.locale_time
if (_getlang() != locale_time.lang or
time.tzname != locale_time.tzname or
time.daylight != locale_time.daylight):
_TimeRE_cache = TimeRE()
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError as err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format)) from None
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format) from None
_regex_cache[format] = format_regex
found = format_regex.match(data_string)
if not found:
raise ValueError("time data %r does not match format %r" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
iso_year = year = None
month = day = 1
hour = minute = second = fraction = 0
tz = -1
tzoffset = None
    # Default to -1 to signify that the value is not known; not critical to
    # have, though
iso_week = week_of_year = None
week_of_year_start = None
# weekday and julian defaulted to None so as to signal need to calculate
# values
weekday = julian = None
found_dict = found.groupdict()
for group_key in found_dict.keys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
# Open Group specification for strptime() states that a %y
            # value in the range of [00, 68] is in the century 2000, while
            # [69, 99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'G':
iso_year = int(found_dict['G'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
# Pad to always return microseconds.
s += "0" * (6 - len(s))
fraction = int(s)
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'u':
weekday = int(found_dict['u'])
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'V':
iso_week = int(found_dict['V'])
elif group_key == 'z':
z = found_dict['z']
tzoffset = int(z[1:3]) * 60 + int(z[3:5])
if z.startswith("-"):
tzoffset = -tzoffset
elif group_key == 'Z':
            # Since -1 is the default value, we only need to worry about
            # setting tz if it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
    # Deal with the cases where ambiguities arise:
    # don't assume default values for ISO week/year
if year is None and iso_year is not None:
if iso_week is None or weekday is None:
raise ValueError("ISO year directive '%G' must be used with "
"the ISO week directive '%V' and a weekday "
"directive ('%A', '%a', '%w', or '%u').")
if julian is not None:
raise ValueError("Day of the year directive '%j' is not "
"compatible with ISO year directive '%G'. "
"Use '%Y' instead.")
elif week_of_year is None and iso_week is not None:
if weekday is None:
raise ValueError("ISO week directive '%V' must be used with "
"the ISO year directive '%G' and a weekday "
"directive ('%A', '%a', '%w', or '%u').")
else:
raise ValueError("ISO week directive '%V' is incompatible with "
"the year directive '%Y'. Use the ISO year '%G' "
"instead.")
leap_year_fix = False
if year is None and month == 2 and day == 29:
year = 1904 # 1904 is first leap year of 20th century
leap_year_fix = True
elif year is None:
year = 1900
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian is None and weekday is not None:
if week_of_year is not None:
            week_starts_Mon = week_of_year_start == 0
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
elif iso_year is not None and iso_week is not None:
year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1)
if julian is not None and julian <= 0:
year -= 1
yday = 366 if calendar.isleap(year) else 365
julian += yday
if julian is None:
        # Cannot pre-calculate datetime_date() since the date can change in
        # the Julian calculation and thus could have a different value for
        # the day-of-the-week calculation.
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day (or if it was
# calculated above with year/week/weekday) it will be accurate.
datetime_result = datetime_date.fromordinal(
(julian - 1) +
datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday is None:
weekday = datetime_date(year, month, day).weekday()
# Add timezone info
tzname = found_dict.get("Z")
if tzoffset is not None:
gmtoff = tzoffset * 60
else:
gmtoff = None
if leap_year_fix:
# the caller didn't supply a year but asked for Feb 29th. We couldn't
# use the default of 1900 for computations. We set it back to ensure
# that February 29th is smaller than March 1st.
year = 1900
return (year, month, day,
hour, minute, second,
weekday, julian, tz, tzname, gmtoff), fraction
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the
format string."""
tt = _strptime(data_string, format)[0]
return time.struct_time(tt[:time._STRUCT_TM_ITEMS])
def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a class cls instance based on the input string and the
format string."""
tt, fraction = _strptime(data_string, format)
tzname, gmtoff = tt[-2:]
args = tt[:6] + (fraction,)
if gmtoff is not None:
tzdelta = datetime_timedelta(seconds=gmtoff)
if tzname:
tz = datetime_timezone(tzdelta, tzname)
else:
tz = datetime_timezone(tzdelta)
args += (tz,)
return cls(*args)
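# Illustrative self-check, added here for exposition only (not part of the
# upstream module): it exercises the ISO year/week/weekday path handled by
# _calc_julian_from_V() and the %z offset parsing above.  2004 has an ISO
# week 53, whose Saturday (%u == 6) falls on 2005-01-01.
def _iso_week_demo():
    tt, fraction = _strptime("2004-W53-6 +0130", "%G-W%V-%u %z")
    assert tt[:3] == (2005, 1, 1), tt[:3]
    assert tt[-1] == 90 * 60, tt[-1]   # gmtoff for +01:30, in seconds
    assert fraction == 0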
cosmopolitan/third_party/python/Lib/trace.py
#!/usr/bin/env python3
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:[email protected]
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically:
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
trace=0, count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
__all__ = ['Trace', 'CoverageResults']
import argparse
import linecache
import os
import re
import sys
import token
import tokenize
import inspect
import gc
import dis
import pickle
from time import monotonic as _time
try:
import threading
except ImportError:
_settrace = sys.settrace
def _unsettrace():
sys.settrace(None)
else:
def _settrace(func):
threading.settrace(func)
sys.settrace(func)
def _unsettrace():
sys.settrace(None)
threading.settrace(None)
PRAGMA_NOCOVER = "#pragma NO COVER"
class _Ignore:
def __init__(self, modules=None, dirs=None):
self._mods = set() if not modules else set(modules)
self._dirs = [] if not dirs else [os.path.normpath(d)
for d in dirs]
self._ignore = { '<string>': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list.
if modulename in self._mods: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
for mod in self._mods:
# Need to take some care since ignoring
# "cmp" mustn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
if modulename.startswith(mod + '.'):
self._ignore[modulename] = 1
return 1
# Now check that filename isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
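# Illustrative sketch of the semantics above, added for exposition (not in
# the upstream module); the paths are hypothetical POSIX paths.  Ignoring
# "spam" also ignores the proper submodule "spam.eggs", but not the merely
# prefix-matching "spamcache".
def _ignore_demo():
    ig = _Ignore(modules=['spam'], dirs=['/opt/venv'])
    assert ig.names('/src/spam/eggs.py', 'spam.eggs') == 1   # proper submodule
    assert ig.names('/src/spamcache.py', 'spamcache') == 0   # prefix alone
    assert ig.names('/opt/venv/site.py', 'site') == 1        # in an ignored dir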
def _modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
def _fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
        # slice rather than index so an exact match cannot raise IndexError
        if comparepath.startswith(dir) and comparepath[len(dir):len(dir)+1] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
# the drive letter is never part of the module name
drive, base = os.path.splitdrive(base)
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename.lstrip(".")
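# Illustrative contrast between the two helpers, added for exposition (not
# upstream code): _modname() keeps only the basename, while _fullmodname()
# reconstructs a dotted name relative to the longest matching sys.path entry.
def _modname_demo():
    assert _modname('/usr/lib/python3/email/utils.py') == 'utils'
    # _fullmodname() would yield 'email.utils' if '/usr/lib/python3' (a
    # hypothetical path) were the longest matching entry on sys.path.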
class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
callers=None, outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}
self.counter = self.counts.copy() # map (filename, lineno) to count
self.calledfuncs = calledfuncs
if self.calledfuncs is None:
self.calledfuncs = {}
self.calledfuncs = self.calledfuncs.copy()
self.callers = callers
if self.callers is None:
self.callers = {}
self.callers = self.callers.copy()
self.infile = infile
self.outfile = outfile
if self.infile:
# Try to merge existing counts file.
try:
with open(self.infile, 'rb') as f:
counts, calledfuncs, callers = pickle.load(f)
self.update(self.__class__(counts, calledfuncs, callers))
except (OSError, EOFError, ValueError) as err:
print(("Skipping counts file %r: %s"
% (self.infile, err)), file=sys.stderr)
def is_ignored_filename(self, filename):
"""Return True if the filename does not refer to a file
we want to have reported.
"""
return filename.startswith('<') and filename.endswith('>')
def update(self, other):
"""Merge in the data from another CoverageResults"""
counts = self.counts
calledfuncs = self.calledfuncs
callers = self.callers
other_counts = other.counts
other_calledfuncs = other.calledfuncs
other_callers = other.callers
for key in other_counts:
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs:
calledfuncs[key] = 1
for key in other_callers:
callers[key] = 1
def write_results(self, show_missing=True, summary=False, coverdir=None):
"""
Write the coverage results.
:param show_missing: Show lines that had no hits.
:param summary: Include coverage summary per module.
        :param coverdir: If None, the results of each module are placed in its
                         own directory, otherwise they are written to the
                         directory specified.
"""
if self.calledfuncs:
print()
print("functions called:")
calls = self.calledfuncs
for filename, modulename, funcname in sorted(calls):
print(("filename: %s, modulename: %s, funcname: %s"
% (filename, modulename, funcname)))
if self.callers:
print()
print("calling relationships:")
lastfile = lastcfile = ""
for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \
in sorted(self.callers):
if pfile != lastfile:
print()
print("***", pfile, "***")
lastfile = pfile
lastcfile = ""
if cfile != pfile and lastcfile != cfile:
print(" -->", cfile)
lastcfile = cfile
print(" %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc))
# turn the counts data ("(filename, lineno) = count") into something
# accessible on a per-file basis
per_file = {}
for filename, lineno in self.counts:
lines_hit = per_file[filename] = per_file.get(filename, {})
lines_hit[lineno] = self.counts[(filename, lineno)]
# accumulate summary info, if needed
sums = {}
for filename, count in per_file.items():
if self.is_ignored_filename(filename):
continue
if filename.endswith(".pyc"):
filename = filename[:-1]
if coverdir is None:
dir = os.path.dirname(os.path.abspath(filename))
modulename = _modname(filename)
else:
dir = coverdir
if not os.path.exists(dir):
os.makedirs(dir)
modulename = _fullmodname(filename)
# If desired, get a list of the line numbers which represent
# executable content (returned as a dict for better lookup speed)
if show_missing:
lnotab = _find_executable_linenos(filename)
else:
lnotab = {}
source = linecache.getlines(filename)
coverpath = os.path.join(dir, modulename + ".cover")
with open(filename, 'rb') as fp:
encoding, _ = tokenize.detect_encoding(fp.readline)
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count, encoding)
if summary and n_lines:
percent = int(100 * n_hits / n_lines)
sums[modulename] = n_lines, percent, modulename, filename
if summary and sums:
print("lines cov% module (path)")
for m in sorted(sums):
n_lines, percent, modulename, filename = sums[m]
print("%5d %3d%% %s (%s)" % sums[m])
if self.outfile:
        # try to store counts and module info in self.outfile
try:
pickle.dump((self.counts, self.calledfuncs, self.callers),
open(self.outfile, 'wb'), 1)
except OSError as err:
print("Can't save counts files because %s" % err, file=sys.stderr)
def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None):
"""Return a coverage results file in path."""
# ``lnotab`` is a dict of executable lines, or a line number "table"
try:
outfile = open(path, "w", encoding=encoding)
except OSError as err:
print(("trace: Could not open %r for writing: %s "
"- skipping" % (path, err)), file=sys.stderr)
return 0, 0
n_lines = 0
n_hits = 0
with outfile:
for lineno, line in enumerate(lines, 1):
# do the blank/comment match to try to mark more lines
# (help the reader find stuff that hasn't been covered)
if lineno in lines_hit:
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
                elif lineno in lnotab and PRAGMA_NOCOVER not in line:
# Highlight never-executed lines, unless the line contains
# #pragma: NO COVER
outfile.write(">>>>>> ")
n_lines += 1
else:
outfile.write(" ")
outfile.write(line.expandtabs(8))
return n_hits, n_lines
def _find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos
def _find_lines(code, strs):
"""Return lineno dict for all code objects reachable from code."""
# get all of the lineno information from the code of this scope level
linenos = _find_lines_from_code(code, strs)
# and check the constants for references to other code objects
for c in code.co_consts:
if inspect.iscode(c):
# find another code object, so recurse into it
linenos.update(_find_lines(c, strs))
return linenos
def _find_strings(filename, encoding=None):
"""Return a dict of possible docstring positions.
    The dict maps line numbers to 1.  There is an entry for each
    line that contains only a string or a part of a triple-quoted
    string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
with open(filename, encoding=encoding) as f:
tok = tokenize.generate_tokens(f.readline)
for ttype, tstr, start, end, line in tok:
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
return d
def _find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number table."""
try:
with tokenize.open(filename) as f:
prog = f.read()
encoding = f.encoding
except OSError as err:
print(("Not printing coverage data for %r: %s"
% (filename, err)), file=sys.stderr)
return {}
code = compile(prog, filename, "exec")
strs = _find_strings(filename, encoding)
return _find_lines(code, strs)
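# Illustrative usage, added for exposition (not upstream code): the keys of
# the returned dict are the line numbers considered executable, i.e. the
# code objects' line-number tables minus lines holding only (doc)strings.
def _executable_lines_demo(filename):
    return sorted(_find_executable_linenos(filename))   # e.g. pass __file__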
class Trace:
def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
ignoremods=(), ignoredirs=(), infile=None, outfile=None,
timing=False):
"""
@param count true iff it should count number of times each
line is executed
@param trace true iff it should print out each line that is
being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
                     that were called at least once. This overrides
                     `count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
all of the (recursive) contents of
@param infile file from which to read stored counts to be
added into the results
@param outfile file in which to write the results
    @param timing true iff timing information should be displayed
"""
self.infile = infile
self.outfile = outfile
self.ignore = _Ignore(ignoremods, ignoredirs)
self.counts = {} # keys are (filename, linenumber)
self.pathtobasename = {} # for memoizing os.path.basename
self.donothing = 0
self.trace = trace
self._calledfuncs = {}
self._callers = {}
self._caller_cache = {}
self.start_time = None
if timing:
self.start_time = _time()
if countcallers:
self.globaltrace = self.globaltrace_trackcallers
elif countfuncs:
self.globaltrace = self.globaltrace_countfuncs
elif trace and count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace_and_count
elif trace:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace
elif count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_count
else:
# Ahem -- do nothing? Okay.
self.donothing = 1
def run(self, cmd):
import __main__
dict = __main__.__dict__
self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals=None, locals=None):
if globals is None: globals = {}
if locals is None: locals = {}
if not self.donothing:
_settrace(self.globaltrace)
try:
exec(cmd, globals, locals)
finally:
if not self.donothing:
_unsettrace()
def runfunc(self, func, *args, **kw):
result = None
if not self.donothing:
sys.settrace(self.globaltrace)
try:
result = func(*args, **kw)
finally:
if not self.donothing:
sys.settrace(None)
return result
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = _modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
            # require len(funcs) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def globaltrace_trackcallers(self, frame, why, arg):
"""Handler for call events.
Adds information about who called who to the self._callers dict.
"""
if why == 'call':
# XXX Should do a better job of identifying methods
this_func = self.file_module_function_of(frame)
parent_func = self.file_module_function_of(frame.f_back)
self._callers[(parent_func, this_func)] = 1
def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.
Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':
this_func = self.file_module_function_of(frame)
self._calledfuncs[this_func] = 1
def globaltrace_lt(self, frame, why, arg):
"""Handler for call events.
If the code block being entered is to be ignored, returns `None',
else returns self.localtrace.
"""
if why == 'call':
code = frame.f_code
filename = frame.f_globals.get('__file__', None)
if filename:
# XXX _modname() doesn't work right for packages, so
# the ignore support won't work right for packages
modulename = _modname(filename)
if modulename is not None:
ignore_it = self.ignore.names(filename, modulename)
if not ignore_it:
if self.trace:
print((" --- modulename: %s, funcname: %s"
% (modulename, code.co_name)))
return self.localtrace
else:
return None
def localtrace_trace_and_count(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
if self.start_time:
print('%.2f' % (_time() - self.start_time), end=' ')
bname = os.path.basename(filename)
print("%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)), end='')
return self.localtrace
def localtrace_trace(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if self.start_time:
print('%.2f' % (_time() - self.start_time), end=' ')
bname = os.path.basename(filename)
print("%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)), end='')
return self.localtrace
def localtrace_count(self, frame, why, arg):
if why == "line":
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
return self.localtrace
def results(self):
return CoverageResults(self.counts, infile=self.infile,
outfile=self.outfile,
calledfuncs=self._calledfuncs,
callers=self._callers)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='trace 2.0')
grp = parser.add_argument_group('Main options',
'One of these (or --report) must be given')
grp.add_argument('-c', '--count', action='store_true',
help='Count the number of times each line is executed and write '
'the counts to <module>.cover for each module executed, in '
'the module\'s directory. See also --coverdir, --file, '
'--no-report below.')
grp.add_argument('-t', '--trace', action='store_true',
help='Print each line to sys.stdout before it is executed')
grp.add_argument('-l', '--listfuncs', action='store_true',
help='Keep track of which functions are executed at least once '
'and write the results to sys.stdout after the program exits. '
'Cannot be specified alongside --trace or --count.')
grp.add_argument('-T', '--trackcalls', action='store_true',
help='Keep track of caller/called pairs and write the results to '
'sys.stdout after the program exits.')
grp = parser.add_argument_group('Modifiers')
_grp = grp.add_mutually_exclusive_group()
_grp.add_argument('-r', '--report', action='store_true',
help='Generate a report from a counts file; does not execute any '
'code. --file must specify the results file to read, which '
'must have been created in a previous run with --count '
'--file=FILE')
_grp.add_argument('-R', '--no-report', action='store_true',
help='Do not generate the coverage report files. '
'Useful if you want to accumulate over several runs.')
grp.add_argument('-f', '--file',
help='File to accumulate counts over several runs')
grp.add_argument('-C', '--coverdir',
help='Directory where the report files go. The coverage report '
'for <package>.<module> will be written to file '
'<dir>/<package>/<module>.cover')
grp.add_argument('-m', '--missing', action='store_true',
help='Annotate executable lines that were not executed with '
'">>>>>> "')
grp.add_argument('-s', '--summary', action='store_true',
help='Write a brief summary for each file to sys.stdout. '
'Can only be used with --count or --report')
grp.add_argument('-g', '--timing', action='store_true',
help='Prefix each line with the time since the program started. '
'Only used while tracing')
grp = parser.add_argument_group('Filters',
'Can be specified multiple times')
grp.add_argument('--ignore-module', action='append', default=[],
                     help='Ignore the given module(s) and their submodules '
                     '(if they are packages). Accepts a comma-separated list '
                     'of module names.')
grp.add_argument('--ignore-dir', action='append', default=[],
help='Ignore files in the given directory '
'(multiple directories can be joined by os.pathsep).')
parser.add_argument('filename', nargs='?',
help='file to run as main program')
parser.add_argument('arguments', nargs=argparse.REMAINDER,
help='arguments to the program')
opts = parser.parse_args()
if opts.ignore_dir:
rel_path = 'lib', 'python{0.major}.{0.minor}'.format(sys.version_info)
_prefix = os.path.join(sys.base_prefix, *rel_path)
_exec_prefix = os.path.join(sys.base_exec_prefix, *rel_path)
def parse_ignore_dir(s):
s = os.path.expanduser(os.path.expandvars(s))
s = s.replace('$prefix', _prefix).replace('$exec_prefix', _exec_prefix)
return os.path.normpath(s)
opts.ignore_module = [mod.strip()
for i in opts.ignore_module for mod in i.split(',')]
opts.ignore_dir = [parse_ignore_dir(s)
for i in opts.ignore_dir for s in i.split(os.pathsep)]
if opts.report:
if not opts.file:
parser.error('-r/--report requires -f/--file')
results = CoverageResults(infile=opts.file, outfile=opts.file)
return results.write_results(opts.missing, opts.summary, opts.coverdir)
if not any([opts.trace, opts.count, opts.listfuncs, opts.trackcalls]):
parser.error('must specify one of --trace, --count, --report, '
'--listfuncs, or --trackcalls')
if opts.listfuncs and (opts.count or opts.trace):
parser.error('cannot specify both --listfuncs and (--trace or --count)')
if opts.summary and not opts.count:
parser.error('--summary can only be used with --count or --report')
if opts.filename is None:
parser.error('filename is missing: required with the main options')
sys.argv = [opts.filename, *opts.arguments]
sys.path[0] = os.path.dirname(opts.filename)
t = Trace(opts.count, opts.trace, countfuncs=opts.listfuncs,
countcallers=opts.trackcalls, ignoremods=opts.ignore_module,
ignoredirs=opts.ignore_dir, infile=opts.file,
outfile=opts.file, timing=opts.timing)
try:
with open(opts.filename) as fp:
code = compile(fp.read(), opts.filename, 'exec')
# try to emulate __main__ namespace as much as possible
globs = {
'__file__': opts.filename,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
t.runctx(code, globs, globs)
except OSError as err:
sys.exit("Cannot run file %r because: %s" % (sys.argv[0], err))
except SystemExit:
pass
results = t.results()
if not opts.no_report:
results.write_results(opts.missing, opts.summary, opts.coverdir)
if __name__=='__main__':
main()
cosmopolitan/third_party/python/Lib/sched.py
"""A generally useful event scheduler class.
Each instance of this class manages its own queue.
No multi-threading is implied; you are supposed to hack that
yourself, or use a single instance per application.
Each instance is parametrized with two functions, one that is
supposed to return the current time, one that is supposed to
implement a delay. You can implement real-time scheduling by
substituting time and sleep from built-in module time, or you can
implement simulated time by writing your own functions. This can
also be used to integrate scheduling with STDWIN events; the delay
function is allowed to modify the queue. Time can be expressed as
integers or floating point numbers, as long as it is consistent.
Events are specified by tuples (time, priority, action, argument, kwargs).
As in UNIX, lower priority numbers mean higher priority; in this
way the queue can be maintained as a priority queue. Execution of the
event means calling the action function, passing it the argument
sequence in "argument" (remember that in Python, multiple function
arguments are packed in a sequence) and keyword parameters in "kwargs".
The action function may be an instance method so it
has another way to reference private data (besides global variables).
"""
import time
import heapq
from collections import namedtuple
try:
import threading
except ImportError:
import dummy_threading as threading
from time import monotonic as _time
__all__ = ["scheduler"]
class Event(namedtuple('Event', 'time, priority, action, argument, kwargs')):
__slots__ = []
def __eq__(s, o): return (s.time, s.priority) == (o.time, o.priority)
def __lt__(s, o): return (s.time, s.priority) < (o.time, o.priority)
def __le__(s, o): return (s.time, s.priority) <= (o.time, o.priority)
def __gt__(s, o): return (s.time, s.priority) > (o.time, o.priority)
def __ge__(s, o): return (s.time, s.priority) >= (o.time, o.priority)
Event.time.__doc__ = ('''Numeric type compatible with the return value of the
timefunc function passed to the constructor.''')
Event.priority.__doc__ = ('''Events scheduled for the same time will be executed
in the order of their priority.''')
Event.action.__doc__ = ('''Executing the event means executing
action(*argument, **kwargs)''')
Event.argument.__doc__ = ('''argument is a sequence holding the positional
arguments for the action.''')
Event.kwargs.__doc__ = ('''kwargs is a dictionary holding the keyword
arguments for the action.''')
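# Illustrative check, added for exposition (not part of the module):
# ordering and equality look only at (time, priority); the action and its
# arguments are deliberately ignored by the comparison methods above.
def _event_order_demo():
    e1 = Event(5, 1, print, (), {})
    e2 = Event(5, 2, print, (), {})
    assert e1 < e2 and e1 != e2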
_sentinel = object()
class scheduler:
def __init__(self, timefunc=_time, delayfunc=time.sleep):
"""Initialize a new instance, passing the time and delay
functions"""
self._queue = []
self._lock = threading.RLock()
self.timefunc = timefunc
self.delayfunc = delayfunc
def enterabs(self, time, priority, action, argument=(), kwargs=_sentinel):
"""Enter a new event in the queue at an absolute time.
Returns an ID for the event which can be used to remove it,
if necessary.
"""
if kwargs is _sentinel:
kwargs = {}
event = Event(time, priority, action, argument, kwargs)
with self._lock:
heapq.heappush(self._queue, event)
return event # The ID
def enter(self, delay, priority, action, argument=(), kwargs=_sentinel):
"""A variant that specifies the time as a relative time.
This is actually the more commonly used interface.
"""
time = self.timefunc() + delay
return self.enterabs(time, priority, action, argument, kwargs)
def cancel(self, event):
"""Remove an event from the queue.
This must be presented the ID as returned by enter().
If the event is not in the queue, this raises ValueError.
"""
with self._lock:
self._queue.remove(event)
heapq.heapify(self._queue)
def empty(self):
"""Check whether the queue is empty."""
with self._lock:
return not self._queue
def run(self, blocking=True):
"""Execute events until the queue is empty.
        If blocking is False, execute the scheduled events due to
expire soonest (if any) and then return the deadline of the
next scheduled call in the scheduler.
When there is a positive delay until the first event, the
delay function is called and the event is left in the queue;
otherwise, the event is removed from the queue and executed
(its action function is called, passing it the argument). If
the delay function returns prematurely, it is simply
restarted.
It is legal for both the delay function and the action
function to modify the queue or to raise an exception;
exceptions are not caught but the scheduler's state remains
well-defined so run() may be called again.
A questionable hack is added to allow other threads to run:
just after an event is executed, a delay of 0 is executed, to
avoid monopolizing the CPU when other threads are also
runnable.
"""
# localize variable access to minimize overhead
# and to improve thread safety
lock = self._lock
q = self._queue
delayfunc = self.delayfunc
timefunc = self.timefunc
pop = heapq.heappop
while True:
with lock:
if not q:
break
time, priority, action, argument, kwargs = q[0]
now = timefunc()
if time > now:
delay = True
else:
delay = False
pop(q)
if delay:
if not blocking:
return time - now
delayfunc(time - now)
else:
action(*argument, **kwargs)
delayfunc(0) # Let other threads run
@property
def queue(self):
"""An ordered list of upcoming events.
Events are named tuples with fields for:
        time, priority, action, argument, kwargs
"""
# Use heapq to sort the queue rather than using 'sorted(self._queue)'.
# With heapq, two events scheduled at the same time will show in
# the actual order they would be retrieved.
with self._lock:
events = self._queue[:]
return list(map(heapq.heappop, [events]*len(events)))
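# Minimal usage sketch, added for exposition (not part of the module): real
# time via the default timefunc/delayfunc; two events entered for the same
# absolute time run in priority order.
def _scheduler_demo():
    s = scheduler()
    t = s.timefunc() + 0.05
    s.enterabs(t, 2, print, ('world',))
    s.enterabs(t, 1, print, ('hello',), kwargs={'end': ' '})
    s.run()   # prints "hello world" after roughly 50 ms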
cosmopolitan/third_party/python/Lib/copy.py
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
stack trace, stack frame, file, socket, window, or array, nor any
similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
class Error(Exception):
pass
error = Error # backward compatibility
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class
issc = False
if issc:
# treat it as a regular class:
return _copy_immutable(x)
copier = getattr(cls, "__copy__", None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(4)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error("un(shallow)copyable object of type %s" % cls)
if isinstance(rv, str):
return x
return _reconstruct(x, None, *rv)
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, float, bool, complex, str, tuple,
bytes, frozenset, type, range, slice,
types.BuiltinFunctionType, type(Ellipsis), type(NotImplemented),
types.FunctionType, weakref.ref):
d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
d[t] = _copy_immutable
d[list] = list.copy
d[dict] = dict.copy
d[set] = set.copy
d[bytearray] = bytearray.copy
if PyStringMap is not None:
d[PyStringMap] = PyStringMap.copy
del d, t
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(4)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
if isinstance(rv, str):
y = x
else:
y = _reconstruct(x, memo, *rv)
    # If it is its own copy, don't memoize.
if y is not x:
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
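# Illustrative check, added for exposition (not part of the module): the
# memo is what makes self-referential objects safe to deep-copy; the copy
# ends up referring to itself rather than to the original.
def _deepcopy_cycle_demo():
    x = []
    x.append(x)              # x contains itself
    y = deepcopy(x)
    assert y is not x and y[0] is y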
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[type(NotImplemented)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
d[complex] = _deepcopy_atomic
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo, deepcopy=deepcopy):
y = []
memo[id(x)] = y
append = y.append
for a in x:
append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo, deepcopy=deepcopy):
y = [deepcopy(a, memo) for a in x]
# We're not going to put the tuple in the memo, but it's still important we
# check for it, in case the tuple contains recursive mutable structures.
try:
return memo[id(x)]
except KeyError:
pass
for k, j in zip(x, y):
if k is not j:
y = tuple(y)
break
else:
y = x
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo, deepcopy=deepcopy):
y = {}
memo[id(x)] = y
for key, value in x.items():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
return type(x)(x.__func__, deepcopy(x.__self__, memo))
d[types.MethodType] = _deepcopy_method
del d
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
    to ensure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, memo, func, args,
state=None, listiter=None, dictiter=None,
deepcopy=deepcopy):
deep = memo is not None
if deep and args:
args = (deepcopy(arg, memo) for arg in args)
y = func(*args)
if deep:
memo[id(x)] = y
if state is not None:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.items():
setattr(y, key, value)
if listiter is not None:
if deep:
for item in listiter:
item = deepcopy(item, memo)
y.append(item)
else:
for item in listiter:
y.append(item)
if dictiter is not None:
if deep:
for key, value in dictiter:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
else:
for key, value in dictiter:
y[key] = value
return y
del types, weakref, PyStringMap
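# Minimal usage sketch, added for exposition (not part of the module):
# shallow copy shares the inner lists, deep copy duplicates them.
def _copy_demo():
    a = [[1, 2], [3, 4]]
    b = copy(a)
    c = deepcopy(a)
    assert b is not a and b[0] is a[0]       # shallow: same inner objects
    assert c is not a and c[0] is not a[0]   # deep: recursively copied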
cosmopolitan/third_party/python/Lib/pickle.py
"""Create portable serialized representations of Python objects.
See module copyreg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
from types import FunctionType
from copyreg import dispatch_table
from copyreg import _extension_registry, _inverted_registry, _extension_cache
from itertools import islice
from functools import partial
from encodings import raw_unicode_escape
import sys
from sys import maxsize
from struct import pack, unpack
import re
import io
import codecs
import _compat_pickle
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# Shortcut for use in isinstance testing
bytes_types = (bytes, bytearray)
# These are purely informational; no code uses these.
format_version = "4.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
"3.0", # Protocol 3
"4.0", # Protocol 4
] # Old format versions we can read
# This is the highest protocol number we know how to read.
HIGHEST_PROTOCOL = 4
# The protocol we write by default. May be less than HIGHEST_PROTOCOL.
# We intentionally write a protocol that Python 2.x cannot read;
# there are too many issues with that.
DEFAULT_PROTOCOL = 3
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = b'(' # push special markobject on stack
STOP = b'.' # every pickle ends with STOP
POP = b'0' # discard topmost stack item
POP_MARK = b'1' # discard stack top through topmost markobject
DUP = b'2' # duplicate top stack item
FLOAT = b'F' # push float object; decimal string argument
INT = b'I' # push integer or bool; decimal string argument
BININT = b'J' # push four-byte signed int
BININT1 = b'K' # push 1-byte unsigned int
LONG = b'L' # push long; decimal string argument
BININT2 = b'M' # push 2-byte unsigned int
NONE = b'N' # push None
PERSID = b'P' # push persistent object; id is taken from string arg
BINPERSID = b'Q' # " " " ; " " " " stack
REDUCE = b'R' # apply callable to argtuple, both on stack
STRING = b'S' # push string; NL-terminated string argument
BINSTRING = b'T' # push string; counted binary string argument
SHORT_BINSTRING= b'U' # " " ; " " " " < 256 bytes
UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = b'X' # " " " ; counted UTF-8 string argument
APPEND = b'a' # append stack top to list below it
BUILD = b'b' # call __setstate__ or __dict__.update()
GLOBAL = b'c' # push self.find_class(modname, name); 2 string args
DICT = b'd' # build a dict from stack items
EMPTY_DICT = b'}' # push empty dict
APPENDS = b'e' # extend list on stack by topmost stack slice
GET = b'g' # push item from memo on stack; index is string arg
BINGET = b'h' # " " " " " " ; " " 1-byte arg
INST = b'i' # build & push class instance
LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg
LIST = b'l' # build list from topmost stack items
EMPTY_LIST = b']' # push empty list
OBJ = b'o' # build & push class instance
PUT = b'p' # store stack top in memo; index is string arg
BINPUT = b'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg
SETITEM = b's' # add key+value pair to dict
TUPLE = b't' # build tuple from topmost stack items
EMPTY_TUPLE = b')' # push empty tuple
SETITEMS = b'u' # modify dict by adding topmost key+value pairs
BINFLOAT = b'G' # push float; arg is 8-byte float encoding
TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = b'\x80' # identify pickle protocol
NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple
EXT1 = b'\x82' # push object from extension registry; 1-byte index
EXT2 = b'\x83' # ditto, but 2-byte index
EXT4 = b'\x84' # ditto, but 4-byte index
TUPLE1 = b'\x85' # build 1-tuple from stack top
TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items
TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items
NEWTRUE = b'\x88' # push True
NEWFALSE = b'\x89' # push False
LONG1 = b'\x8a' # push long from < 256 bytes
LONG4 = b'\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
# Protocol 3 (Python 3.x)
BINBYTES = b'B' # push bytes; counted binary string argument
SHORT_BINBYTES = b'C' # " " ; " " " " < 256 bytes
# Protocol 4
SHORT_BINUNICODE = b'\x8c' # push short string; UTF-8 length < 256 bytes
BINUNICODE8 = b'\x8d' # push very long string
BINBYTES8 = b'\x8e' # push very long bytes string
EMPTY_SET = b'\x8f' # push empty set on the stack
ADDITEMS = b'\x90' # modify set by adding topmost stack items
FROZENSET = b'\x91' # build frozenset from topmost stack items
NEWOBJ_EX = b'\x92' # like NEWOBJ but work with keyword only arguments
STACK_GLOBAL = b'\x93' # same as GLOBAL but using names on the stacks
MEMOIZE = b'\x94' # store top of the stack in memo
FRAME = b'\x95' # indicate the beginning of a new frame
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)])
class _Framer:
_FRAME_SIZE_TARGET = 64 * 1024
def __init__(self, file_write):
self.file_write = file_write
self.current_frame = None
def start_framing(self):
self.current_frame = io.BytesIO()
def end_framing(self):
if self.current_frame and self.current_frame.tell() > 0:
self.commit_frame(force=True)
self.current_frame = None
def commit_frame(self, force=False):
if self.current_frame:
f = self.current_frame
if f.tell() >= self._FRAME_SIZE_TARGET or force:
with f.getbuffer() as data:
n = len(data)
write = self.file_write
write(FRAME)
write(pack("<Q", n))
write(data)
f.seek(0)
f.truncate()
def write(self, data):
if self.current_frame:
return self.current_frame.write(data)
else:
return self.file_write(data)
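# Illustrative check, added for exposition (not upstream code): once the
# buffered frame reaches _FRAME_SIZE_TARGET, commit_frame() emits a FRAME
# opcode, an 8-byte little-endian length, and then the buffered payload.
def _framer_demo():
    out = io.BytesIO()
    framer = _Framer(out.write)
    framer.start_framing()
    framer.write(b'x' * _Framer._FRAME_SIZE_TARGET)
    framer.commit_frame()                  # size target reached: flush
    framer.end_framing()
    assert out.getvalue()[:1] == FRAME
    assert unpack("<Q", out.getvalue()[1:9])[0] == _Framer._FRAME_SIZE_TARGET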
class _Unframer:
def __init__(self, file_read, file_readline, file_tell=None):
self.file_read = file_read
self.file_readline = file_readline
self.current_frame = None
def read(self, n):
if self.current_frame:
data = self.current_frame.read(n)
if not data and n != 0:
self.current_frame = None
return self.file_read(n)
if len(data) < n:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_read(n)
def readline(self):
if self.current_frame:
data = self.current_frame.readline()
if not data:
self.current_frame = None
return self.file_readline()
if data[-1] != b'\n'[0]:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_readline()
def load_frame(self, frame_size):
if self.current_frame and self.current_frame.read() != b'':
raise UnpicklingError(
"beginning of a new frame before end of current frame")
self.current_frame = io.BytesIO(self.file_read(frame_size))
# Tools used for pickling.
def _getattribute(obj, name):
for subpath in name.split('.'):
if subpath == '<locals>':
raise AttributeError("Can't get local attribute {!r} on {!r}"
.format(name, obj))
try:
parent = obj
obj = getattr(obj, subpath)
except AttributeError:
raise AttributeError("Can't get attribute {!r} on {!r}"
.format(name, obj))
return obj, parent
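# Illustrative usage, added for exposition (not upstream code): the
# dotted-path lookup used by save_global() returns both the target and the
# object it was found on.
def _getattribute_demo():
    obj, parent = _getattribute(io, 'BytesIO.getvalue')
    assert obj is io.BytesIO.getvalue
    assert parent is io.BytesIO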
def whichmodule(obj, name):
"""Find the module an object belong to."""
module_name = getattr(obj, '__module__', None)
if module_name is not None:
return module_name
# Protect the iteration by using a list copy of sys.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr.
for module_name, module in list(sys.modules.items()):
if module_name == '__main__' or module is None:
continue
try:
if _getattribute(module, name)[0] is obj:
return module_name
except AttributeError:
pass
return '__main__'
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
    Note that 0 is a special case, returning an empty bytes object, to
    save a byte in the LONG1 pickling context.
>>> encode_long(0)
b''
>>> encode_long(255)
b'\xff\x00'
>>> encode_long(32767)
b'\xff\x7f'
>>> encode_long(-256)
b'\x00\xff'
>>> encode_long(-32768)
b'\x00\x80'
>>> encode_long(-128)
b'\x80'
>>> encode_long(127)
b'\x7f'
>>>
"""
if x == 0:
return b''
nbytes = (x.bit_length() >> 3) + 1
result = x.to_bytes(nbytes, byteorder='little', signed=True)
if x < 0 and nbytes > 1:
if result[-1] == 0xff and (result[-2] & 0x80) != 0:
result = result[:-1]
return result
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long(b'')
0
>>> decode_long(b"\xff\x00")
255
>>> decode_long(b"\xff\x7f")
32767
>>> decode_long(b"\x00\xff")
-256
>>> decode_long(b"\x00\x80")
-32768
>>> decode_long(b"\x80")
-128
>>> decode_long(b"\x7f")
127
"""
return int.from_bytes(data, byteorder='little', signed=True)
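# Illustrative round-trip check, added for exposition (not upstream code):
# encode_long() and decode_long() are inverses for any Python int.
def _long_roundtrip_demo():
    for n in (0, 1, -1, 127, 128, -128, -129, 2**70, -2**70):
        assert decode_long(encode_long(n)) == n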
# Pickling machinery
class _Pickler:
def __init__(self, file, protocol=None, *, fix_imports=True):
"""This takes a binary file for writing a pickle data stream.
The optional *protocol* argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2, 3 and 4. The
default protocol is 3; a backward-incompatible protocol designed
for Python 3.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The *file* argument must have a write() method that accepts a
single bytes argument. It can thus be a file object opened for
binary writing, an io.BytesIO instance, or any other custom
object that meets this interface.
If *fix_imports* is True and *protocol* is less than 3, pickle
will try to map the new Python 3 names to the old module names
used in Python 2, so that the pickle data stream is readable
with Python 2.
"""
if protocol is None:
protocol = DEFAULT_PROTOCOL
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
try:
self._file_write = file.write
except AttributeError:
raise TypeError("file must have a 'write' attribute")
self.framer = _Framer(self._file_write)
self.write = self.framer.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
self.fix_imports = fix_imports and protocol < 3
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects
are pickled by reference and not by value. This method is
useful when re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
# Check whether Pickler was initialized correctly. This is
# only needed to mimic the behavior of _pickle.Pickler.dump().
if not hasattr(self, "_file_write"):
raise PicklingError("Pickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
if self.proto >= 2:
self.write(PROTO + pack("<B", self.proto))
if self.proto >= 4:
self.framer.start_framing()
self.save(obj)
self.write(STOP)
self.framer.end_framing()
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
idx = len(self.memo)
self.write(self.put(idx))
self.memo[id(obj)] = idx, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, idx):
if self.proto >= 4:
return MEMOIZE
elif self.bin:
if idx < 256:
return BINPUT + pack("<B", idx)
else:
return LONG_BINPUT + pack("<I", idx)
else:
return PUT + repr(idx).encode("ascii") + b'\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i):
if self.bin:
if i < 256:
return BINGET + pack("<B", i)
else:
return LONG_BINGET + pack("<I", i)
return GET + repr(i).encode("ascii") + b'\n'
def save(self, obj, save_persistent_id=True):
self.framer.commit_frame()
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid is not None and save_persistent_id:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x is not None:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f is not None:
f(self, obj) # Call unbound method with explicit self
return
# Check private dispatch table if any, or else copyreg.dispatch_table
reduce = getattr(self, 'dispatch_table', dispatch_table).get(t)
if reduce is not None:
rv = reduce(obj)
else:
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = False
if issc:
self.save_global(obj)
return
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce is not None:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce is not None:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if isinstance(rv, str):
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if not isinstance(rv, tuple):
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid, save_persistent_id=False)
self.write(BINPERSID)
else:
try:
self.write(PERSID + str(pid).encode("ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
def save_reduce(self, func, args, state=None, listitems=None,
dictitems=None, obj=None):
# This API is called by some subclasses
if not isinstance(args, tuple):
raise PicklingError("args from save_reduce() must be a tuple")
if not callable(func):
raise PicklingError("func from save_reduce() must be callable")
save = self.save
write = self.write
func_name = getattr(func, "__name__", "")
if self.proto >= 2 and func_name == "__newobj_ex__":
cls, args, kwargs = args
if not hasattr(cls, "__new__"):
raise PicklingError("args[0] from {} args has no __new__"
.format(func_name))
if obj is not None and cls is not obj.__class__:
raise PicklingError("args[0] from {} args has the wrong class"
.format(func_name))
if self.proto >= 4:
save(cls)
save(args)
save(kwargs)
write(NEWOBJ_EX)
else:
func = partial(cls.__new__, cls, *args, **kwargs)
save(func)
save(())
write(REDUCE)
elif self.proto >= 2 and func_name == "__newobj__":
# A __reduce__ implementation can direct protocol 2 or newer to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
if id(obj) in self.memo:
write(POP + self.get(self.memo[id(obj)][0]))
else:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[type(None)] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(NEWTRUE if obj else NEWFALSE)
else:
self.write(TRUE if obj else FALSE)
dispatch[bool] = save_bool
def save_long(self, obj):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + pack("<B", obj))
return
if obj <= 0xffff:
self.write(BININT2 + pack("<H", obj))
return
# Next check for 4-byte signed ints:
if -0x80000000 <= obj <= 0x7fffffff:
self.write(BININT + pack("<i", obj))
return
if self.proto >= 2:
encoded = encode_long(obj)
n = len(encoded)
if n < 256:
self.write(LONG1 + pack("<B", n) + encoded)
else:
self.write(LONG4 + pack("<i", n) + encoded)
return
self.write(LONG + repr(obj).encode("ascii") + b'L\n')
dispatch[int] = save_long
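# A quick way to watch these opcode choices (sketch; relies only on the
# standard pickletools module):
#
#   import pickle, pickletools
#   for value in (255, 65535, -1, 2**40):
#       pickletools.dis(pickle.dumps(value, protocol=2))
#
# 255 fits BININT1, 65535 fits BININT2, -1 needs the signed 4-byte
# BININT, and 2**40 falls through to the LONG1 encoding.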
def save_float(self, obj):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
dispatch[float] = save_float
def save_bytes(self, obj):
if self.proto < 3:
if not obj: # bytes object is empty
self.save_reduce(bytes, (), obj=obj)
else:
self.save_reduce(codecs.encode,
(str(obj, 'latin1'), 'latin1'), obj=obj)
return
n = len(obj)
if n <= 0xff:
self.write(SHORT_BINBYTES + pack("<B", n) + obj)
elif n > 0xffffffff and self.proto >= 4:
self.write(BINBYTES8 + pack("<Q", n) + obj)
else:
self.write(BINBYTES + pack("<I", n) + obj)
self.memoize(obj)
dispatch[bytes] = save_bytes
def save_str(self, obj):
if self.bin:
encoded = obj.encode('utf-8', 'surrogatepass')
n = len(encoded)
if n <= 0xff and self.proto >= 4:
self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
elif n > 0xffffffff and self.proto >= 4:
self.write(BINUNICODE8 + pack("<Q", n) + encoded)
else:
self.write(BINUNICODE + pack("<I", n) + encoded)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') +
b'\n')
self.memoize(obj)
dispatch[str] = save_str
def save_tuple(self, obj):
if not obj: # tuple is empty
if self.bin:
self.write(EMPTY_TUPLE)
else:
self.write(MARK + TUPLE)
return
n = len(obj)
save = self.save
memo = self.memo
if n <= 3 and self.proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
self.write(POP * n + get)
else:
self.write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write = self.write
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. The tuple was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if self.bin:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
write(TUPLE)
self.memoize(obj)
dispatch[tuple] = save_tuple
def save_list(self, obj):
if self.bin:
self.write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
self.write(MARK + LIST)
self.memoize(obj)
self._batch_appends(obj)
dispatch[list] = save_list
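# The memoize() calls above are what let self-referential containers
# round-trip (sketch using the module-level shorthands defined below):
#
#   lst = []
#   lst.append(lst)
#   out = loads(dumps(lst, protocol=2))
#   assert out[0] is out
#
# The inner reference is emitted as a GET back into the memo instead of
# recursing forever.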
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_dict(self, obj):
if self.bin:
self.write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
self.write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.items())
dispatch[dict] = save_dict
if PyStringMap is not None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_set(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(set, (list(obj),), obj=obj)
return
write(EMPTY_SET)
self.memoize(obj)
it = iter(obj)
while True:
batch = list(islice(it, self._BATCHSIZE))
n = len(batch)
if n > 0:
write(MARK)
for item in batch:
save(item)
write(ADDITEMS)
if n < self._BATCHSIZE:
return
dispatch[set] = save_set
def save_frozenset(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(frozenset, (list(obj),), obj=obj)
return
write(MARK)
for item in obj:
save(item)
if id(obj) in self.memo:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
write(POP_MARK + self.get(self.memo[id(obj)][0]))
return
write(FROZENSET)
self.memoize(obj)
dispatch[frozenset] = save_frozenset
def save_global(self, obj, name=None):
write = self.write
memo = self.memo
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None:
name = obj.__name__
module_name = whichmodule(obj, name)
try:
__import__(module_name, level=0)
module = sys.modules[module_name]
obj2, parent = _getattribute(module, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module_name, name))
else:
if obj2 is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module_name, name))
if self.proto >= 2:
code = _extension_registry.get((module_name, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + pack("<B", code))
elif code <= 0xffff:
write(EXT2 + pack("<H", code))
else:
write(EXT4 + pack("<i", code))
return
lastname = name.rpartition('.')[2]
if parent is module:
name = lastname
# Non-ASCII identifiers are supported only with protocols >= 3.
if self.proto >= 4:
self.save(module_name)
self.save(name)
write(STACK_GLOBAL)
elif parent is not module:
self.save_reduce(getattr, (parent, lastname))
elif self.proto >= 3:
write(GLOBAL + bytes(module_name, "utf-8") + b'\n' +
bytes(name, "utf-8") + b'\n')
else:
if self.fix_imports:
r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
if (module_name, name) in r_name_mapping:
module_name, name = r_name_mapping[(module_name, name)]
elif module_name in r_import_mapping:
module_name = r_import_mapping[module_name]
try:
write(GLOBAL + bytes(module_name, "ascii") + b'\n' +
bytes(name, "ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"can't pickle global identifier '%s.%s' using "
"pickle protocol %i" % (module, name, self.proto))
self.memoize(obj)
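# The EXT1/EXT2/EXT4 fast path above is driven by the copyreg extension
# registry (sketch; the module name, class name and code 240 are
# arbitrary picks):
#
#   import copyreg
#   copyreg.add_extension('mymodule', 'MyClass', 240)
#
# Once registered, protocol 2 and newer pickles refer to MyClass by a
# short numeric code instead of spelling out its module and qualified
# name.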
def save_type(self, obj):
if obj is type(None):
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj is type(...):
return self.save_reduce(type, (...,), obj=obj)
return self.save_global(obj)
dispatch[FunctionType] = save_global
dispatch[type] = save_type
# Unpickling machinery
class _Unpickler:
def __init__(self, file, *, fix_imports=True,
encoding="ASCII", errors="strict"):
"""This takes a binary file for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so
no proto argument is needed.
The argument *file* must have two methods, a read() method that
takes an integer argument, and a readline() method that requires
no arguments. Both methods should return bytes. Thus *file*
can be a binary file object opened for reading, an io.BytesIO
object, or any other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and
*errors*, which are used to control compatibility support for
pickle streams generated by Python 2. If *fix_imports* is True,
pickle will try to map the old Python 2 names to the new names
used in Python 3. The *encoding* and *errors* tell pickle how
to decode 8-bit string instances pickled by Python 2; these
default to 'ASCII' and 'strict', respectively. *encoding* can be
'bytes' to read these 8-bit string instances as bytes objects.
"""
self._file_readline = file.readline
self._file_read = file.read
self.memo = {}
self.encoding = encoding
self.errors = errors
self.proto = 0
self.fix_imports = fix_imports
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
# Check whether Unpickler was initialized correctly. This is
# only needed to mimic the behavior of _pickle.Unpickler.load().
if not hasattr(self, "_file_read"):
raise UnpicklingError("Unpickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
self._unframer = _Unframer(self._file_read, self._file_readline)
self.read = self._unframer.read
self.readline = self._unframer.readline
self.metastack = []
self.stack = []
self.append = self.stack.append
self.proto = 0
read = self.read
dispatch = self.dispatch
try:
while True:
key = read(1)
if not key:
raise EOFError
assert isinstance(key, bytes_types)
dispatch[key[0]](self)
except _Stop as stopinst:
return stopinst.value
# Return a list of items pushed onto the stack since the last MARK instruction.
def pop_mark(self):
items = self.stack
self.stack = self.metastack.pop()
self.append = self.stack.append
return items
def persistent_load(self, pid):
raise UnpicklingError("unsupported persistent id encountered")
dispatch = {}
def load_proto(self):
proto = self.read(1)[0]
if not 0 <= proto <= HIGHEST_PROTOCOL:
raise ValueError("unsupported pickle protocol: %d" % proto)
self.proto = proto
dispatch[PROTO[0]] = load_proto
def load_frame(self):
frame_size, = unpack('<Q', self.read(8))
if frame_size > sys.maxsize:
raise ValueError("frame size > sys.maxsize: %d" % frame_size)
self._unframer.load_frame(frame_size)
dispatch[FRAME[0]] = load_frame
def load_persid(self):
try:
pid = self.readline()[:-1].decode("ascii")
except UnicodeDecodeError:
raise UnpicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
self.append(self.persistent_load(pid))
dispatch[PERSID[0]] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID[0]] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE[0]] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE[0]] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE[0]] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
val = int(data, 0)
self.append(val)
dispatch[INT[0]] = load_int
def load_binint(self):
self.append(unpack('<i', self.read(4))[0])
dispatch[BININT[0]] = load_binint
def load_binint1(self):
self.append(self.read(1)[0])
dispatch[BININT1[0]] = load_binint1
def load_binint2(self):
self.append(unpack('<H', self.read(2))[0])
dispatch[BININT2[0]] = load_binint2
def load_long(self):
val = self.readline()[:-1]
if val and val[-1] == b'L'[0]:
val = val[:-1]
self.append(int(val, 0))
dispatch[LONG[0]] = load_long
def load_long1(self):
n = self.read(1)[0]
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG1[0]] = load_long1
def load_long4(self):
n, = unpack('<i', self.read(4))
if n < 0:
# Corrupt or hostile pickle -- we never write one like this
raise UnpicklingError("LONG pickle has negative byte count")
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG4[0]] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT[0]] = load_float
def load_binfloat(self):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT[0]] = load_binfloat
def _decode_string(self, value):
# Used to allow strings from Python 2 to be decoded either as
# bytes or Unicode strings. This should be used only with the
# STRING, BINSTRING and SHORT_BINSTRING opcodes.
if self.encoding == "bytes":
return value
else:
return value.decode(self.encoding, self.errors)
def load_string(self):
data = self.readline()[:-1]
# Strip outermost quotes
if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
data = data[1:-1]
else:
raise UnpicklingError("the STRING opcode argument must be quoted")
self.append(self._decode_string(codecs.escape_decode(data)[0]))
dispatch[STRING[0]] = load_string
def load_binstring(self):
# Deprecated BINSTRING uses signed 32-bit length
len, = unpack('<i', self.read(4))
if len < 0:
raise UnpicklingError("BINSTRING pickle has negative byte count")
data = self.read(len)
self.append(self._decode_string(data))
dispatch[BINSTRING[0]] = load_binstring
def load_binbytes(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINBYTES exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES[0]] = load_binbytes
def load_unicode(self):
self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
dispatch[UNICODE[0]] = load_unicode
def load_binunicode(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINUNICODE exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE[0]] = load_binunicode
def load_binunicode8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE8[0]] = load_binunicode8
def load_binbytes8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES8[0]] = load_binbytes8
def load_short_binstring(self):
len = self.read(1)[0]
data = self.read(len)
self.append(self._decode_string(data))
dispatch[SHORT_BINSTRING[0]] = load_short_binstring
def load_short_binbytes(self):
len = self.read(1)[0]
self.append(self.read(len))
dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
def load_short_binunicode(self):
len = self.read(1)[0]
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
def load_tuple(self):
items = self.pop_mark()
self.append(tuple(items))
dispatch[TUPLE[0]] = load_tuple
def load_empty_tuple(self):
self.append(())
dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1[0]] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2[0]] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3[0]] = load_tuple3
def load_empty_list(self):
self.append([])
dispatch[EMPTY_LIST[0]] = load_empty_list
def load_empty_dictionary(self):
self.append({})
dispatch[EMPTY_DICT[0]] = load_empty_dictionary
def load_empty_set(self):
self.append(set())
dispatch[EMPTY_SET[0]] = load_empty_set
def load_frozenset(self):
items = self.pop_mark()
self.append(frozenset(items))
dispatch[FROZENSET[0]] = load_frozenset
def load_list(self):
items = self.pop_mark()
self.append(items)
dispatch[LIST[0]] = load_list
def load_dict(self):
items = self.pop_mark()
d = {items[i]: items[i+1]
for i in range(0, len(items), 2)}
self.append(d)
dispatch[DICT[0]] = load_dict
# INST and OBJ differ only in how they get a class object. Doing the
# rest in a common routine is not just sensible: when the two routines
# were separate, they diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, args):
if (args or not isinstance(klass, type) or
hasattr(klass, "__getinitargs__")):
try:
value = klass(*args)
except TypeError as err:
raise TypeError("in constructor for %s: %s" %
(klass.__name__, str(err)), sys.exc_info()[2])
else:
value = klass.__new__(klass)
self.append(value)
def load_inst(self):
module = self.readline()[:-1].decode("ascii")
name = self.readline()[:-1].decode("ascii")
klass = self.find_class(module, name)
self._instantiate(klass, self.pop_mark())
dispatch[INST[0]] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
args = self.pop_mark()
cls = args.pop(0)
self._instantiate(cls, args)
dispatch[OBJ[0]] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args)
self.append(obj)
dispatch[NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
dispatch[NEWOBJ_EX[0]] = load_newobj_ex
def load_global(self):
module = self.readline()[:-1].decode("utf-8")
name = self.readline()[:-1].decode("utf-8")
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL[0]] = load_global
def load_stack_global(self):
name = self.stack.pop()
module = self.stack.pop()
if type(name) is not str or type(module) is not str:
raise UnpicklingError("STACK_GLOBAL requires str")
self.append(self.find_class(module, name))
dispatch[STACK_GLOBAL[0]] = load_stack_global
def load_ext1(self):
code = self.read(1)[0]
self.get_extension(code)
dispatch[EXT1[0]] = load_ext1
def load_ext2(self):
code, = unpack('<H', self.read(2))
self.get_extension(code)
dispatch[EXT2[0]] = load_ext2
def load_ext4(self):
code, = unpack('<i', self.read(4))
self.get_extension(code)
dispatch[EXT4[0]] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
if code <= 0: # note that 0 is forbidden
# Corrupt or hostile pickle.
raise UnpicklingError("EXT specifies code <= 0")
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this.
if self.proto < 3 and self.fix_imports:
if (module, name) in _compat_pickle.NAME_MAPPING:
module, name = _compat_pickle.NAME_MAPPING[(module, name)]
elif module in _compat_pickle.IMPORT_MAPPING:
module = _compat_pickle.IMPORT_MAPPING[module]
__import__(module, level=0)
if self.proto >= 4:
return _getattribute(sys.modules[module], name)[0]
else:
return getattr(sys.modules[module], name)
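# find_class() is the standard hook for restricting what a pickle may
# import (sketch; the whitelist is illustrative):
#
#   class RestrictedUnpickler(_Unpickler):
#       def find_class(self, module, name):
#           if module == "builtins" and name in {"range", "complex"}:
#               return super().find_class(module, name)
#           raise UnpicklingError("global %s.%s is forbidden"
#                                 % (module, name))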
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
stack[-1] = func(*args)
dispatch[REDUCE[0]] = load_reduce
def load_pop(self):
if self.stack:
del self.stack[-1]
else:
self.pop_mark()
dispatch[POP[0]] = load_pop
def load_pop_mark(self):
self.pop_mark()
dispatch[POP_MARK[0]] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP[0]] = load_dup
def load_get(self):
i = int(self.readline()[:-1])
self.append(self.memo[i])
dispatch[GET[0]] = load_get
def load_binget(self):
i = self.read(1)[0]
self.append(self.memo[i])
dispatch[BINGET[0]] = load_binget
def load_long_binget(self):
i, = unpack('<I', self.read(4))
self.append(self.memo[i])
dispatch[LONG_BINGET[0]] = load_long_binget
def load_put(self):
i = int(self.readline()[:-1])
if i < 0:
raise ValueError("negative PUT argument")
self.memo[i] = self.stack[-1]
dispatch[PUT[0]] = load_put
def load_binput(self):
i = self.read(1)[0]
if i < 0:
raise ValueError("negative BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[BINPUT[0]] = load_binput
def load_long_binput(self):
i, = unpack('<I', self.read(4))
if i > maxsize:
raise ValueError("negative LONG_BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[LONG_BINPUT[0]] = load_long_binput
def load_memoize(self):
memo = self.memo
memo[len(memo)] = self.stack[-1]
dispatch[MEMOIZE[0]] = load_memoize
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND[0]] = load_append
def load_appends(self):
items = self.pop_mark()
list_obj = self.stack[-1]
try:
extend = list_obj.extend
except AttributeError:
pass
else:
extend(items)
return
append = list_obj.append
for item in items:
append(item)
dispatch[APPENDS[0]] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM[0]] = load_setitem
def load_setitems(self):
items = self.pop_mark()
dict = self.stack[-1]
for i in range(0, len(items), 2):
dict[items[i]] = items[i + 1]
dispatch[SETITEMS[0]] = load_setitems
def load_additems(self):
items = self.pop_mark()
set_obj = self.stack[-1]
if isinstance(set_obj, set):
set_obj.update(items)
else:
add = set_obj.add
for item in items:
add(item)
dispatch[ADDITEMS[0]] = load_additems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate is not None:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
inst_dict = inst.__dict__
intern = sys.intern
for k, v in state.items():
if type(k) is str:
inst_dict[intern(k)] = v
else:
inst_dict[k] = v
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD[0]] = load_build
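# The class-side counterpart of load_build (sketch; Point is an
# illustrative class):
#
#   class Point:
#       def __setstate__(self, state):
#           self.__dict__.update(state)
#
# When __setstate__ exists, BUILD calls it with the saved state;
# otherwise the state dict (and optional slot-state dict) is applied
# directly as above.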
def load_mark(self):
self.metastack.append(self.stack)
self.stack = []
self.append = self.stack.append
dispatch[MARK[0]] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP[0]] = load_stop
# Shorthands
def _dump(obj, file, protocol=None, *, fix_imports=True):
_Pickler(file, protocol, fix_imports=fix_imports).dump(obj)
def _dumps(obj, protocol=None, *, fix_imports=True):
f = io.BytesIO()
_Pickler(f, protocol, fix_imports=fix_imports).dump(obj)
res = f.getvalue()
assert isinstance(res, bytes_types)
return res
def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict"):
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
def _loads(s, *, fix_imports=True, encoding="ASCII", errors="strict"):
if isinstance(s, str):
raise TypeError("Can't load pickle from unicode string")
file = io.BytesIO(s)
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
# Use the faster _pickle if possible
try:
from _pickle import (
PickleError,
PicklingError,
UnpicklingError,
Pickler,
Unpickler,
dump,
dumps,
load,
loads
)
except ImportError:
Pickler, Unpickler = _Pickler, _Unpickler
dump, dumps, load, loads = _dump, _dumps, _load, _loads
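# Round-trip sketch using the public shorthands:
#
#   blob = dumps({'answer': 42}, protocol=HIGHEST_PROTOCOL)
#   assert loads(blob) == {'answer': 42}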
# Doctest
def _test():
import sys
try:
import doctest
except ImportError:
sys.exit(1)
print(doctest.testmod())
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='display contents of the pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
if not args.pickle_file:
parser.print_help()
else:
import pprint
for f in args.pickle_file:
obj = load(f)
pprint.pprint(obj)
| 55,861 | 1,615 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/uu.py | #! /usr/bin/env python3
# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
# All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
# between ascii and binary. This results in a 1000-fold speedup. The C
# version is still 5 times faster, though.
# - Arguments more compliant with python standard
"""Implementation of the UUencode and UUdecode functions.
encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""
import binascii
import os
import sys
__all__ = ["Error", "encode", "decode"]
class Error(Exception):
pass
def encode(in_file, out_file, name=None, mode=None):
"""Uuencode file"""
#
# If in_file is a pathname open it and change defaults
#
opened_files = []
try:
if in_file == '-':
in_file = sys.stdin.buffer
elif isinstance(in_file, str):
if name is None:
name = os.path.basename(in_file)
if mode is None:
try:
mode = os.stat(in_file).st_mode
except AttributeError:
pass
in_file = open(in_file, 'rb')
opened_files.append(in_file)
#
# Open out_file if it is a pathname
#
if out_file == '-':
out_file = sys.stdout.buffer
elif isinstance(out_file, str):
out_file = open(out_file, 'wb')
opened_files.append(out_file)
#
# Set defaults for name and mode
#
if name is None:
name = '-'
if mode is None:
mode = 0o666
#
# Remove newline chars from name
#
name = name.replace('\n','\\n')
name = name.replace('\r','\\r')
#
# Write the data
#
out_file.write(('begin %o %s\n' % ((mode & 0o777), name)).encode("ascii"))
data = in_file.read(45)
while len(data) > 0:
out_file.write(binascii.b2a_uu(data))
data = in_file.read(45)
out_file.write(b' \nend\n')
finally:
for f in opened_files:
f.close()
def decode(in_file, out_file=None, mode=None, quiet=False):
"""Decode uuencoded file"""
#
# Open the input file, if needed.
#
opened_files = []
if in_file == '-':
in_file = sys.stdin.buffer
elif isinstance(in_file, str):
in_file = open(in_file, 'rb')
opened_files.append(in_file)
try:
#
# Read until a begin is encountered or we've exhausted the file
#
while True:
hdr = in_file.readline()
if not hdr:
raise Error('No valid begin line found in input file')
if not hdr.startswith(b'begin'):
continue
hdrfields = hdr.split(b' ', 2)
if len(hdrfields) == 3 and hdrfields[0] == b'begin':
try:
int(hdrfields[1], 8)
break
except ValueError:
pass
if out_file is None:
# If the filename isn't ASCII, what's up with that?!?
out_file = hdrfields[2].rstrip(b' \t\r\n\f').decode("ascii")
if os.path.exists(out_file):
raise Error('Cannot overwrite existing file: %s' % out_file)
if mode is None:
mode = int(hdrfields[1], 8)
#
# Open the output file
#
if out_file == '-':
out_file = sys.stdout.buffer
elif isinstance(out_file, str):
fp = open(out_file, 'wb')
try:
os.chmod(out_file, mode)
except AttributeError:
pass
out_file = fp
opened_files.append(out_file)
#
# Main decoding loop
#
s = in_file.readline()
while s and s.strip(b' \t\r\n\f') != b'end':
try:
data = binascii.a2b_uu(s)
except binascii.Error as v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((s[0]-32) & 63) * 4 + 5) // 3
data = binascii.a2b_uu(s[:nbytes])
if not quiet:
sys.stderr.write("Warning: %s\n" % v)
out_file.write(data)
s = in_file.readline()
if not s:
raise Error('Truncated input file')
finally:
for f in opened_files:
f.close()
def test():
"""uuencode/uudecode main program"""
import optparse
parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
(options, args) = parser.parse_args()
if len(args) > 2:
parser.error('incorrect number of arguments')
sys.exit(1)
# Use the binary streams underlying stdin/stdout
input = sys.stdin.buffer
output = sys.stdout.buffer
if len(args) > 0:
input = args[0]
if len(args) > 1:
output = args[1]
if options.decode:
if options.text:
if isinstance(output, str):
output = open(output, 'wb')
else:
print(sys.argv[0], ': cannot do -t to stdout')
sys.exit(1)
decode(input, output)
else:
if options.text:
if isinstance(input, str):
input = open(input, 'rb')
else:
print(sys.argv[0], ': cannot do -t from stdin')
sys.exit(1)
encode(input, output)
if __name__ == '__main__':
test()
| 6,898 | 207 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/poplib.py | """A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <[email protected]>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <[email protected]> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <[email protected]> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import errno
import re
import socket
try:
import ssl
HAVE_SSL = True
except ImportError:
HAVE_SSL = False
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 1939 limits POP3 line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
(e.g.: retr(1) and retr('1') both work equally well).
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
CAPA capa()
STLS stls()
UTF8 utf8()
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead; it doesn't suffer from the two problems
above.
"""
encoding = 'UTF-8'
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self._tls_established = False
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _create_socket(self, timeout):
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
if self._debugging > 1: print('*put*', repr(line))
self.sock.sendall(line + CRLF)
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print('*cmd*', repr(line))
line = bytes(line, self.encoding)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise error_proto('line too long')
if self._debugging > 1: print('*get*', repr(line))
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
if line[:1] == CR:
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print('*resp*', repr(resp))
if not resp.startswith(b'+'):
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != b'.':
if line.startswith(b'..'):
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print('*stat*', repr(rets))
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
resp = self._shortcmd('QUIT')
self.close()
return resp
def close(self):
"""Close the connection without assuming anything about it."""
try:
file = self.file
self.file = None
if file is not None:
file.close()
finally:
sock = self.sock
self.sock = None
if sock is not None:
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError as exc:
# The server might already have closed the connection.
# On Windows, this may result in WSAEINVAL (error 10022):
# An invalid operation was attempted.
if (exc.errno != errno.ENOTCONN
and getattr(exc, 'winerror', 0) != 10022):
raise
finally:
sock.close()
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(br'\+OK.[^<]*(<.*>)')
def apop(self, user, password):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
password - mailbox password.
NB: mailbox is locked by server from here to 'quit()'
"""
secret = bytes(password, self.encoding)
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = m.group(1)+secret
digest = hashlib.md5(digest).hexdigest()
return self._shortcmd('APOP %s %s' % (user, digest))
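# The digest construction matches the worked example in RFC 1939
# (the timestamp and secret below are the RFC's, not a real account):
#
#   >>> import hashlib
#   >>> hashlib.md5(b'<[email protected]>'
#   ...             b'tanstaaf').hexdigest()
#   'c4c9334bac560ecc979e58001b3e22fb'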
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
def utf8(self):
"""Try to enter UTF-8 mode (see RFC 6856). Returns server response.
"""
return self._shortcmd('UTF8')
def capa(self):
"""Return server capabilities (RFC 2449) as a dictionary
>>> c=poplib.POP3('localhost')
>>> c.capa()
{'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],
'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],
'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],
'UIDL': [], 'RESP-CODES': []}
>>>
Really, according to RFC 2449, the cyrus folks should avoid
having the implementation split into multiple arguments...
"""
def _parsecap(line):
lst = line.decode('ascii').split()
return lst[0], lst[1:]
caps = {}
try:
resp = self._longcmd('CAPA')
rawcaps = resp[1]
for capline in rawcaps:
capnm, capargs = _parsecap(capline)
caps[capnm] = capargs
except error_proto as _err:
raise error_proto('-ERR CAPA not supported by server')
return caps
def stls(self, context=None):
"""Start a TLS session on the active connection as specified in RFC 2595.
context - a ssl.SSLContext
"""
if not HAVE_SSL:
raise error_proto('-ERR TLS support missing')
if self._tls_established:
raise error_proto('-ERR TLS session already established')
caps = self.capa()
if not 'STLS' in caps:
raise error_proto('-ERR STLS not supported by server')
if context is None:
context = ssl._create_stdlib_context()
resp = self._shortcmd('STLS')
self.sock = context.wrap_socket(self.sock,
server_hostname=self.host)
self.file = self.sock.makefile('rb')
self._tls_established = True
return resp
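# Upgrading a cleartext session (sketch; the host is a placeholder):
#
#   import ssl
#   p = POP3('pop.example.com')
#   p.stls(context=ssl.create_default_context())
#   p.user('alice')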
if HAVE_SSL:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,
context=None)
hostname - the hostname of the pop3 over ssl server
port - port number
keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
context - a ssl.SSLContext
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a "
"custom context instead", DeprecationWarning, 2)
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
POP3.__init__(self, host, port, timeout)
def _create_socket(self, timeout):
sock = POP3._create_socket(self, timeout)
sock = self.context.wrap_socket(sock,
server_hostname=self.host)
return sock
def stls(self, keyfile=None, certfile=None, context=None):
"""The method unconditionally raises an exception since the
STLS command doesn't make any sense on an already established
SSL/TLS session.
"""
raise error_proto('-ERR TLS session already established')
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print(a.getwelcome())
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print("Message %d:" % i)
for line in msg:
print(' ' + line)
print('-----------------------')
a.quit()
| 14,964 | 479 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/asynchat.py | # -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <[email protected]>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
r"""A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import asyncore
from collections import deque
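# A minimal sketch of the subclassing contract described above (the
# class and callback names are illustrative): collect CRLF-terminated
# lines and hand each one to a callback.
#
#   class line_channel(async_chat):
#       def __init__(self, sock, callback):
#           async_chat.__init__(self, sock)
#           self.callback = callback
#           self.set_terminator(b'\r\n')
#       def collect_incoming_data(self, data):
#           self._collect_incoming_data(data)
#       def found_terminator(self):
#           self.callback(self._get_data())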
class async_chat(asyncore.dispatcher):
"""This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()"""
# these are overridable defaults
ac_in_buffer_size = 65536
ac_out_buffer_size = 65536
# we don't want to enable the use of encoding by default, because that is a
# sign of an application bug that we don't want to pass silently
use_encoding = 0
encoding = 'latin-1'
def __init__(self, sock=None, map=None):
# for string terminator matching
self.ac_in_buffer = b''
# we use a list here rather than io.BytesIO for a few reasons...
# del lst[:] is faster than bio.truncate(0)
# lst = [] is faster than bio.truncate(0)
self.incoming = []
# we toss the use of the "simple producer" and replace it with
# a pure deque, which the original fifo was a wrapping of
self.producer_fifo = deque()
asyncore.dispatcher.__init__(self, sock, map)
def collect_incoming_data(self, data):
raise NotImplementedError("must be implemented in subclass")
def _collect_incoming_data(self, data):
self.incoming.append(data)
def _get_data(self):
d = b''.join(self.incoming)
del self.incoming[:]
return d
def found_terminator(self):
raise NotImplementedError("must be implemented in subclass")
def set_terminator(self, term):
"""Set the input delimiter.
Can be a fixed string of any length, an integer, or None.
"""
if isinstance(term, str) and self.use_encoding:
term = bytes(term, self.encoding)
elif isinstance(term, int) and term < 0:
raise ValueError('the number of received bytes must be positive')
self.terminator = term
def get_terminator(self):
return self.terminator
# grab some more data from the socket,
# throw it to the collector method,
# check for the terminator,
# if found, transition to the next state.
def handle_read(self):
try:
data = self.recv(self.ac_in_buffer_size)
except BlockingIOError:
return
except OSError:
self.handle_error()
return
if isinstance(data, str) and self.use_encoding:
data = bytes(data, self.encoding)
self.ac_in_buffer = self.ac_in_buffer + data
# Continue to search for self.terminator in self.ac_in_buffer,
# while calling self.collect_incoming_data. The while loop
# is necessary because we might read several data+terminator
# combos with a single recv(4096).
while self.ac_in_buffer:
lb = len(self.ac_in_buffer)
terminator = self.get_terminator()
if not terminator:
# no terminator, collect it all
self.collect_incoming_data(self.ac_in_buffer)
self.ac_in_buffer = b''
elif isinstance(terminator, int):
# numeric terminator
n = terminator
if lb < n:
self.collect_incoming_data(self.ac_in_buffer)
self.ac_in_buffer = b''
self.terminator = self.terminator - lb
else:
self.collect_incoming_data(self.ac_in_buffer[:n])
self.ac_in_buffer = self.ac_in_buffer[n:]
self.terminator = 0
self.found_terminator()
else:
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
terminator_len = len(terminator)
index = self.ac_in_buffer.find(terminator)
if index != -1:
# we found the terminator
if index > 0:
# don't bother reporting the empty string
# (source of subtle bugs)
self.collect_incoming_data(self.ac_in_buffer[:index])
self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
# This does the Right Thing if the terminator
# is changed here.
self.found_terminator()
else:
# check for a prefix of the terminator
index = find_prefix_at_end(self.ac_in_buffer, terminator)
if index:
if index != lb:
# we found a prefix, collect up to the prefix
self.collect_incoming_data(self.ac_in_buffer[:-index])
self.ac_in_buffer = self.ac_in_buffer[-index:]
break
else:
# no prefix, collect it all
self.collect_incoming_data(self.ac_in_buffer)
self.ac_in_buffer = b''
def handle_write(self):
self.initiate_send()
def handle_close(self):
self.close()
def push(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be byte-ish (%r)' %
type(data))
sabs = self.ac_out_buffer_size
if len(data) > sabs:
for i in range(0, len(data), sabs):
self.producer_fifo.append(data[i:i+sabs])
else:
self.producer_fifo.append(data)
self.initiate_send()
def push_with_producer(self, producer):
self.producer_fifo.append(producer)
self.initiate_send()
def readable(self):
"predicate for inclusion in the readable for select()"
# cannot use the old predicate, it violates the claim of the
# set_terminator method.
# return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
return 1
def writable(self):
"predicate for inclusion in the writable for select()"
return self.producer_fifo or (not self.connected)
def close_when_done(self):
"automatically close this channel once the outgoing queue is empty"
self.producer_fifo.append(None)
def initiate_send(self):
while self.producer_fifo and self.connected:
first = self.producer_fifo[0]
# handle empty string/buffer or None entry
if not first:
del self.producer_fifo[0]
if first is None:
self.handle_close()
return
# handle classic producer behavior
obs = self.ac_out_buffer_size
try:
data = first[:obs]
except TypeError:
data = first.more()
if data:
self.producer_fifo.appendleft(data)
else:
del self.producer_fifo[0]
continue
if isinstance(data, str) and self.use_encoding:
data = bytes(data, self.encoding)
# send the data
try:
num_sent = self.send(data)
except OSError:
self.handle_error()
return
if num_sent:
if num_sent < len(data) or obs < len(first):
self.producer_fifo[0] = first[num_sent:]
else:
del self.producer_fifo[0]
# we tried to send some actual data
return
def discard_buffers(self):
# Emergencies only!
self.ac_in_buffer = b''
del self.incoming[:]
self.producer_fifo.clear()
class simple_producer:
def __init__(self, data, buffer_size=512):
self.data = data
self.buffer_size = buffer_size
def more(self):
if len(self.data) > self.buffer_size:
result = self.data[:self.buffer_size]
self.data = self.data[self.buffer_size:]
return result
else:
result = self.data
self.data = b''
return result
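# Sketch: pushing a large payload through the producer interface (the
# channel and payload names are illustrative):
#
#   channel.push_with_producer(simple_producer(payload, buffer_size=4096))
#
# initiate_send() slices ordinary byte strings itself, but falls back
# to calling more() on objects like this one, so the payload is
# produced on demand instead of being copied into the fifo up front.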
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
# for example:
# f_p_a_e("qwerty\r", "\r\n") => 1
# f_p_a_e("qwertydkjf", "\r\n") => 0
# f_p_a_e("qwerty\r\n", "\r\n") => <undefined>
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python: 28961/s
# old python: 18307/s
# re: 12820/s
# regex: 14035/s
def find_prefix_at_end(haystack, needle):
l = len(needle) - 1
while l and not haystack.endswith(needle[:l]):
l -= 1
return l
| 11,328 | 308 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/cgi.py | #! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from io import StringIO, BytesIO, TextIOWrapper
from collections.abc import Mapping
import sys
import os
import urllib.parse
from email.parser import FeedParser
from email.message import Message
from warnings import warn
import html
import locale
import tempfile
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "test", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global log, logfile, logfp
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except OSError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
def closelog():
"""Close the log file."""
global log, logfile, logfp
logfile = ''
if logfp:
logfp.close()
logfp = None
log = initlog
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0,
strict_parsing=0, separator='&'):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
separator: str. The symbol to use for separating the query arguments.
Defaults to &.
"""
if fp is None:
fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
if hasattr(fp,'encoding'):
encoding = fp.encoding
else:
encoding = 'latin-1'
# fp.read() must return bytes
if isinstance(fp, TextIOWrapper):
fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
encoding=encoding, separator=separator)
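# Sketch (the environment values are illustrative):
#
#   import io
#   env = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'a=1&a=2&b=3'}
#   parse(io.BytesIO(b''), environ=env)
#   # => {'a': ['1', '2'], 'b': ['3']}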
# Query-string parsing functions that simply delegate to urllib.parse;
# they are kept here only to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
DeprecationWarning, 2)
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
DeprecationWarning, 2)
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
import http.client
boundary = b""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = b"--" + boundary
lastpart = b"--" + boundary + b"--"
partdict = {}
terminator = b""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = http.client.parse_headers(fp)
clength = headers.get('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError('Maximum content length exceeded')
data = fp.read(bytes)
else:
data = b""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line.startswith(b"--"):
terminator = line.rstrip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == b"\r\n":
line = line[:-2]
elif line[-1:] == b"\n":
line = line[:-1]
lines[-1] = line
data = b"".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
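# Illustrative sketch (not part of the original module): feeding
# parse_multipart() a hand-built multipart body.  The boundary is
# arbitrary; this implementation expects it as bytes in pdict.
#
#     >>> from io import BytesIO
#     >>> body = (b'--XXX\r\n'
#     ...         b'Content-Disposition: form-data; name="field1"\r\n'
#     ...         b'\r\n'
#     ...         b'value1\r\n'
#     ...         b'--XXX--\r\n')
#     >>> parse_multipart(BytesIO(body), {'boundary': b'XXX'})
#     {'field1': [b'value1']}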
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.__next__()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
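# Illustrative sketch (not part of the original module):
#
#     >>> parse_header('form-data; name="files"; filename="f.txt"')
#     ('form-data', {'name': 'files', 'filename': 'f.txt'})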
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
    The items are either a Python list (if there are multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
and returns *bytes*
file: the file(-like) object from which you can read the data *as
        bytes*; None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace',
max_num_fields=None, separator='&'):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
            Can be:
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
between the form content-length and the number of bytes already
read
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
for the page sending the form (content-type : meta http-equiv or
header)
max_num_fields: int. If set, then __init__ throws a ValueError
            if there are more than max_num_fields fields read by parse_qsl().
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
self.max_num_fields = max_num_fields
self.separator = separator
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
fp = BytesIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
else:
if not (isinstance(headers, (Mapping, Message))):
raise TypeError("headers must be mapping or an instance of "
"email.message.Message")
self.headers = headers
if fp is None:
self.fp = sys.stdin.buffer
# self.fp.read() must return bytes
elif isinstance(fp, TextIOWrapper):
self.fp = fp.buffer
else:
if not (hasattr(fp, 'read') and hasattr(fp, 'readline')):
raise TypeError("fp must be file pointer")
self.fp = fp
self.encoding = encoding
self.errors = errors
if not isinstance(outerboundary, bytes):
raise TypeError('outerboundary must be bytes, not %s'
% type(outerboundary).__name__)
self.outerboundary = outerboundary
self.bytes_read = 0
self.limit = limit
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
self._binary_file = self.filename is not None
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
if 'boundary' in pdict:
self.innerboundary = pdict['boundary'].encode(self.encoding)
else:
self.innerboundary = b""
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError('Maximum content length exceeded')
self.length = clen
if self.limit is None and clen:
self.limit = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __del__(self):
try:
self.file.close()
except AttributeError:
pass
def __enter__(self):
return self
def __exit__(self, *args):
self.file.close()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError(name)
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError("not indexable")
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError(key)
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if isinstance(value, list):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if isinstance(value, list):
return [x.value for x in value]
else:
return [value.value]
else:
return []
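    # Illustrative sketch (not part of the original source): the three
    # accessors above, driven by a synthetic GET request.
    #
    #     >>> env = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'x=1&x=2&y=3'}
    #     >>> fs = FieldStorage(environ=env)
    #     >>> fs.getfirst('x'), fs.getlist('x'), fs.getvalue('y')
    #     ('1', ['1', '2'], '3')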
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError("not indexable")
return list(set(item.name for item in self.list))
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError("not indexable")
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __bool__(self):
if self.list is None:
raise TypeError("Cannot be converted to bool.")
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if not isinstance(qs, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(qs).__name__))
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors,
max_num_fields=self.max_num_fields, separator=self.separator)
self.list = [MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
self.list = []
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
encoding=self.encoding, errors=self.errors,
max_num_fields=self.max_num_fields, separator=self.separator)
self.list.extend(MiniFieldStorage(key, value) for key, value in query)
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
if not isinstance(first_line, bytes):
raise ValueError("%s should return bytes, got %s" \
% (self.fp, type(first_line).__name__))
self.bytes_read += len(first_line)
# Ensure that we consume the file until we've hit our inner boundary
while (first_line.strip() != (b"--" + self.innerboundary) and
first_line):
first_line = self.fp.readline()
self.bytes_read += len(first_line)
# Propagate max_num_fields into the sub class appropriately
max_num_fields = self.max_num_fields
if max_num_fields is not None:
max_num_fields -= len(self.list)
while True:
parser = FeedParser()
hdr_text = b""
while True:
data = self.fp.readline()
hdr_text += data
if not data.strip():
break
if not hdr_text:
break
# parser takes strings, not bytes
self.bytes_read += len(hdr_text)
parser.feed(hdr_text.decode(self.encoding, self.errors))
headers = parser.close()
        # Some clients add Content-Length for part headers; ignore them.
if 'content-length' in headers:
del headers['content-length']
part = klass(self.fp, headers, ib, environ, keep_blank_values,
                         strict_parsing, self.limit - self.bytes_read,
self.encoding, self.errors, max_num_fields, self.separator)
if max_num_fields is not None:
max_num_fields -= 1
if part.list:
max_num_fields -= len(part.list)
if max_num_fields < 0:
raise ValueError('Max number of fields exceeded')
self.bytes_read += part.bytes_read
self.list.append(part)
if part.done or self.bytes_read >= self.length > 0:
break
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize)) # bytes
if not isinstance(data, bytes):
raise ValueError("%s should return bytes, got %s"
% (self.fp, type(data).__name__))
self.bytes_read += len(data)
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
if self._binary_file:
self.file = self.__file = BytesIO() # store data as bytes for files
else:
self.file = self.__file = StringIO() # as strings for other fields
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
"""line is always bytes, not string"""
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file()
data = self.__file.getvalue()
self.file.write(data)
self.__file = None
if self._binary_file:
# keep bytes
self.file.write(line)
else:
# decode to string
self.file.write(line.decode(self.encoding, self.errors))
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary.
Data is read as bytes: boundaries and line ends must be converted
to bytes for comparisons.
"""
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
delim = b""
last_line_lfend = True
_read = 0
while 1:
if _read >= self.limit:
break
line = self.fp.readline(1<<16) # bytes
self.bytes_read += len(line)
_read += len(line)
if not line:
self.done = -1
break
if delim == b"\r":
line = delim + line
delim = b""
if line.startswith(b"--") and last_line_lfend:
strippedline = line.rstrip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
odelim = delim
if line.endswith(b"\r\n"):
delim = b"\r\n"
line = line[:-2]
last_line_lfend = True
elif line.endswith(b"\n"):
delim = b"\n"
line = line[:-1]
last_line_lfend = True
elif line.endswith(b"\r"):
# We may interrupt \r\n sequences if they span the 2**16
# byte boundary
delim = b"\r"
line = line[:-1]
last_line_lfend = False
else:
delim = b""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next_boundary = b"--" + self.outerboundary
last_boundary = next_boundary + b"--"
last_line_lfend = True
while True:
line = self.fp.readline(1<<16)
self.bytes_read += len(line)
if not line:
self.done = -1
break
if line.endswith(b"--") and last_line_lfend:
strippedline = line.strip()
if strippedline == next_boundary:
break
if strippedline == last_boundary:
self.done = 1
break
last_line_lfend = line.endswith(b'\n')
def make_file(self):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
        The file is opened in binary mode for file uploads, and in text
        mode for other fields.
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
if self._binary_file:
return tempfile.TemporaryFile("wb+")
else:
return tempfile.TemporaryFile("w+",
            encoding=self.encoding, newline='\n')
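    # Illustrative sketch (not part of the original source): a subclass
    # that keeps uploads as named, persistent files instead of unlinked
    # temporaries.  The directory below is hypothetical.
    #
    #     class PersistentFieldStorage(FieldStorage):
    #         def make_file(self):
    #             mode = 'wb+' if self._binary_file else 'w+'
    #             return tempfile.NamedTemporaryFile(
    #                 mode, dir='/var/uploads', delete=False)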
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print("Content-type: text/html")
print()
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec("testing print_exception() -- <I>italics?</I>")
def g(f=f):
f()
print("<H3>What follows is a test, not an actual exception:</H3>")
g()
except:
print_exception()
print("<H1>Second try with a small maxlen...</H1>")
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print()
print("<H3>Traceback (most recent call last):</H3>")
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print("<PRE>%s<B>%s</B></PRE>" % (
html.escape("".join(list[:-1])),
html.escape(list[-1]),
))
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = sorted(environ.keys())
print()
print("<H3>Shell Environment:</H3>")
print("<DL>")
for key in keys:
print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
print("</DL>")
print()
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = sorted(form.keys())
print()
print("<H3>Form Contents:</H3>")
if not keys:
print("<P>No form fields.")
print("<DL>")
for key in keys:
print("<DT>" + html.escape(key) + ":", end=' ')
value = form[key]
print("<i>" + html.escape(repr(type(value))) + "</i>")
print("<DD>" + html.escape(repr(value)))
print("</DL>")
print()
def print_directory():
"""Dump the current directory as HTML."""
print()
print("<H3>Current Working Directory:</H3>")
try:
pwd = os.getcwd()
except OSError as msg:
print("OSError:", html.escape(str(msg)))
else:
print(html.escape(pwd))
print()
def print_arguments():
print()
print("<H3>Command Line Arguments:</H3>")
print()
print(sys.argv)
print()
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
# Utilities
# =========
def escape(s, quote=None):
"""Deprecated API."""
warn("cgi.escape is deprecated, use html.escape instead",
DeprecationWarning, stacklevel=2)
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
def valid_boundary(s):
import re
if isinstance(s, bytes):
_vb_pattern = b"^[ -~]{0,200}[!-~]$"
else:
_vb_pattern = "^[ -~]{0,200}[!-~]$"
return re.match(_vb_pattern, s)
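# E.g. valid_boundary(b'XXX') matches, while a boundary longer than 201
# characters, or one ending in a space, does not (illustrative note,
# not part of the original source).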
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
cosmopolitan/third_party/python/Lib/uuid.py
r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1() # doctest: +SKIP
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4() # doctest: +SKIP
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
import os
__author__ = 'Ka-Ping Yee <[email protected]>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
int_ = int # The built-in int type
bytes_ = bytes # The built-in bytes type
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if [hex, bytes, bytes_le, fields, int].count(None) != 4:
raise TypeError('one of the hex, bytes, bytes_le, fields, '
'or int arguments must be given')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = int_(hex, 16)
if bytes_le is not None:
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] +
bytes_le[8-1:6-1:-1] + bytes_le[8:])
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
assert isinstance(bytes, bytes_), repr(bytes)
int = int_.from_bytes(bytes, byteorder='big')
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1<<32:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1<<16:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1<<16:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1<<8:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1<<8:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1<<48:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low
int = ((time_low << 96) | (time_mid << 80) |
(time_hi_version << 64) | (clock_seq << 48) | node)
if int is not None:
if not 0 <= int < 1<<128:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48)
int |= 0x8000 << 48
# Set the version number.
int &= ~(0xf000 << 64)
int |= version << 76
self.__dict__['int'] = int
def __eq__(self, other):
if isinstance(other, UUID):
return self.int == other.int
return NotImplemented
# Q. What's the value of being able to sort UUIDs?
# A. Use them as keys in a B-Tree or similar mapping.
def __lt__(self, other):
if isinstance(other, UUID):
return self.int < other.int
return NotImplemented
def __gt__(self, other):
if isinstance(other, UUID):
return self.int > other.int
return NotImplemented
def __le__(self, other):
if isinstance(other, UUID):
return self.int <= other.int
return NotImplemented
def __ge__(self, other):
if isinstance(other, UUID):
return self.int >= other.int
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, str(self))
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
@property
def bytes(self):
return self.int.to_bytes(16, 'big')
@property
def bytes_le(self):
bytes = self.bytes
return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] +
bytes[8:])
@property
def fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
@property
def time_low(self):
return self.int >> 96
@property
def time_mid(self):
return (self.int >> 80) & 0xffff
@property
def time_hi_version(self):
return (self.int >> 64) & 0xffff
@property
def clock_seq_hi_variant(self):
return (self.int >> 56) & 0xff
@property
def clock_seq_low(self):
return (self.int >> 48) & 0xff
@property
def time(self):
return (((self.time_hi_version & 0x0fff) << 48) |
(self.time_mid << 32) | self.time_low)
@property
def clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3f) << 8) |
self.clock_seq_low)
@property
def node(self):
return self.int & 0xffffffffffff
@property
def hex(self):
return '%032x' % self.int
@property
def urn(self):
return 'urn:uuid:' + str(self)
@property
def variant(self):
if not self.int & (0x8000 << 48):
return RESERVED_NCS
elif not self.int & (0x4000 << 48):
return RFC_4122
elif not self.int & (0x2000 << 48):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
@property
def version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76) & 0xf)
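    # Illustrative sketch (not part of the original source): how a fixed
    # UUID decomposes into the fields above.
    #
    #     >>> u = UUID('12345678-1234-5678-1234-567812345678')
    #     >>> (hex(u.time_low), hex(u.time_mid), hex(u.time_hi_version))
    #     ('0x12345678', '0x1234', '0x5678')
    #     >>> u.variant == RESERVED_NCS
    #     True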
def _popen(command, *args):
import os, shutil, subprocess
executable = shutil.which(command)
if executable is None:
path = os.pathsep.join(('/sbin', '/usr/sbin'))
executable = shutil.which(command, path=path)
if executable is None:
return None
# LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output
# on stderr (Note: we don't have an example where the words we search
# for are actually localized, but in theory some system could do so.)
env = dict(os.environ)
env['LC_ALL'] = 'C'
proc = subprocess.Popen((executable,) + args,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
return proc
def _find_mac(command, args, hw_identifiers, get_index):
try:
proc = _popen(command, *args.split())
if not proc:
return
with proc:
for line in proc.stdout:
words = line.lower().rstrip().split()
for i in range(len(words)):
if words[i] in hw_identifiers:
try:
word = words[get_index(i)]
mac = int(word.replace(b':', b''), 16)
if mac:
return mac
except (ValueError, IndexError):
# Virtual interfaces, such as those provided by
# VPNs, do not have a colon-delimited MAC address
# as expected, but a 16-byte HWAddr separated by
# dashes. These should be ignored in favor of a
# real MAC address
pass
except OSError:
pass
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
keywords = (b'hwaddr', b'ether', b'address:', b'lladdr')
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, keywords, lambda i: i+1)
if mac:
return mac
def _ip_getnode():
"""Get the hardware address on Unix by running ip."""
# This works on Linux with iproute2.
mac = _find_mac('ip', 'link', [b'link/ether'], lambda i: i+1)
if mac:
return mac
def _arp_getnode():
"""Get the hardware address on Unix by running arp."""
import os, socket
try:
ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
return None
# Try getting the MAC addr from arp based on our IP address (Solaris).
mac = _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1)
if mac:
return mac
# This works on OpenBSD
mac = _find_mac('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1)
if mac:
return mac
# This works on Linux, FreeBSD and NetBSD
mac = _find_mac('arp', '-an', [os.fsencode('(%s)' % ip_addr)],
lambda i: i+2)
if mac:
return mac
def _lanscan_getnode():
"""Get the hardware address on Unix by running lanscan."""
# This might work on HP-UX.
return _find_mac('lanscan', '-ai', [b'lan0'], lambda i: 0)
def _netstat_getnode():
"""Get the hardware address on Unix by running netstat."""
# This might work on AIX, Tru64 UNIX and presumably on IRIX.
try:
proc = _popen('netstat', '-ia')
if not proc:
return
with proc:
words = proc.stdout.readline().rstrip().split()
try:
i = words.index(b'Address')
except ValueError:
return
for line in proc.stdout:
try:
words = line.rstrip().split()
word = words[i]
if len(word) == 17 and word.count(b':') == 5:
mac = int(word.replace(b':', b''), 16)
if mac:
return mac
except (ValueError, IndexError):
pass
except OSError:
pass
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os, re, subprocess
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
proc = subprocess.Popen([os.path.join(dir, 'ipconfig'), '/all'],
stdout=subprocess.PIPE,
encoding="oem")
except OSError:
continue
with proc:
for line in proc.stdout:
value = line.split(':')[-1].strip().lower()
if re.fullmatch('(?:[0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
    import win32wnet, netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = status.adapter_address[:6]
if len(bytes) != 6:
continue
return int.from_bytes(bytes, 'big')
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
# XXX This makes the module non-thread-safe!
_uuid_generate_time = _UuidCreate = None
try:
import ctypes, ctypes.util
import sys
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
_libnames = ['uuid']
if not sys.platform.startswith('win'):
_libnames.append('c')
for libname in _libnames:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except Exception:
continue
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
break
del _libnames
# The uuid_generate_* functions are broken on MacOS X 10.5, as noted
# in issue #8621 the function generates the same sequence of values
# in the parent process and all children created using fork (unless
# those children use exec as well).
#
# Assume that the uuid_generate functions are broken from 10.5 onward,
# the test can be adjusted when a later version is fixed.
if sys.platform == 'darwin':
if int(os.uname().release.split('.')[0]) >= 9:
_uuid_generate_time = None
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
# NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
# 6 bytes returned by UuidCreateSequential are fixed, they don't appear
# to bear any relationship to the MAC address of any network device
# on the box.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=bytes_(_buffer.raw)).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
_buffer = ctypes.create_string_buffer(16)
if _UuidCreate(_buffer) == 0:
return UUID(bytes=bytes_(_buffer.raw)).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.getrandbits(48) | 0x010000000000
_node = None
_NODE_GETTERS_WIN32 = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
_NODE_GETTERS_UNIX = [_unixdll_getnode, _ifconfig_getnode, _ip_getnode,
_arp_getnode, _lanscan_getnode, _netstat_getnode]
def getnode():
"""Get the hardware address as a 48-bit positive integer.
The first time this runs, it may launch a separate program, which could
be quite slow. If all attempts to obtain the hardware address fail, we
choose a random 48-bit number with its eighth bit set to 1 as recommended
in RFC 4122.
"""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = _NODE_GETTERS_WIN32
else:
getters = _NODE_GETTERS_UNIX
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if (_node is not None) and (0 <= _node < (1 << 48)):
return _node
assert False, '_random_getnode() returned invalid value: {}'.format(_node)
_last_timestamp = None
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=bytes_(_buffer.raw))
global _last_timestamp
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
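    # As a quick check of that constant: 141427 days separate the two
    # epochs, and 141427 * 86400 * 10**7 == 0x01b21dd213814000
    # == 122192928000000000.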
    timestamp = nanoseconds // 100 + 0x01b21dd213814000
if _last_timestamp is not None and timestamp <= _last_timestamp:
timestamp = _last_timestamp + 1
_last_timestamp = timestamp
if clock_seq is None:
import random
clock_seq = random.getrandbits(14) # instead of stable storage
time_low = timestamp & 0xffffffff
time_mid = (timestamp >> 32) & 0xffff
time_hi_version = (timestamp >> 48) & 0x0fff
clock_seq_low = clock_seq & 0xff
clock_seq_hi_variant = (clock_seq >> 8) & 0x3f
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
from hashlib import md5
hash = md5(namespace.bytes + bytes(name, "utf-8")).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
return UUID(bytes=os.urandom(16), version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
from hashlib import sha1
hash = sha1(namespace.bytes + bytes(name, "utf-8")).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
cosmopolitan/third_party/python/Lib/fractions.py
# Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Fraction, infinite-precision, real numbers."""
from decimal import Decimal
import math
import numbers
import operator
import re
import sys
__all__ = ['Fraction', 'gcd']
def gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
import warnings
warnings.warn('fractions.gcd() is deprecated. Use math.gcd() instead.',
DeprecationWarning, 2)
if type(a) is int is type(b):
if (b or a) < 0:
return -math.gcd(a, b)
return math.gcd(a, b)
return _gcd(a, b)
def _gcd(a, b):
# Supports non-integers for backward compatibility.
while b:
a, b = b, a%b
return a
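# E.g. _gcd(12, 18) steps through (a, b) = (12, 18) -> (18, 12) ->
# (12, 6) -> (6, 0) and returns 6 (illustrative trace, not part of the
# original source).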
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
# Value to be used for rationals that reduce to infinity modulo
# _PyHASH_MODULUS.
_PyHASH_INF = sys.hash_info.inf
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(numbers.Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
def __new__(cls, numerator=0, denominator=None, *, _normalize=True):
"""Constructs a Rational.
Takes a string like '3/2' or '1.5', another Rational instance, a
numerator/denominator pair, or a float.
Examples
--------
>>> Fraction(10, -8)
Fraction(-5, 4)
>>> Fraction(Fraction(1, 7), 5)
Fraction(1, 35)
>>> Fraction(Fraction(1, 7), Fraction(2, 3))
Fraction(3, 14)
>>> Fraction('314')
Fraction(314, 1)
>>> Fraction('-35/4')
Fraction(-35, 4)
>>> Fraction('3.1415') # conversion from numeric string
Fraction(6283, 2000)
>>> Fraction('-47e-2') # string may include a decimal exponent
Fraction(-47, 100)
>>> Fraction(1.47) # direct construction from float (exact conversion)
Fraction(6620291452234629, 4503599627370496)
>>> Fraction(2.25)
Fraction(9, 4)
>>> Fraction(Decimal('1.47'))
Fraction(147, 100)
"""
self = super(Fraction, cls).__new__(cls)
if denominator is None:
if type(numerator) is int:
self._numerator = numerator
self._denominator = 1
return self
elif isinstance(numerator, numbers.Rational):
self._numerator = numerator.numerator
self._denominator = numerator.denominator
return self
elif isinstance(numerator, (float, Decimal)):
# Exact conversion
self._numerator, self._denominator = numerator.as_integer_ratio()
return self
elif isinstance(numerator, str):
# Handle construction from strings.
m = _RATIONAL_FORMAT.match(numerator)
if m is None:
raise ValueError('Invalid literal for Fraction: %r' %
numerator)
numerator = int(m.group('num') or '0')
denom = m.group('denom')
if denom:
denominator = int(denom)
else:
denominator = 1
decimal = m.group('decimal')
if decimal:
scale = 10**len(decimal)
numerator = numerator * scale + int(decimal)
denominator *= scale
exp = m.group('exp')
if exp:
exp = int(exp)
if exp >= 0:
numerator *= 10**exp
else:
denominator *= 10**-exp
if m.group('sign') == '-':
numerator = -numerator
else:
raise TypeError("argument should be a string "
"or a Rational instance")
elif type(numerator) is int is type(denominator):
pass # *very* normal case
elif (isinstance(numerator, numbers.Rational) and
isinstance(denominator, numbers.Rational)):
numerator, denominator = (
numerator.numerator * denominator.denominator,
denominator.numerator * numerator.denominator
)
else:
raise TypeError("both arguments should be "
"Rational instances")
if denominator == 0:
raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
if _normalize:
if type(numerator) is int is type(denominator):
# *very* normal case
g = math.gcd(numerator, denominator)
if denominator < 0:
g = -g
else:
g = _gcd(numerator, denominator)
numerator //= g
denominator //= g
self._numerator = numerator
self._denominator = denominator
return self
@classmethod
def from_float(cls, f):
"""Converts a finite float to a rational number, exactly.
Beware that Fraction.from_float(0.3) != Fraction(3, 10).
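        For example, the exact value of the binary float 0.3 is:
        >>> Fraction.from_float(0.3)
        Fraction(5404319552844595, 18014398509481984)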
"""
if isinstance(f, numbers.Integral):
return cls(f)
elif not isinstance(f, float):
raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
(cls.__name__, f, type(f).__name__))
return cls(*f.as_integer_ratio())
@classmethod
def from_decimal(cls, dec):
"""Converts a finite Decimal instance to a rational number, exactly."""
from decimal import Decimal
if isinstance(dec, numbers.Integral):
dec = Decimal(int(dec))
elif not isinstance(dec, Decimal):
raise TypeError(
"%s.from_decimal() only takes Decimals, not %r (%s)" %
(cls.__name__, dec, type(dec).__name__))
return cls(*dec.as_integer_ratio())
def limit_denominator(self, max_denominator=1000000):
"""Closest Fraction to self with denominator at most max_denominator.
>>> Fraction('3.141592653589793').limit_denominator(10)
Fraction(22, 7)
>>> Fraction('3.141592653589793').limit_denominator(100)
Fraction(311, 99)
>>> Fraction(4321, 8765).limit_denominator(10000)
Fraction(4321, 8765)
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self._denominator <= max_denominator:
return Fraction(self)
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self._numerator, self._denominator
while True:
a = n//d
q2 = q0+a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
n, d = d, n-a*d
k = (max_denominator-q0)//q1
bound1 = Fraction(p0+k*p1, q0+k*q1)
bound2 = Fraction(p1, q1)
if abs(bound2 - self) <= abs(bound1-self):
return bound2
else:
return bound1
@property
def numerator(a):
return a._numerator
@property
def denominator(a):
return a._denominator
def __repr__(self):
"""repr(self)"""
return '%s(%s, %s)' % (self.__class__.__name__,
self._numerator, self._denominator)
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
def _operator_fallbacks(monomorphic_operator, fallback_operator):
"""Generates forward and reverse operators given a purely-rational
operator and a function from the operator module.
Use this like:
__op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
In general, we want to implement the arithmetic operations so
that mixed-mode operations either call an implementation whose
author knew about the types of both arguments, or convert both
to the nearest built in type and do the operation there. In
Fraction, that means that we define __add__ and __radd__ as:
def __add__(self, other):
# Both types have numerators/denominator attributes,
# so do the operation directly
if isinstance(other, (int, Fraction)):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
# float and complex don't have those operations, but we
# know about those types, so special case them.
elif isinstance(other, float):
return float(self) + other
elif isinstance(other, complex):
return complex(self) + other
# Let the other type take over.
return NotImplemented
def __radd__(self, other):
# radd handles more types than add because there's
# nothing left to fall back to.
if isinstance(other, numbers.Rational):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
elif isinstance(other, Real):
return float(other) + float(self)
elif isinstance(other, Complex):
return complex(other) + complex(self)
return NotImplemented
There are 5 different cases for a mixed-type addition on
Fraction. I'll refer to all of the above code that doesn't
refer to Fraction, float, or complex as "boilerplate". 'r'
will be an instance of Fraction, which is a subtype of
Rational (r : Fraction <: Rational), and b : B <:
Complex. The first three involve 'r + b':
1. If B <: Fraction, int, float, or complex, we handle
that specially, and all is well.
2. If Fraction falls back to the boilerplate code, and it
were to return a value from __add__, we'd miss the
possibility that B defines a more intelligent __radd__,
so the boilerplate should return NotImplemented from
__add__. In particular, we don't handle Rational
here, even though we could get an exact answer, in case
the other type wants to do something special.
3. If B <: Fraction, Python tries B.__radd__ before
Fraction.__add__. This is ok, because it was
implemented with knowledge of Fraction, so it can
handle those instances before delegating to Real or
Complex.
The next two situations describe 'b + r'. We assume that b
didn't know about Fraction in its implementation, and that it
uses similar boilerplate code:
        4. If B <: Rational, then __radd__ converts both to the
builtin rational type (hey look, that's us) and
proceeds.
5. Otherwise, __radd__ tries to find the nearest common
base ABC, and fall back to its builtin type. Since this
class doesn't subclass a concrete type, there's no
implementation to fall back to, so we need to try as
hard as possible to return an actual value, or the user
will get a TypeError.
"""
def forward(a, b):
if isinstance(b, (int, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, numbers.Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
def _add(a, b):
"""a + b"""
da, db = a.denominator, b.denominator
return Fraction(a.numerator * db + b.numerator * da,
da * db)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
da, db = a.denominator, b.denominator
return Fraction(a.numerator * db - b.numerator * da,
da * db)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
def __floordiv__(a, b):
"""a // b"""
return math.floor(a / b)
def __rfloordiv__(b, a):
"""a // b"""
return math.floor(a / b)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
def __pow__(a, b):
"""a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
"""
if isinstance(b, numbers.Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power,
_normalize=False)
elif a._numerator >= 0:
return Fraction(a._denominator ** -power,
a._numerator ** -power,
_normalize=False)
else:
return Fraction((-a._denominator) ** -power,
(-a._numerator) ** -power,
_normalize=False)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
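    # E.g. Fraction(2, 3) ** 2 == Fraction(4, 9) stays rational, while
    # Fraction(2, 3) ** 0.5 falls back to float (illustrative note).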
def __rpow__(b, a):
"""a ** b"""
if b._denominator == 1 and b._numerator >= 0:
# If a is an int, keep it that way if possible.
return a ** b._numerator
if isinstance(a, numbers.Rational):
return Fraction(a.numerator, a.denominator) ** b
if b._denominator == 1:
return a ** b._numerator
return a ** float(b)
def __pos__(a):
"""+a: Coerces a subclass instance to Fraction"""
return Fraction(a._numerator, a._denominator, _normalize=False)
def __neg__(a):
"""-a"""
return Fraction(-a._numerator, a._denominator, _normalize=False)
def __abs__(a):
"""abs(a)"""
return Fraction(abs(a._numerator), a._denominator, _normalize=False)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
def __floor__(a):
"""Will be math.floor(a) in 3.0."""
return a.numerator // a.denominator
def __ceil__(a):
"""Will be math.ceil(a) in 3.0."""
# The negations cleverly convince floordiv to return the ceiling.
return -(-a.numerator // a.denominator)
def __round__(self, ndigits=None):
"""Will be round(self, ndigits) in 3.0.
Rounds half toward even.
"""
if ndigits is None:
floor, remainder = divmod(self.numerator, self.denominator)
if remainder * 2 < self.denominator:
return floor
elif remainder * 2 > self.denominator:
return floor + 1
# Deal with the half case:
elif floor % 2 == 0:
return floor
else:
return floor + 1
shift = 10**abs(ndigits)
# See _operator_fallbacks.forward to check that the results of
# these operations will always be Fraction and therefore have
# round().
if ndigits > 0:
return Fraction(round(self * shift), shift)
else:
return Fraction(round(self / shift) * shift)
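    # Illustrative (doctest-style): banker's rounding on exact halves, and
    # ndigits rounding that returns a Fraction.
    #
    #   >>> round(Fraction(5, 2))    # 2.5 -> nearest even integer
    #   2
    #   >>> round(Fraction(7, 2))    # 3.5 -> nearest even integer
    #   4
    #   >>> round(Fraction(1, 3), 2)
    #   Fraction(33, 100)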
def __hash__(self):
"""hash(self)"""
# XXX since this method is expensive, consider caching the result
# In order to make sure that the hash of a Fraction agrees
# with the hash of a numerically equal integer, float or
# Decimal instance, we follow the rules for numeric hashes
# outlined in the documentation. (See library docs, 'Built-in
# Types').
# dinv is the inverse of self._denominator modulo the prime
# _PyHASH_MODULUS, or 0 if self._denominator is divisible by
# _PyHASH_MODULUS.
dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
if not dinv:
hash_ = _PyHASH_INF
else:
hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS
result = hash_ if self >= 0 else -hash_
return -2 if result == -1 else result
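    # A quick consistency check (illustrative): the rule above keeps hashes
    # in agreement with numerically equal ints and floats.
    #
    #   >>> hash(Fraction(3, 1)) == hash(3)
    #   True
    #   >>> hash(Fraction(1, 2)) == hash(0.5)
    #   True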
def __eq__(a, b):
"""a == b"""
if type(b) is int:
return a._numerator == b and a._denominator == 1
if isinstance(b, numbers.Rational):
return (a._numerator == b.numerator and
a._denominator == b.denominator)
if isinstance(b, numbers.Complex) and b.imag == 0:
b = b.real
if isinstance(b, float):
if math.isnan(b) or math.isinf(b):
# comparisons with an infinity or nan should behave in
# the same way for any finite a, so treat a as zero.
return 0.0 == b
else:
return a == a.from_float(b)
else:
# Since a doesn't know how to compare with b, let's give b
# a chance to compare itself with a.
return NotImplemented
def _richcmp(self, other, op):
"""Helper for comparison operators, for internal use only.
Implement comparison between a Rational instance `self`, and
either another Rational instance or a float `other`. If
`other` is not a Rational instance or a float, return
NotImplemented. `op` should be one of the six standard
comparison operators.
"""
# convert other to a Rational instance where reasonable.
if isinstance(other, numbers.Rational):
return op(self._numerator * other.denominator,
self._denominator * other.numerator)
if isinstance(other, float):
if math.isnan(other) or math.isinf(other):
return op(0.0, other)
else:
return op(self, self.from_float(other))
else:
return NotImplemented
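    # Illustrative (doctest-style): comparisons with floats route through
    # from_float for exactness, and infinities compare against zero.
    #
    #   >>> Fraction(1, 2) == 0.5
    #   True
    #   >>> Fraction(1, 10) == 0.1   # 0.1 is not exactly representable
    #   False
    #   >>> Fraction(0) < float('inf')
    #   True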
def __lt__(a, b):
"""a < b"""
return a._richcmp(b, operator.lt)
def __gt__(a, b):
"""a > b"""
return a._richcmp(b, operator.gt)
def __le__(a, b):
"""a <= b"""
return a._richcmp(b, operator.le)
def __ge__(a, b):
"""a >= b"""
return a._richcmp(b, operator.ge)
def __bool__(a):
"""a != 0"""
return a._numerator != 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Fraction:
return self # I'm immutable; therefore I am my own clone
return self.__class__(self._numerator, self._denominator)
def __deepcopy__(self, memo):
if type(self) == Fraction:
return self # My components are also immutable
return self.__class__(self._numerator, self._denominator)
| 23,639 | 644 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/nturl2path.py | """Convert an NT pathname to a file URL and vice versa."""
def url2pathname(url):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
# e.g.
# ///C|/foo/bar/spam.foo
# and
# ///C:/foo/bar/spam.foo
# become
# C:\foo\bar\spam.foo
import string, urllib.parse
# Windows itself uses ":" even in URLs.
url = url.replace(':', '|')
if not '|' in url:
# No drive specifier, just convert slashes
if url[:4] == '////':
# path is something like ////host/path/on/remote/host
# convert this to \\host\path\on\remote\host
# (notice halving of slashes at the start of the path)
url = url[2:]
components = url.split('/')
# make sure not to convert quoted slashes :-)
return urllib.parse.unquote('\\'.join(components))
comp = url.split('|')
if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
error = 'Bad URL: ' + url
raise OSError(error)
drive = comp[0][-1].upper()
components = comp[1].split('/')
path = drive + ':'
for comp in components:
if comp:
path = path + '\\' + urllib.parse.unquote(comp)
    # Issue #11474 - handling a url such as |c/|
if path.endswith(':') and url.endswith('/'):
path += '\\'
return path
def pathname2url(p):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
# e.g.
# C:\foo\bar\spam.foo
# becomes
# ///C:/foo/bar/spam.foo
import urllib.parse
if not ':' in p:
# No drive specifier, just convert slashes and quote the name
if p[:2] == '\\\\':
# path is something like \\host\path\on\remote\host
# convert this to ////host/path/on/remote/host
# (notice doubling of slashes at the start of the path)
p = '\\\\' + p
components = p.split('\\')
return urllib.parse.quote('/'.join(components))
comp = p.split(':')
if len(comp) != 2 or len(comp[0]) > 1:
error = 'Bad path: ' + p
raise OSError(error)
drive = urllib.parse.quote(comp[0].upper())
components = comp[1].split('\\')
path = '///' + drive + ':'
for comp in components:
if comp:
path = path + '/' + urllib.parse.quote(comp)
return path
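# Round-trip sketch (illustrative, not executed on import):
#
#   >>> url2pathname('///C:/foo/bar/spam.foo')
#   'C:\\foo\\bar\\spam.foo'
#   >>> pathname2url('C:\\foo\\bar\\spam.foo')
#   '///C:/foo/bar/spam.foo'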
| 2,444 | 69 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/tty.py | """Terminal utilities."""
# Author: Steen Lumholt.
from termios import *
__all__ = ["setraw", "setcbreak"]
# Indexes for termios list.
IFLAG = 0
OFLAG = 1
CFLAG = 2
LFLAG = 3
ISPEED = 4
OSPEED = 5
CC = 6
def setraw(fd, when=TCSAFLUSH):
"""Put terminal into a raw mode."""
mode = tcgetattr(fd)
mode[IFLAG] = mode[IFLAG] & ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON)
mode[OFLAG] = mode[OFLAG] & ~(OPOST)
mode[CFLAG] = mode[CFLAG] & ~(CSIZE | PARENB)
mode[CFLAG] = mode[CFLAG] | CS8
mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON | IEXTEN | ISIG)
mode[CC][VMIN] = 1
mode[CC][VTIME] = 0
tcsetattr(fd, when, mode)
def setcbreak(fd, when=TCSAFLUSH):
"""Put terminal into a cbreak mode."""
mode = tcgetattr(fd)
mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON)
mode[CC][VMIN] = 1
mode[CC][VTIME] = 0
tcsetattr(fd, when, mode)
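# Minimal usage sketch (POSIX only, illustrative): save the terminal state,
# read one keypress in cbreak mode, then restore the original settings.
#
#   import sys, termios, tty
#   fd = sys.stdin.fileno()
#   old = termios.tcgetattr(fd)
#   try:
#       tty.setcbreak(fd)
#       ch = sys.stdin.read(1)    # returns after a single byte is typed
#   finally:
#       termios.tcsetattr(fd, termios.TCSADRAIN, old)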
| 879 | 37 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/statistics.py | """
Basic statistics module.
This module provides functions for calculating statistics of data, including
averages, variance, and standard deviation.
Calculating averages
--------------------
================== =============================================
Function Description
================== =============================================
mean Arithmetic mean (average) of data.
harmonic_mean Harmonic mean of data.
median Median (middle value) of data.
median_low Low median of data.
median_high High median of data.
median_grouped Median, or 50th percentile, of grouped data.
mode Mode (most common value) of data.
================== =============================================
Calculate the arithmetic mean ("the average") of data:
>>> mean([-1.0, 2.5, 3.25, 5.75])
2.625
Calculate the standard median of discrete data:
>>> median([2, 3, 4, 5])
3.5
Calculate the median, or 50th percentile, of data grouped into class intervals
centred on the data values provided. E.g. if your data points are rounded to
the nearest whole number:
>>> median_grouped([2, 2, 3, 3, 3, 4]) #doctest: +ELLIPSIS
2.8333333333...
This should be interpreted in this way: you have two data points in the class
interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in
the class interval 3.5-4.5. The median of these data points is 2.8333...
Calculating variability or spread
---------------------------------
================== =============================================
Function Description
================== =============================================
pvariance Population variance of data.
variance Sample variance of data.
pstdev Population standard deviation of data.
stdev Sample standard deviation of data.
================== =============================================
Calculate the standard deviation of sample data:
>>> stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS
4.38961843444...
If you have previously calculated the mean, you can pass it as the optional
second argument to the four "spread" functions to avoid recalculating it:
>>> data = [1, 2, 2, 4, 4, 4, 5, 6]
>>> mu = mean(data)
>>> pvariance(data, mu)
2.5
Exceptions
----------
A single exception is defined: StatisticsError is a subclass of ValueError.
"""
__all__ = [ 'StatisticsError',
'pstdev', 'pvariance', 'stdev', 'variance',
'median', 'median_low', 'median_high', 'median_grouped',
'mean', 'mode', 'harmonic_mean',
]
import collections
import decimal
import math
import numbers
from fractions import Fraction
from decimal import Decimal
from itertools import groupby, chain
from bisect import bisect_left, bisect_right
# === Exceptions ===
class StatisticsError(ValueError):
pass
# === Private utilities ===
def _sum(data, start=0):
"""_sum(data [, start]) -> (type, sum, count)
Return a high-precision sum of the given numeric data as a fraction,
together with the type to be converted to and the count of items.
If optional argument ``start`` is given, it is added to the total.
If ``data`` is empty, ``start`` (defaulting to 0) is returned.
Examples
--------
>>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
(<class 'float'>, Fraction(11, 1), 5)
Some sources of round-off error will be avoided:
# Built-in sum returns zero.
>>> _sum([1e50, 1, -1e50] * 1000)
(<class 'float'>, Fraction(1000, 1), 3000)
Fractions and Decimals are also supported:
>>> from fractions import Fraction as F
>>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
(<class 'fractions.Fraction'>, Fraction(63, 20), 4)
>>> from decimal import Decimal as D
>>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
>>> _sum(data)
(<class 'decimal.Decimal'>, Fraction(6963, 10000), 4)
Mixed types are currently treated as an error, except that int is
allowed.
"""
count = 0
n, d = _exact_ratio(start)
partials = {d: n}
partials_get = partials.get
T = _coerce(int, type(start))
for typ, values in groupby(data, type):
T = _coerce(T, typ) # or raise TypeError
for n,d in map(_exact_ratio, values):
count += 1
partials[d] = partials_get(d, 0) + n
if None in partials:
# The sum will be a NAN or INF. We can ignore all the finite
# partials, and just look at this special one.
total = partials[None]
assert not _isfinite(total)
else:
# Sum all the partial sums using builtin sum.
# FIXME is this faster if we sum them in order of the denominator?
total = sum(Fraction(n, d) for d, n in sorted(partials.items()))
return (T, total, count)
def _isfinite(x):
try:
return x.is_finite() # Likely a Decimal.
except AttributeError:
return math.isfinite(x) # Coerces to float first.
def _coerce(T, S):
"""Coerce types T and S to a common type, or raise TypeError.
Coercion rules are currently an implementation detail. See the CoerceTest
test class in test_statistics for details.
"""
# See http://bugs.python.org/issue24068.
assert T is not bool, "initial type T is bool"
# If the types are the same, no need to coerce anything. Put this
# first, so that the usual case (no coercion needed) happens as soon
# as possible.
if T is S: return T
# Mixed int & other coerce to the other type.
if S is int or S is bool: return T
if T is int: return S
# If one is a (strict) subclass of the other, coerce to the subclass.
if issubclass(S, T): return S
if issubclass(T, S): return T
# Ints coerce to the other type.
if issubclass(T, int): return S
if issubclass(S, int): return T
# Mixed fraction & float coerces to float (or float subclass).
if issubclass(T, Fraction) and issubclass(S, float):
return S
if issubclass(T, float) and issubclass(S, Fraction):
return T
# Any other combination is disallowed.
msg = "don't know how to coerce %s and %s"
raise TypeError(msg % (T.__name__, S.__name__))
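# Illustrative outcomes of the rules above (not executed on import):
#
#   _coerce(int, float) -> float          # ints coerce to the other type
#   _coerce(Fraction, float) -> float     # fraction/float mix prefers float
#   _coerce(float, Decimal) -> TypeError  # disallowed combination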
def _exact_ratio(x):
"""Return Real number x to exact (numerator, denominator) pair.
>>> _exact_ratio(0.25)
(1, 4)
x is expected to be an int, Fraction, Decimal or float.
"""
try:
# Optimise the common case of floats. We expect that the most often
# used numeric type will be builtin floats, so try to make this as
# fast as possible.
if type(x) is float or type(x) is Decimal:
return x.as_integer_ratio()
try:
# x may be an int, Fraction, or Integral ABC.
return (x.numerator, x.denominator)
except AttributeError:
try:
# x may be a float or Decimal subclass.
return x.as_integer_ratio()
except AttributeError:
# Just give up?
pass
except (OverflowError, ValueError):
# float NAN or INF.
assert not _isfinite(x)
return (x, None)
msg = "can't convert type '{}' to numerator/denominator"
raise TypeError(msg.format(type(x).__name__))
def _convert(value, T):
"""Convert value to given numeric type T."""
if type(value) is T:
# This covers the cases where T is Fraction, or where value is
# a NAN or INF (Decimal or float).
return value
if issubclass(T, int) and value.denominator != 1:
T = float
try:
# FIXME: what do we do if this overflows?
return T(value)
except TypeError:
if issubclass(T, Decimal):
return T(value.numerator)/T(value.denominator)
else:
raise
def _counts(data):
# Generate a table of sorted (value, frequency) pairs.
table = collections.Counter(iter(data)).most_common()
if not table:
return table
# Extract the values with the highest frequency.
maxfreq = table[0][1]
for i in range(1, len(table)):
if table[i][1] != maxfreq:
table = table[:i]
break
return table
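# e.g. (illustrative): _counts([1, 1, 2, 3, 3]) -> [(1, 2), (3, 2)], so a
# two-way tie survives and mode() can report "2 equally common values".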
def _find_lteq(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
def _find_rteq(a, l, x):
'Locate the rightmost value exactly equal to x'
i = bisect_right(a, x, lo=l)
if i != (len(a)+1) and a[i-1] == x:
return i-1
raise ValueError
def _fail_neg(values, errmsg='negative value'):
"""Iterate over values, failing if any are less than zero."""
for x in values:
if x < 0:
raise StatisticsError(errmsg)
yield x
# === Measures of central tendency (averages) ===
def mean(data):
"""Return the sample arithmetic mean of data.
>>> mean([1, 2, 3, 4, 4])
2.8
>>> from fractions import Fraction as F
>>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)])
Fraction(13, 21)
>>> from decimal import Decimal as D
>>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")])
Decimal('0.5625')
If ``data`` is empty, StatisticsError will be raised.
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('mean requires at least one data point')
T, total, count = _sum(data)
assert count == n
return _convert(total/n, T)
def harmonic_mean(data):
"""Return the harmonic mean of data.
The harmonic mean, sometimes called the subcontrary mean, is the
reciprocal of the arithmetic mean of the reciprocals of the data,
and is often appropriate when averaging quantities which are rates
or ratios, for example speeds. Example:
Suppose an investor purchases an equal value of shares in each of
three companies, with P/E (price/earning) ratios of 2.5, 3 and 10.
What is the average P/E ratio for the investor's portfolio?
>>> harmonic_mean([2.5, 3, 10]) # For an equal investment portfolio.
3.6
Using the arithmetic mean would give an average of about 5.167, which
is too high.
If ``data`` is empty, or any element is less than zero,
``harmonic_mean`` will raise ``StatisticsError``.
"""
# For a justification for using harmonic mean for P/E ratios, see
# http://fixthepitch.pellucid.com/comps-analysis-the-missing-harmony-of-summary-statistics/
# http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2621087
if iter(data) is data:
data = list(data)
errmsg = 'harmonic mean does not support negative values'
n = len(data)
if n < 1:
raise StatisticsError('harmonic_mean requires at least one data point')
elif n == 1:
x = data[0]
if isinstance(x, (numbers.Real, Decimal)):
if x < 0:
raise StatisticsError(errmsg)
return x
else:
raise TypeError('unsupported type')
try:
T, total, count = _sum(1/x for x in _fail_neg(data, errmsg))
except ZeroDivisionError:
return 0
assert count == n
return _convert(n/total, T)
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
>>> median([1, 3, 5])
3
>>> median([1, 3, 5, 7])
4.0
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n%2 == 1:
return data[n//2]
else:
i = n//2
return (data[i - 1] + data[i])/2
def median_low(data):
"""Return the low median of numeric data.
When the number of data points is odd, the middle value is returned.
When it is even, the smaller of the two middle values is returned.
>>> median_low([1, 3, 5])
3
>>> median_low([1, 3, 5, 7])
3
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n%2 == 1:
return data[n//2]
else:
return data[n//2 - 1]
def median_high(data):
"""Return the high median of data.
When the number of data points is odd, the middle value is returned.
When it is even, the larger of the two middle values is returned.
>>> median_high([1, 3, 5])
3
>>> median_high([1, 3, 5, 7])
5
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
return data[n//2]
def median_grouped(data, interval=1):
"""Return the 50th percentile (median) of grouped continuous data.
>>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
3.7
>>> median_grouped([52, 52, 53, 54])
52.5
This calculates the median as the 50th percentile, and should be
used when your data is continuous and grouped. In the above example,
the values 1, 2, 3, etc. actually represent the midpoint of classes
0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in
class 3.5-4.5, and interpolation is used to estimate it.
Optional argument ``interval`` represents the class interval, and
defaults to 1. Changing the class interval naturally will change the
interpolated 50th percentile value:
>>> median_grouped([1, 3, 3, 5, 7], interval=1)
3.25
>>> median_grouped([1, 3, 3, 5, 7], interval=2)
3.5
This function does not check whether the data points are at least
``interval`` apart.
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
elif n == 1:
return data[0]
# Find the value at the midpoint. Remember this corresponds to the
# centre of the class interval.
x = data[n//2]
for obj in (x, interval):
if isinstance(obj, (str, bytes)):
raise TypeError('expected number but got %r' % obj)
try:
L = x - interval/2 # The lower limit of the median interval.
except TypeError:
# Mixed type. For now we just coerce to float.
L = float(x) - float(interval)/2
    # Use bisection search to locate x in data in O(log n) time.
# Find the position of leftmost occurrence of x in data
l1 = _find_lteq(data, x)
# Find the position of rightmost occurrence of x in data[l1...len(data)]
# Assuming always l1 <= l2
l2 = _find_rteq(data, l1, x)
cf = l1
f = l2 - l1 + 1
return L + interval*(n/2 - cf)/f
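# Worked example of the interpolation above (illustrative): for
# [52, 52, 53, 54] with interval=1, n=4 and x=data[2]=53, we get L=52.5,
# cf=2 values below the median interval and f=1 value inside it, giving
# 52.5 + 1*(4/2 - 2)/1 == 52.5, matching the docstring.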
def mode(data):
"""Return the most common data point from discrete or nominal data.
``mode`` assumes discrete data, and returns a single value. This is the
standard treatment of the mode as commonly taught in schools:
>>> mode([1, 1, 2, 3, 3, 3, 3, 4])
3
This also works with nominal (non-numeric) data:
>>> mode(["red", "blue", "blue", "red", "green", "red", "red"])
'red'
If there is not exactly one most common value, ``mode`` will raise
StatisticsError.
"""
# Generate a table of sorted (value, frequency) pairs.
table = _counts(data)
if len(table) == 1:
return table[0][0]
elif table:
raise StatisticsError(
'no unique mode; found %d equally common values' % len(table)
)
else:
raise StatisticsError('no mode for empty data')
# === Measures of spread ===
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
def _ss(data, c=None):
"""Return sum of square deviations of sequence data.
If ``c`` is None, the mean is calculated in one pass, and the deviations
from the mean are calculated in a second pass. Otherwise, deviations are
calculated from ``c`` as given. Use the second case with care, as it can
lead to garbage results.
"""
if c is None:
c = mean(data)
T, total, count = _sum((x-c)**2 for x in data)
# The following sum should mathematically equal zero, but due to rounding
# error may not.
U, total2, count2 = _sum((x-c) for x in data)
assert T == U and count == count2
total -= total2**2/len(data)
assert not total < 0, 'negative sum of square deviations: %f' % total
return (T, total)
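# Worked example (illustrative): for data [1, 2, 3] the mean is 2, the
# squared deviations are 1, 0, 1, and _ss returns (int, Fraction(2, 1));
# the total2 correction term is zero here because the mean is exact.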
def variance(data, xbar=None):
"""Return the sample variance of data.
data should be an iterable of Real-valued numbers, with at least two
values. The optional argument xbar, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function when your data is a sample from a population. To
calculate the variance from the entire population, see ``pvariance``.
Examples:
>>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
>>> variance(data)
1.3720238095238095
If you have already calculated the mean of your data, you can pass it as
the optional second argument ``xbar`` to avoid recalculating it:
>>> m = mean(data)
>>> variance(data, m)
1.3720238095238095
This function does not check that ``xbar`` is actually the mean of
``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
impossible results.
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('31.01875')
>>> from fractions import Fraction as F
>>> variance([F(1, 6), F(1, 2), F(5, 3)])
Fraction(67, 108)
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 2:
raise StatisticsError('variance requires at least two data points')
T, ss = _ss(data, xbar)
return _convert(ss/(n-1), T)
def pvariance(data, mu=None):
"""Return the population variance of ``data``.
data should be an iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
Examples:
>>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
>>> pvariance(data)
1.25
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
>>> mu = mean(data)
>>> pvariance(data, mu)
1.25
This function does not check that ``mu`` is actually the mean of ``data``.
Giving arbitrary values for ``mu`` may lead to invalid or impossible
results.
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('24.815')
>>> from fractions import Fraction as F
>>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
Fraction(13, 72)
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
T, ss = _ss(data, mu)
return _convert(ss/n, T)
def stdev(data, xbar=None):
"""Return the square root of the sample variance.
See ``variance`` for arguments and other details.
>>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
1.0810874155219827
"""
var = variance(data, xbar)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
>>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
0.986893273527251
"""
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
| 20,673 | 671 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/argparse.py | # Author: Steven J. Bethard <[email protected]>.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'MetavarTypeHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import collections as _collections
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _, ngettext
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
star_args = {}
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
if name.isidentifier():
arg_strings.append('%s=%r' % (name, value))
else:
star_args[name] = value
if star_args:
arg_strings.append('**%s' % repr(star_args))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
        self._max_help_position = min(max_help_position,
                                      max(width - 20, indent_increment * 2))
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+', _re.ASCII)
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = (
r'\(.*?\)+(?=\s|$)|'
r'\[.*?\]+(?=\s|$)|'
r'\S+'
)
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width and line:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
default = self._get_default_metavar_for_positional(action)
part = self._format_args(action, default)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = max(self._width - self._current_indent, 11)
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = max(self._width - help_position, 11)
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
default = self._get_default_metavar_for_positional(action)
metavar, = self._metavar_formatter(action, default)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
yield from get_subactions()
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
def _get_default_metavar_for_optional(self, action):
return action.dest.upper()
def _get_default_metavar_for_positional(self, action):
return action.dest
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join(indent + line for line in text.splitlines(keepends=True))
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
class MetavarTypeHelpFormatter(HelpFormatter):
"""Help message formatter which uses the argument 'type' as the default
metavar value (instead of the argument 'dest')
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_default_metavar_for_optional(self, action):
return action.type.__name__
def _get_default_metavar_for_positional(self, action):
return action.type.__name__
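# Illustrative: formatter classes are selected via the formatter_class=
# argument of ArgumentParser (sketch, not executed on import):
#
#   parser = ArgumentParser(
#       description='line one\nline two',
#       formatter_class=RawDescriptionHelpFormatter)  # keeps the newline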
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- A callable that accepts a single string argument, and
returns the converted value. The standard Python types str, int,
float, and complex are useful examples of such callables. If None,
str is used.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
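# A minimal custom Action sketch (illustrative, not part of the module):
#
#   class UpperAction(Action):
#       def __call__(self, parser, namespace, values, option_string=None):
#           setattr(namespace, self.dest, values.upper())
#
#   parser = ArgumentParser()
#   parser.add_argument('--name', action=UpperAction)
#   parser.parse_args(['--name', 'ada'])   # -> Namespace(name='ADA')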
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser._print_message(formatter.format_help(), _sys.stdout)
parser.exit()
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, aliases, help):
metavar = dest = name
if aliases:
metavar += ' (%s)' % ', '.join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=dest, help=help,
metavar=metavar)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = _collections.OrderedDict()
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
aliases = kwargs.pop('aliases', ())
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
# make parser available under aliases also
for alias in aliases:
self._name_parser_map[alias] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
args = {'parser_name': parser_name,
'choices': ', '.join(self._name_parser_map)}
msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
# In case this subparser defines new defaults, we parse them
# in a new namespace object and then update the original
# namespace for the relevant parts.
subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)
for key, value in vars(subnamespace).items():
setattr(namespace, key, value)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
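# Typical subparser wiring (illustrative sketch):
#
#   parser = ArgumentParser(prog='tool')
#   sub = parser.add_subparsers(dest='command')
#   run = sub.add_parser('run', help='run a job')
#   run.add_argument('--jobs', type=int, default=1)
#   parser.parse_args(['run', '--jobs', '2'])
#   # -> Namespace(command='run', jobs=2)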
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
- encoding -- The file's encoding. Accepts the same values as the
builtin open() function.
- errors -- A string indicating how encoding and decoding errors are to
be handled. Accepts the same value as the builtin open() function.
"""
def __init__(self, mode='r', bufsize=-1, encoding=None, errors=None):
self._mode = mode
self._bufsize = bufsize
self._encoding = encoding
self._errors = errors
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
try:
return open(string, self._mode, self._bufsize, self._encoding,
self._errors)
except OSError as e:
message = _("can't open '%s': %s")
raise ArgumentTypeError(message % (string, e))
def __repr__(self):
args = self._mode, self._bufsize
kwargs = [('encoding', self._encoding), ('errors', self._errors)]
args_str = ', '.join([repr(arg) for arg in args if arg != -1] +
['%s=%r' % (kw, arg) for kw, arg in kwargs
if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
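
    Example (illustrative):

        >>> ns = Namespace(x=1, y='a')
        >>> ns
        Namespace(x=1, y='a')
        >>> ns == Namespace(x=1, y='a')
        True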
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
if not isinstance(other, Namespace):
return NotImplemented
return vars(self) == vars(other)
def __contains__(self, key):
return key in self.__dict__
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not callable(action_class):
raise ValueError('unknown action "%s"' % (action_class,))
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
raise ValueError('%r is not callable' % (type_func,))
# raise an error if the metavar does not match the type
if hasattr(self, "_get_formatter"):
try:
self._get_formatter()._format_args(action, None)
except TypeError:
raise ValueError("length of metavar tuple does not match nargs")
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
args = {'option': option_string,
'prefix_chars': self.prefix_chars}
msg = _('invalid option string %(option)r: '
'must start with a character %(prefix_chars)r')
raise ValueError(msg % args)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = ngettext('conflicting option string: %s',
'conflicting option strings: %s',
len(conflicting_actions))
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
self._mutually_exclusive_groups = container._mutually_exclusive_groups
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
    - add_help -- Add a -h/--help option
- allow_abbrev -- Allow long options to be abbreviated unambiguously
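
    Example (illustrative; 'frobnicate' and '--level' are made-up names):

        >>> parser = ArgumentParser(prog='frobnicate')
        >>> _ = parser.add_argument('--level', type=int, default=0)
        >>> parser.parse_args(['--level', '3'])
        Namespace(level=3)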
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
allow_abbrev=True):
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
self.allow_abbrev = allow_abbrev
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help argument if necessary
# (using explicit default to override global argument_default)
default_prefix = '-' if '-' in prefix_chars else prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
            title = kwargs.pop('title', 'subcommands')
            description = kwargs.pop('description', None)
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
if args is None:
# args default to the system args
args = _sys.argv[1:]
else:
# make sure that args are mutable
args = list(args)
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
setattr(namespace, action.dest, action.default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
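        # (for example, the args ['-x', '1', '--', 'a'] yield the pattern
        # 'OA-A', assuming '-x' is a registered option string)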
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
        # converts arg strings to the appropriate type and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# make sure all required actions were present and also convert
# action defaults which were not given as arguments
required_actions = []
for action in self._actions:
if action not in seen_actions:
if action.required:
required_actions.append(_get_action_name(action))
else:
                    # Convert the action default now, rather than before
                    # parsing, so that the convert function isn't called
                    # twice (it may fail the second time) when the
                    # argument was actually supplied; convert only if the
                    # namespace still holds the original default string
if (action.default is not None and
isinstance(action.default, str) and
hasattr(namespace, action.dest) and
action.default is getattr(namespace, action.dest)):
setattr(namespace, action.dest,
self._get_value(action, action.default))
if required_actions:
self.error(_('the following arguments are required: %s') %
', '.join(required_actions))
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
with open(arg_string[1:]) as args_file:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
except OSError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
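        # override hook: a subclass could, for example, return
        # arg_line.split() so that one line in an arguments file
        # may carry several space-separated arguments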
return [arg_line]
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = ngettext('expected %s argument',
'expected %s arguments',
action.nargs) % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
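        # returns None if arg_string is a positional, or a tuple
        # (action, option_string, explicit_arg) otherwise; for example
        # (illustratively), '--foo=bar' yields (action_for_foo, '--foo',
        # 'bar') when '--foo' is registered, while an unregistered
        # '--nosuch' yields (None, '--nosuch', None)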
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
if self.allow_abbrev:
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
args = {'option': arg_string, 'matches': options}
msg = _('ambiguous option: %(option)s could match %(matches)s')
self.error(msg % args)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
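        # (for example, nargs=2 on a positional yields '(-*A-*A-*)':
        # two 'A's, each of which may be preceded or followed by '--')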
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER, REMAINDER args, strip out first '--'
if action.nargs not in [PARSER, REMAINDER]:
try:
arg_strings.remove('--')
except ValueError:
pass
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, str):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
args = {'type': name, 'value': arg_string}
msg = _('invalid %(type)s value: %(value)r')
raise ArgumentError(action, msg % args)
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
args = {'value': value,
'choices': ', '.join(map(repr, action.choices))}
msg = _('invalid choice: %(value)r (choose from %(choices)s)')
raise ArgumentError(action, msg % args)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
| 90,372 | 2,394 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/pickletools.py | '''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
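
Example (output is illustrative and may vary across Python versions):

>>> import pickle, pickletools
>>> pickletools.dis(pickle.dumps([1, 2], 0))  # doctest: +SKIP
    0: (    MARK
    1: l        LIST       (MARK at 0)
    2: p    PUT        0
    5: I    INT        1
    8: a    APPEND
    9: I    INT        2
   12: a    APPEND
   13: .    STOP
highest protocol among opcodes = 0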
'''
import codecs
import io
import pickle
import re
import sys
__all__ = ['dis', 'genops', 'optimize']
bytes_types = pickle.bytes_types
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
# "A pickle" is a program for a virtual pickle machine (PM, but more accurately
# called an unpickling machine). It's a sequence of opcodes, interpreted by the
# PM, building an arbitrarily complex Python object.
#
# For the most part, the PM is very simple: there are no looping, testing, or
# conditional instructions, no arithmetic and no function calls. Opcodes are
# executed once each, from first to last, until a STOP opcode is reached.
#
# The PM has two data areas, "the stack" and "the memo".
#
# Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
# integer object on the stack, whose value is gotten from a decimal string
# literal immediately following the INT opcode in the pickle bytestream. Other
# opcodes take Python objects off the stack. The result of unpickling is
# whatever object is left on the stack when the final STOP opcode is executed.
#
# The memo is simply an array of objects, or it can be implemented as a dict
# mapping little integers to objects. The memo serves as the PM's "long term
# memory", and the little integers indexing the memo are akin to variable
# names. Some opcodes pop a stack object into the memo at a given index,
# and others push a memo object at a given index onto the stack again.
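# For example (illustrative), the protocol 0 opcode sequence "p0\n" (PUT 0)
# stores the object on top of the stack into memo slot 0, and a later
# "g0\n" (GET 0) pushes memo slot 0's object onto the stack again.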
#
# At heart, that's all the PM has. Subtleties arise for these reasons:
#
# + Object identity. Objects can be arbitrarily complex, and subobjects
# may be shared (for example, the list [a, a] refers to the same object a
# twice). It can be vital that unpickling recreate an isomorphic object
# graph, faithfully reproducing sharing.
#
# + Recursive objects. For example, after "L = []; L.append(L)", L is a
# list, and L[0] is the same list. This is related to the object identity
# point, and some sequences of pickle opcodes are subtle in order to
# get the right result in all cases.
#
# + Things pickle doesn't know everything about. Examples of things pickle
# does know everything about are Python's builtin scalar and container
# types, like ints and tuples. They generally have opcodes dedicated to
# them. For things like module references and instances of user-defined
# classes, pickle's knowledge is limited. Historically, many enhancements
# have been made to the pickle protocol in order to do a better (faster,
# and/or more compact) job on those.
#
# + Backward compatibility and micro-optimization. As explained below,
# pickle opcodes never go away, not even when better ways to do a thing
# get invented. The repertoire of the PM just keeps growing over time.
# For example, protocol 0 had two opcodes for building Python integers (INT
# and LONG), protocol 1 added three more for more-efficient pickling of short
# integers, and protocol 2 added two more for more-efficient pickling of
# long integers (before protocol 2, the only ways to pickle a Python long
# took time quadratic in the number of digits, for both pickling and
# unpickling). "Opcode bloat" isn't so much a subtlety as a source of
# wearying complication.
#
#
# Pickle protocols:
#
# For compatibility, the meaning of a pickle opcode never changes. Instead new
# pickle opcodes get added, and each version's unpickler can handle all the
# pickle opcodes in all protocol versions to date. So old pickles continue to
# be readable forever. The pickler can generally be told to restrict itself to
# the subset of opcodes available under previous protocol versions too, so that
# users can create pickles under the current version readable by older
# versions. However, a pickle does not contain its version number embedded
# within it. If an older unpickler tries to read a pickle using a later
# protocol, the result is most likely an exception due to seeing an unknown (in
# the older unpickler) opcode.
#
# The original pickle used what's now called "protocol 0", and what was called
# "text mode" before Python 2.3. The entire pickle bytestream is made up of
# printable 7-bit ASCII characters, plus the newline character, in protocol 0.
# That's why it was called text mode. Protocol 0 is small and elegant, but
# sometimes painfully inefficient.
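# For example, pickle.dumps(255, protocol=0) is the 6-byte text b'I255\n.',
# while protocol 1 emits the 3-byte b'K\xff.' (a BININT1 plus STOP).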
#
# The second major set of additions is now called "protocol 1", and was called
# "binary mode" before Python 2.3. This added many opcodes with arguments
# consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
# bytes. Binary mode pickles can be substantially smaller than equivalent
# text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
# int as 4 bytes following the opcode, which is cheaper to unpickle than the
# (perhaps) 11-character decimal string attached to INT. Protocol 1 also added
# a number of opcodes that operate on many stack elements at once (like APPENDS
# and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
#
# The third major set of additions came in Python 2.3, and is called "protocol
# 2". This added:
#
# - A better way to pickle instances of new-style classes (NEWOBJ).
#
# - A way for a pickle to identify its protocol (PROTO).
#
# - Time- and space- efficient pickling of long ints (LONG{1,4}).
#
# - Shortcuts for small tuples (TUPLE{1,2,3}).
#
# - Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
#
# - The "extension registry", a vector of popular objects that can be pushed
# efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
# the registry contents are predefined (there's nothing akin to the memo's
# PUT).
#
# Another independent change with Python 2.3 is the abandonment of any
# pretense that it might be safe to load pickles received from untrusted
# parties -- no sufficient security analysis has been done to guarantee
# this and there isn't a use case that warrants the expense of such an
# analysis.
#
# To this end, all tests for __safe_for_unpickling__ or for
# copyreg.safe_constructors are removed from the unpickling code.
# References to these variables in the descriptions below are to be seen
# as describing unpickling in Python 2.2 and before.
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
TAKEN_FROM_ARGUMENT4U = -4 # num bytes is 4-byte unsigned little-endian int
TAKEN_FROM_ARGUMENT8U = -5 # num bytes is 8-byte unsigned little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4,
TAKEN_FROM_ARGUMENT4U,
TAKEN_FROM_ARGUMENT8U))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import io
>>> read_uint1(io.BytesIO(b'\xff'))
255
"""
data = f.read(1)
if data:
return data[0]
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import io
>>> read_uint2(io.BytesIO(b'\xff\x00'))
255
>>> read_uint2(io.BytesIO(b'\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import io
>>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_uint4(f):
r"""
>>> import io
>>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<I", data)[0]
raise ValueError("not enough data in stream to read uint4")
uint4 = ArgumentDescriptor(
name='uint4',
n=4,
reader=read_uint4,
doc="Four-byte unsigned integer, little-endian.")
def read_uint8(f):
r"""
>>> import io
>>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
255
>>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
True
"""
data = f.read(8)
if len(data) == 8:
return _unpack("<Q", data)[0]
raise ValueError("not enough data in stream to read uint8")
uint8 = ArgumentDescriptor(
name='uint8',
n=8,
reader=read_uint8,
doc="Eight-byte unsigned integer, little-endian.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import io
>>> read_stringnl(io.BytesIO(b"'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(io.BytesIO(b"\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around b''
>>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False)
''
>>> read_stringnl(io.BytesIO(b"''\n"))
''
>>> read_stringnl(io.BytesIO(b'"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in (b'"', b"'"):
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
if decode:
data = codecs.escape_decode(data)[0].decode("ascii")
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import io
>>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string1(f):
r"""
>>> import io
>>> read_string1(io.BytesIO(b"\x00"))
''
>>> read_string1(io.BytesIO(b"\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_string4(f):
r"""
>>> import io
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc"))
''
>>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_bytes1(f):
r"""
>>> import io
>>> read_bytes1(io.BytesIO(b"\x00"))
b''
>>> read_bytes1(io.BytesIO(b"\x03abcdef"))
b'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
(n, len(data)))
bytes1 = ArgumentDescriptor(
name="bytes1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_bytes1,
doc="""A counted bytes string.
The first argument is a 1-byte unsigned int giving the number
of bytes, and the second argument is that many bytes.
""")
def read_bytes4(f):
r"""
>>> import io
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
b''
>>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
b'abc'
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
(n, len(data)))
bytes4 = ArgumentDescriptor(
name="bytes4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_bytes4,
doc="""A counted bytes string.
The first argument is a 4-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_bytes8(f):
r"""
>>> import io, struct, sys
>>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
b''
>>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
b'abc'
>>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
>>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef")) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: expected ... bytes in a bytes8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
(n, len(data)))
bytes8 = ArgumentDescriptor(
name="bytes8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_bytes8,
doc="""A counted bytes string.
The first argument is an 8-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import io
>>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd'
True
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return str(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring1(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) # little-endian 1-byte length
>>> t = read_unicodestring1(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring1(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring1, but only 6 remain
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring1, but only %d "
"remain" % (n, len(data)))
unicodestring1 = ArgumentDescriptor(
name="unicodestring1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_unicodestring1,
doc="""A counted Unicode string.
      The first argument is a 1-byte unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring4(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc), 0, 0, 0]) # little-endian 4-byte length
>>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring4(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_unicodestring4,
doc="""A counted Unicode string.
    The first argument is a 4-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring8(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) + b'\0' * 7 # little-endian 8-byte length
>>> t = read_unicodestring8(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring8(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring8, but only %d "
"remain" % (n, len(data)))
unicodestring8 = ArgumentDescriptor(
name="unicodestring8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_unicodestring8,
doc="""A counted Unicode string.
    The first argument is an 8-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import io
>>> read_decimalnl_short(io.BytesIO(b"1234\n56"))
1234
>>> read_decimalnl_short(io.BytesIO(b"1234L\n56"))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: b'1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
# There's a hack for True and False here.
if s == b"00":
return False
elif s == b"01":
return True
return int(s)
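# The bool hack above means that, for example,
# read_decimalnl_short(io.BytesIO(b"00\n")) returns False and
# read_decimalnl_short(io.BytesIO(b"01\n")) returns True -- matching how
# protocol 0 spells booleans (see the INT opcode below).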
def read_decimalnl_long(f):
r"""
>>> import io
>>> read_decimalnl_long(io.BytesIO(b"1234L\n56"))
1234
>>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6"))
123456789012345678901234
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s[-1:] == b'L':
s = s[:-1]
return int(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import io
>>> read_floatnl(io.BytesIO(b"-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import io, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
b'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(io.BytesIO(raw + b"\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and pickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import io
>>> read_long1(io.BytesIO(b"\x00"))
0
>>> read_long1(io.BytesIO(b"\x02\xff\x00"))
255
>>> read_long1(io.BytesIO(b"\x02\xff\x7f"))
32767
>>> read_long1(io.BytesIO(b"\x02\x00\xff"))
-256
>>> read_long1(io.BytesIO(b"\x02\x00\x80"))
-32768
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
    If the size is 0, that's taken as a shortcut for the int 0.
""")
def read_long4(f):
r"""
>>> import io
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00"))
255
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f"))
32767
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff"))
-256
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80"))
-32768
    >>> read_long4(io.BytesIO(b"\x00\x00\x00\x00"))
0
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = pylong = StackObject(
name='int',
obtype=int,
doc="A Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, bool),
doc="A Python integer or boolean object.")
pybool = StackObject(
name='bool',
obtype=bool,
doc="A Python boolean object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pybytes_or_str = pystring = StackObject(
name='bytes_or_str',
obtype=(bytes, str),
doc="A Python bytes or (Unicode) string object.")
pybytes = StackObject(
name='bytes',
obtype=bytes,
doc="A Python bytes object.")
pyunicode = StackObject(
name='str',
obtype=str,
doc="A Python (Unicode) string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
pyset = StackObject(
name="set",
obtype=set,
doc="A Python set object.")
pyfrozenset = StackObject(
name="frozenset",
    obtype=frozenset,
doc="A Python frozenset object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
    No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
      Another difference arises because, when bool was introduced as a
      distinct type in 2.3, the builtin names True and False were also
      added to 2.2.2, mapping to ints 1 and 0. For compatibility in both
      directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pyint],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
      unpickles to a Python long. There doesn't seem to be a real
      purpose to the trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pybytes_or_str],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
      newline character. These are usually decoded into a str instance
      using the encoding given to the Unpickler constructor, or the
      default, 'ASCII'. If the encoding given was 'bytes', however, they
      will be decoded as a bytes object instead.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian
signed int giving the number of bytes in the string, and the
second is that many bytes, which are taken literally as the string
      content. These are usually decoded into a str instance using the
      encoding given to the Unpickler constructor, or the default,
      'ASCII'. If the encoding given was 'bytes', however, they will be
      decoded as a bytes object instead.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content. These are
      usually decoded into a str instance using the encoding given to
      the Unpickler constructor, or the default, 'ASCII'. If the
      encoding given was 'bytes', however, they will be decoded as a
      bytes object instead.
"""),
# Bytes (protocol 3 only; older protocols don't support bytes at all)
I(name='BINBYTES',
code='B',
arg=bytes4,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes, and the second is that many bytes, which are
taken literally as the bytes content.
"""),
I(name='SHORT_BINBYTES',
code='C',
arg=bytes1,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes, and the second is that many bytes, which are taken
literally as the string content.
"""),
I(name='BINBYTES8',
code='\x8e',
arg=bytes8,
stack_before=[],
stack_after=[pybytes],
proto=4,
doc="""Push a Python bytes object.
      There are two arguments: the first is an 8-byte little-endian unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="Push True onto the stack."),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="Push False onto the stack."),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='SHORT_BINUNICODE',
code='\x8c',
arg=unicodestring1,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
      There are two arguments: the first is a 1-byte unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE8',
code='\x8d',
arg=unicodestring8,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
      There are two arguments: the first is an 8-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Ways to build sets
I(name='EMPTY_SET',
code='\x8f',
arg=None,
stack_before=[],
stack_after=[pyset],
proto=4,
doc="Push an empty set."),
I(name='ADDITEMS',
code='\x90',
arg=None,
stack_before=[pyset, markobject, stackslice],
stack_after=[pyset],
proto=4,
doc="""Add an arbitrary number of items to an existing set.
The slice of the stack following the topmost markobject is taken as
a sequence of items, added to the set immediately under the topmost
markobject. Everything at and after the topmost markobject is popped,
leaving the mutated set at the top of the stack.
Stack before: ... pyset markobject item_1 ... item_n
Stack after: ... pyset
      where pyset has been modified via pyset.add(item_i) for i in
1, 2, ..., n, and in that order.
"""),
# Way to build frozensets
I(name='FROZENSET',
code='\x91',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pyfrozenset],
proto=4,
doc="""Build a frozenset out of the topmost slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python frozenset, which single frozenset object replaces all
of the stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3
Stack after: ... frozenset({1, 2, 3})
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=uint4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte unsigned
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=uint4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
unsigned little-endian integer following.
"""),
I(name='MEMOIZE',
code='\x94',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=4,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write is the number of
elements currently present in the memo.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
I(name='STACK_GLOBAL',
code='\x93',
arg=None,
stack_before=[pyunicode, pyunicode],
stack_after=[anyobject],
proto=4,
doc="""Push a global object (module.attr) on the stack.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If not isinstance(callable, type), REDUCE complains unless the
callable has been registered with the copyreg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
      attribute. Unlike the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug). If
__safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
NOTE: the distinction between old-style and new-style classes does
not make sense in Python 3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug). See INST for the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
I(name='NEWOBJ_EX',
code='\x92',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[anyobject],
proto=4,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple and by a keyword argument dict
      (the dict being the stack top). Call these cls, args, and kwargs.
      They are popped off the stack, and the value returned by
      cls.__new__(cls, *args, **kwargs) is pushed back onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Framing support.
I(name='FRAME',
code='\x95',
arg=uint8,
stack_before=[],
stack_after=[],
proto=4,
doc="""Indicate the beginning of a new frame.
The unpickler may use this opcode to safely prefetch data from its
underlying stream.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print("skipping %r: it doesn't look like an opcode name" % name)
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, bytes) or len(picklecode) != 1:
if verbose:
print(("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode)))
continue
picklecode = picklecode.decode("latin-1")
if picklecode in copy:
if verbose:
print("checking name %r w/ code %r for consistency" % (
name, picklecode))
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def _genops(data, yield_end_pos=False):
if isinstance(data, bytes_types):
data = io.BytesIO(data)
if hasattr(data, "tell"):
getpos = data.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = data.read(1)
opcode = code2op.get(code.decode("latin-1"))
if opcode is None:
if code == b"":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
"<unknown>" if pos is None else pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(data)
if yield_end_pos:
yield opcode, arg, pos, getpos()
else:
yield opcode, arg, pos
if code == b'.':
assert opcode.name == 'STOP'
break
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a bytes object,
it's wrapped in a BytesIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
return _genops(pickle)
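# A minimal usage sketch for genops() (nothing beyond the stdlib pickle
# module is assumed):
#
#   import pickle
#   for opcode, arg, pos in genops(pickle.dumps([1, 2])):
#       print(pos, opcode.name, arg)
#
# prints one line per opcode, beginning with PROTO (for protocol >= 2
# pickles) and ending with STOP.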
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
put = 'PUT'
get = 'GET'
oldids = set() # set of all PUT ids
    newids = {}             # ids used by a GET opcode, mapped to new ids
opcodes = [] # (op, idx) or (pos, end_pos)
proto = 0
protoheader = b''
for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True):
if 'PUT' in opcode.name:
oldids.add(arg)
opcodes.append((put, arg))
elif opcode.name == 'MEMOIZE':
idx = len(oldids)
oldids.add(idx)
opcodes.append((put, idx))
elif 'FRAME' in opcode.name:
pass
elif 'GET' in opcode.name:
if opcode.proto > proto:
proto = opcode.proto
newids[arg] = None
opcodes.append((get, arg))
elif opcode.name == 'PROTO':
if arg > proto:
proto = arg
if pos == 0:
protoheader = p[pos: end_pos]
else:
opcodes.append((pos, end_pos))
else:
opcodes.append((pos, end_pos))
del oldids
    # Copy the opcodes except for PUTs without a corresponding GET
out = io.BytesIO()
# Write the PROTO header before any framing
out.write(protoheader)
pickler = pickle._Pickler(out, proto)
if proto >= 4:
pickler.framer.start_framing()
idx = 0
for op, arg in opcodes:
if op is put:
if arg not in newids:
continue
data = pickler.put(idx)
newids[arg] = idx
idx += 1
elif op is get:
data = pickler.get(newids[arg])
else:
data = p[op:arg]
pickler.framer.commit_frame()
pickler.write(data)
pickler.framer.end_framing()
return out.getvalue()
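# A quick usage sketch: unpickling the optimized string gives back an
# equal object, and dropping unused PUTs should never grow the pickle:
#
#   import pickle
#   p = pickle.dumps([1, 2, 3])
#   assert pickle.loads(optimize(p)) == [1, 2, 3]
#   assert len(optimize(p)) <= len(p)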
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
"""Produce a symbolic disassembly of a pickle.
    'pickle' is a file-like object, or string, containing at least one
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg 'indentlevel' is the number of blanks by which to indent
a new MARK level. It defaults to 4.
    Optional arg 'annotate', if nonzero, instructs dis() to add a short
    description of the opcode on each line of disassembled output.
The value given to 'annotate' must be an integer and is used as a
hint for the column where annotation should start. The default
value is 0, meaning no annotations.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
annocol = annotate # column hint for annotations
for opcode, arg, pos in genops(pickle):
if pos is not None:
print("%5d:" % pos, end=' ', file=out)
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"):
if opcode.name == "MEMOIZE":
memo_idx = len(memo)
markmsg = "(as %d)" % memo_idx
else:
assert arg is not None
memo_idx = arg
if memo_idx in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[memo_idx] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
if annotate:
line += ' ' * (annocol - len(line))
# make a mild effort to align annotations
annocol = len(line)
if annocol > 50:
annocol = annotate
line += ' ' + opcode.doc.split('\n', 1)[0]
print(line, file=out)
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print("highest protocol among opcodes =", maxproto, file=out)
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {b'abc': "def"}]
>>> pkl0 = pickle.dumps(x, 0)
>>> dis(pkl0)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 1
9: a APPEND
10: L LONG 2
14: a APPEND
15: ( MARK
16: L LONG 3
20: L LONG 4
24: t TUPLE (MARK at 15)
25: p PUT 1
28: a APPEND
29: ( MARK
30: d DICT (MARK at 29)
31: p PUT 2
34: c GLOBAL '_codecs encode'
50: p PUT 3
53: ( MARK
54: V UNICODE 'abc'
59: p PUT 4
62: V UNICODE 'latin1'
70: p PUT 5
73: t TUPLE (MARK at 53)
74: p PUT 6
77: R REDUCE
78: p PUT 7
81: V UNICODE 'def'
86: p PUT 8
89: s SETITEM
90: a APPEND
91: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl1 = pickle.dumps(x, 1)
>>> dis(pkl1)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: c GLOBAL '_codecs encode'
35: q BINPUT 3
37: ( MARK
38: X BINUNICODE 'abc'
46: q BINPUT 4
48: X BINUNICODE 'latin1'
59: q BINPUT 5
61: t TUPLE (MARK at 37)
62: q BINPUT 6
64: R REDUCE
65: q BINPUT 7
67: X BINUNICODE 'def'
75: q BINPUT 8
77: s SETITEM
78: e APPENDS (MARK at 3)
79: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: c GLOBAL 'copy_reg _reconstructor'
30: p PUT 1
33: ( MARK
34: c GLOBAL 'pickletools _Example'
56: p PUT 2
59: c GLOBAL '__builtin__ object'
79: p PUT 3
82: N NONE
83: t TUPLE (MARK at 33)
84: p PUT 4
87: R REDUCE
88: p PUT 5
91: ( MARK
92: d DICT (MARK at 91)
93: p PUT 6
96: V UNICODE 'value'
103: p PUT 7
106: L LONG 42
111: s SETITEM
112: b BUILD
113: a APPEND
114: g GET 5
117: a APPEND
118: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: c GLOBAL 'copy_reg _reconstructor'
29: q BINPUT 1
31: ( MARK
32: c GLOBAL 'pickletools _Example'
54: q BINPUT 2
56: c GLOBAL '__builtin__ object'
76: q BINPUT 3
78: N NONE
79: t TUPLE (MARK at 31)
80: q BINPUT 4
82: R REDUCE
83: q BINPUT 5
85: } EMPTY_DICT
86: q BINPUT 6
88: X BINUNICODE 'value'
98: q BINPUT 7
100: K BININT1 42
102: s SETITEM
103: b BUILD
104: h BINGET 5
106: e APPENDS (MARK at 3)
107: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
Try protocol 3 with annotations:
>>> dis(pickle.dumps(T, 3), annotate=1)
0: \x80 PROTO 3 Protocol version indicator.
2: ] EMPTY_LIST Push an empty list.
3: q BINPUT 0 Store the stack top into the memo. The stack is not popped.
5: h BINGET 0 Read an object from the memo and push it on the stack.
7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack.
8: q BINPUT 1 Store the stack top into the memo. The stack is not popped.
10: a APPEND Append an object to a list.
11: 0 POP Discard the top stack item, shrinking the stack by one item.
12: h BINGET 1 Read an object from the memo and push it on the stack.
14: . STOP Stop the unpickling machine.
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> import io
>>> f = io.BytesIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
0
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import sys
try:
import doctest
except ImportError:
sys.exit(1)
print(doctest.testmod())
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='disassemble one or more pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-o', '--output', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the output should be written')
parser.add_argument(
'-m', '--memo', action='store_true',
help='preserve memo between disassemblies')
parser.add_argument(
'-l', '--indentlevel', default=4, type=int,
help='the number of blanks by which to indent a new MARK level')
parser.add_argument(
'-a', '--annotate', action='store_true',
help='annotate each line with a short opcode description')
parser.add_argument(
'-p', '--preamble', default="==> {name} <==",
help='if more than one pickle file is specified, print this before'
' each disassembly')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
annotate = 30 if args.annotate else 0
if not args.pickle_file:
parser.print_help()
elif len(args.pickle_file) == 1:
dis(args.pickle_file[0], args.output, None,
args.indentlevel, annotate)
else:
memo = {} if args.memo else None
for f in args.pickle_file:
preamble = args.preamble.format(name=f.name)
args.output.write(preamble + '\n')
dis(f, args.output, memo, args.indentlevel, annotate)
cosmopolitan/third_party/python/Lib/launchpy.py

import sys
from importlib import _bootstrap_external
def run_module_as_main(mod_name):
path = "/zip/.python/%s.pyc" % (mod_name.replace(".", "/"))
loader = _bootstrap_external.SourcelessFileLoader(mod_name, path)
code = loader.get_code(mod_name)
globs = sys.modules["__main__"].__dict__
globs["__name__"] = "__main__"
globs["__file__"] = path[:-1]
globs["__package__"] = None
globs["__loader__"] = loader
globs["__spec__"] = None
exec(code, globs)
return globs
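# A hedged usage sketch (the module name below is hypothetical; it must
# exist as a compiled module under /zip/.python for this to work):
#
#   run_module_as_main("json.tool")
#
# executes /zip/.python/json/tool.pyc with __name__ set to "__main__".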
cosmopolitan/third_party/python/Lib/_markupbase.py | """Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase:
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"_markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in {"attlist", "linktype", "link", "element"}:
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name(i+3, i)
        if j < 0:
            return j
        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
            # look for standard ]]> ending
            match = _markedsectionclose.search(rawdata, i+3)
        elif sectName in {"if", "else", "endif"}:
            # look for MS Office ]> ending
            match = _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in {"attlist", "element", "entity", "notation"}:
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan a name token; return (name, new position), or
    # (None, -1) if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
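# A minimal subclass sketch (illustrative only; html.parser.HTMLParser is
# the intended consumer of this base class). A usable subclass supplies
# rawdata, error(), and whichever handle_*/unknown_decl callbacks it needs:
#
#   >>> class DeclParser(ParserBase):
#   ...     def __init__(self):
#   ...         ParserBase.__init__(self)
#   ...         self.reset()
#   ...         self.rawdata = ''
#   ...     def error(self, message):
#   ...         raise RuntimeError(message)
#   ...     def handle_decl(self, data):
#   ...         print('decl:', data)
#   >>> p = DeclParser()
#   >>> p.rawdata = '<!DOCTYPE html>'
#   >>> p.parse_declaration(0)   # returns the index just past the closing '>'
#   decl: DOCTYPE html
#   15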
cosmopolitan/third_party/python/Lib/bz2.py
"""Interface to the libbzip2 compression library.
This module provides a file interface, classes for incremental
(de)compression, and functions for one-shot (de)compression.
"""
__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor",
"open", "compress", "decompress"]
__author__ = "Nadeem Vawda <[email protected]>"
from builtins import open as _builtin_open
import io
import os
import warnings
import _compression
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
from _bz2 import BZ2Compressor, BZ2Decompressor
_MODE_CLOSED = 0
_MODE_READ = 1
# Value 2 no longer used
_MODE_WRITE = 3
class BZ2File(_compression.BaseStream):
"""A file object providing transparent bzip2 (de)compression.
A BZ2File can act as a wrapper for an existing file object, or refer
directly to a named file on disk.
Note that BZ2File provides a *binary* file interface - data read is
returned as bytes, and data to be written should be given as bytes.
"""
def __init__(self, filename, mode="r", buffering=None, compresslevel=9):
"""Open a bzip2-compressed file.
If filename is a str, bytes, or PathLike object, it gives the
name of the file to be opened. Otherwise, it should be a file
object, which will be used to read or write the compressed data.
mode can be 'r' for reading (default), 'w' for (over)writing,
'x' for creating exclusively, or 'a' for appending. These can
equivalently be given as 'rb', 'wb', 'xb', and 'ab'.
buffering is ignored. Its use is deprecated.
If mode is 'w', 'x' or 'a', compresslevel can be a number between 1
and 9 specifying the level of compression: 1 produces the least
compression, and 9 (default) produces the most compression.
If mode is 'r', the input file may be the concatenation of
multiple compressed streams.
"""
# This lock must be recursive, so that BufferedIOBase's
# writelines() does not deadlock.
self._lock = RLock()
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
if buffering is not None:
warnings.warn("Use of 'buffering' argument is deprecated",
DeprecationWarning)
if not (1 <= compresslevel <= 9):
raise ValueError("compresslevel must be between 1 and 9")
if mode in ("", "r", "rb"):
mode = "rb"
mode_code = _MODE_READ
elif mode in ("w", "wb"):
mode = "wb"
mode_code = _MODE_WRITE
self._compressor = BZ2Compressor(compresslevel)
elif mode in ("x", "xb"):
mode = "xb"
mode_code = _MODE_WRITE
self._compressor = BZ2Compressor(compresslevel)
elif mode in ("a", "ab"):
mode = "ab"
mode_code = _MODE_WRITE
self._compressor = BZ2Compressor(compresslevel)
else:
raise ValueError("Invalid mode: %r" % (mode,))
if isinstance(filename, (str, bytes, os.PathLike)):
self._fp = _builtin_open(filename, mode)
self._closefp = True
self._mode = mode_code
elif hasattr(filename, "read") or hasattr(filename, "write"):
self._fp = filename
self._mode = mode_code
else:
raise TypeError("filename must be a str, bytes, file or PathLike object")
if self._mode == _MODE_READ:
raw = _compression.DecompressReader(self._fp,
BZ2Decompressor, trailing_error=OSError)
self._buffer = io.BufferedReader(raw)
else:
self._pos = 0
def close(self):
"""Flush and close the file.
May be called more than once without error. Once the file is
closed, any other operation on it will raise a ValueError.
"""
with self._lock:
if self._mode == _MODE_CLOSED:
return
try:
if self._mode == _MODE_READ:
self._buffer.close()
elif self._mode == _MODE_WRITE:
self._fp.write(self._compressor.flush())
self._compressor = None
finally:
try:
if self._closefp:
self._fp.close()
finally:
self._fp = None
self._closefp = False
self._mode = _MODE_CLOSED
self._buffer = None
@property
def closed(self):
"""True if this file is closed."""
return self._mode == _MODE_CLOSED
def fileno(self):
"""Return the file descriptor for the underlying file."""
self._check_not_closed()
return self._fp.fileno()
def seekable(self):
"""Return whether the file supports seeking."""
return self.readable() and self._buffer.seekable()
def readable(self):
"""Return whether the file was opened for reading."""
self._check_not_closed()
return self._mode == _MODE_READ
def writable(self):
"""Return whether the file was opened for writing."""
self._check_not_closed()
return self._mode == _MODE_WRITE
def peek(self, n=0):
"""Return buffered data without advancing the file position.
Always returns at least one byte of data, unless at EOF.
The exact number of bytes returned is unspecified.
"""
with self._lock:
self._check_can_read()
# Relies on the undocumented fact that BufferedReader.peek()
# always returns at least one byte (except at EOF), independent
# of the value of n
return self._buffer.peek(n)
def read(self, size=-1):
"""Read up to size uncompressed bytes from the file.
If size is negative or omitted, read until EOF is reached.
Returns b'' if the file is already at EOF.
"""
with self._lock:
self._check_can_read()
return self._buffer.read(size)
def read1(self, size=-1):
"""Read up to size uncompressed bytes, while trying to avoid
making multiple reads from the underlying stream. Reads up to a
buffer's worth of data if size is negative.
Returns b'' if the file is at EOF.
"""
with self._lock:
self._check_can_read()
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size)
def readinto(self, b):
"""Read bytes into b.
Returns the number of bytes read (0 for EOF).
"""
with self._lock:
self._check_can_read()
return self._buffer.readinto(b)
def readline(self, size=-1):
"""Read a line of uncompressed bytes from the file.
The terminating newline (if present) is retained. If size is
non-negative, no more than size bytes will be read (in which
case the line may be incomplete). Returns b'' if already at EOF.
"""
if not isinstance(size, int):
if not hasattr(size, "__index__"):
raise TypeError("Integer argument expected")
size = size.__index__()
with self._lock:
self._check_can_read()
return self._buffer.readline(size)
def readlines(self, size=-1):
"""Read a list of lines of uncompressed bytes from the file.
size can be specified to control the number of lines read: no
further lines will be read once the total size of the lines read
so far equals or exceeds size.
"""
if not isinstance(size, int):
if not hasattr(size, "__index__"):
raise TypeError("Integer argument expected")
size = size.__index__()
with self._lock:
self._check_can_read()
return self._buffer.readlines(size)
def write(self, data):
"""Write a byte string to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.
"""
with self._lock:
self._check_can_write()
compressed = self._compressor.compress(data)
self._fp.write(compressed)
self._pos += len(data)
return len(data)
def writelines(self, seq):
"""Write a sequence of byte strings to the file.
Returns the number of uncompressed bytes written.
seq can be any iterable yielding byte strings.
Line separators are not added between the written byte strings.
"""
with self._lock:
return _compression.BaseStream.writelines(self, seq)
def seek(self, offset, whence=io.SEEK_SET):
"""Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Values for whence are:
0: start of stream (default); offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
Note that seeking is emulated, so depending on the parameters,
this operation may be extremely slow.
"""
with self._lock:
self._check_can_seek()
return self._buffer.seek(offset, whence)
def tell(self):
"""Return the current file position."""
with self._lock:
self._check_not_closed()
if self._mode == _MODE_READ:
return self._buffer.tell()
return self._pos
def open(filename, mode="rb", compresslevel=9,
encoding=None, errors=None, newline=None):
"""Open a bzip2-compressed file in binary or text mode.
The filename argument can be an actual filename (a str, bytes, or
PathLike object), or an existing file object to read from or write
to.
The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or
"ab" for binary mode, or "rt", "wt", "xt" or "at" for text mode.
The default mode is "rb", and the default compresslevel is 9.
For binary mode, this function is equivalent to the BZ2File
constructor: BZ2File(filename, mode, compresslevel). In this case,
the encoding, errors and newline arguments must not be provided.
For text mode, a BZ2File object is created, and wrapped in an
io.TextIOWrapper instance with the specified encoding, error
handling behavior, and line ending(s).
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if errors is not None:
raise ValueError("Argument 'errors' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
bz_mode = mode.replace("t", "")
binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel)
if "t" in mode:
return io.TextIOWrapper(binary_file, encoding, errors, newline)
else:
return binary_file
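# A round-trip sketch for the helper above (the file name is hypothetical):
#
#   >>> import bz2
#   >>> with bz2.open('/tmp/demo.bz2', 'wt', encoding='utf-8') as f:
#   ...     f.write('hello world')
#   11
#   >>> with bz2.open('/tmp/demo.bz2', 'rt', encoding='utf-8') as f:
#   ...     f.read()
#   'hello world'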
def compress(data, compresslevel=9):
"""Compress a block of data.
compresslevel, if given, must be a number between 1 and 9.
For incremental compression, use a BZ2Compressor object instead.
"""
comp = BZ2Compressor(compresslevel)
return comp.compress(data) + comp.flush()
def decompress(data):
"""Decompress a block of data.
For incremental decompression, use a BZ2Decompressor object instead.
"""
results = []
while data:
decomp = BZ2Decompressor()
try:
res = decomp.decompress(data)
except OSError:
if results:
break # Leftover data is not a valid bzip2 stream; ignore it.
else:
raise # Error on the first iteration; bail out.
results.append(res)
if not decomp.eof:
raise ValueError("Compressed data ended before the "
"end-of-stream marker was reached")
data = decomp.unused_data
return b"".join(results)
cosmopolitan/third_party/python/Lib/posixpath.py
"""Common operations on Posix pathnames.
Instead of importing this module directly, import os and refer to
this module as os.path. The "os.path" name is an alias for this
module on Posix systems; on other systems (e.g. Mac, Windows),
os.path provides the same operations in a manner specific to that
platform, and is an alias to another module (e.g. macpath, ntpath).
Some of this can actually be useful on non-Posix systems too, e.g.
for manipulation of the pathname component of URLs.
"""
# Strings representing various path-related bits and pieces.
# These are primarily for export; internally, they are hardcoded.
# Should be set before imports for resolving cyclic dependency.
curdir = '.'
pardir = '..'
extsep = '.'
sep = '/'
pathsep = ':'
defpath = ':/bin:/usr/bin'
altsep = None
devnull = '/dev/null'
import os
import sys
import stat
import cosmo
import ntpath
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
"ismount", "expanduser","expandvars","normpath","abspath",
"samefile","sameopenfile","samestat",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames","relpath",
"commonpath"]
def _get_sep(path):
if isinstance(path, bytes):
return b'/'
else:
return '/'
# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
# On MS-DOS this may also turn slashes into backslashes; however, other
# normalizations (such as optimizing '../' away) are not allowed
# (another function should be defined to do that).
def normcase(s):
"""Normalize case of pathname. Has no effect under Posix"""
s = os.fspath(s)
if not isinstance(s, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not '{}'".format(s.__class__.__name__))
return s
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
def isabs(s):
"""Test whether a path is absolute"""
if cosmo.kernel == 'nt' and '\\' in s:
return ntpath.isabs(s)
s = os.fspath(s)
sep = _get_sep(s)
return s.startswith(sep)
# Join pathnames.
# Ignore the previous parts if a part is absolute.
# Insert a '/' unless the first part is empty or already ends in '/'.
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
will be discarded. An empty last part will result in a path that
ends with a separator."""
if cosmo.kernel == 'nt' and '\\' in a:
return ntpath.join(a, *p)
a = os.fspath(a)
sep = _get_sep(a)
path = a
try:
if not p:
path[:0] + sep #23780: Ensure compatible data type even if p is null.
for b in map(os.fspath, p):
if b.startswith(sep):
path = b
elif not path or path.endswith(sep):
path += b
else:
path += sep + b
except (TypeError, AttributeError, BytesWarning):
genericpath._check_arg_types('join', a, *p)
raise
return path
# Split a path in head (everything up to the last '/') and tail (the
# rest). If the path ends in '/', tail will be empty. If there is no
# '/' in the path, head will be empty.
# Trailing '/'es are stripped from head unless it is the root.
def split(p):
"""Split a pathname. Returns tuple "(head, tail)" where "tail" is
everything after the final slash. Either part may be empty."""
p = os.fspath(p)
sep = _get_sep(p)
i = p.rfind(sep) + 1
head, tail = p[:i], p[i:]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
p = os.fspath(p)
if isinstance(p, bytes):
sep = b'/'
extsep = b'.'
else:
sep = '/'
extsep = '.'
return genericpath._splitext(p, sep, None, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Split a pathname into a drive specification and the rest of the
# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
def splitdrive(p):
"""Split a pathname into drive and path. On Posix, drive is always
empty."""
p = os.fspath(p)
return p[:0], p
# Return the tail (basename) part of a path, same as split(path)[1].
def basename(p):
"""Returns the final component of a pathname"""
p = os.fspath(p)
sep = _get_sep(p)
i = p.rfind(sep) + 1
return p[i:]
# Return the head (dirname) part of a path, same as split(path)[0].
def dirname(p):
"""Returns the directory component of a pathname"""
p = os.fspath(p)
sep = _get_sep(p)
i = p.rfind(sep) + 1
head = p[:i]
if head and head != sep*len(head):
head = head.rstrip(sep)
return head
# Is a path a symbolic link?
# This will always return false on systems where os.lstat doesn't exist.
def islink(path):
"""Test whether a path is a symbolic link"""
try:
st = os.lstat(path)
except (OSError, AttributeError):
return False
return stat.S_ISLNK(st.st_mode)
# Being true for dangling symbolic links is also useful.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
os.lstat(path)
except OSError:
return False
return True
# Is a path a mount point?
# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
def ismount(path):
"""Test whether a path is a mount point"""
try:
s1 = os.lstat(path)
except OSError:
# It doesn't exist -- so not a mount point. :-)
return False
else:
# A symlink can never be a mount point
if stat.S_ISLNK(s1.st_mode):
return False
if isinstance(path, bytes):
parent = join(path, b'..')
else:
parent = join(path, '..')
parent = realpath(parent)
try:
s2 = os.lstat(parent)
except OSError:
return False
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
return True # path/.. on a different device as path
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
return True # path/.. is the same i-node as path
return False
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if cosmo.kernel == 'nt' and '\\' in path:
return ntpath.expanduser(path)
path = os.fspath(path)
if isinstance(path, bytes):
tilde = b'~'
else:
tilde = '~'
if not path.startswith(tilde):
return path
sep = _get_sep(path)
i = path.find(sep, 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
try:
userhome = pwd.getpwuid(os.getuid()).pw_dir
except KeyError:
# bpo-10496: if the current user identifier doesn't exist in the
# password database, return the path unchanged
return path
else:
userhome = os.environ['HOME']
else:
import pwd
name = path[1:i]
if isinstance(name, bytes):
name = str(name, 'ASCII')
try:
pwent = pwd.getpwnam(name)
except KeyError:
# bpo-10496: if the user name from the path doesn't exist in the
# password database, return the path unchanged
return path
userhome = pwent.pw_dir
if isinstance(path, bytes):
userhome = os.fsencode(userhome)
root = b'/'
else:
root = '/'
userhome = userhome.rstrip(root)
return (userhome + path[i:]) or root
# Expand paths containing shell variable substitutions.
# This expands the forms $variable and ${variable} only.
# Non-existent variables are left unchanged.
_varprog = None
_varprogb = None
def expandvars(path):
"""Expand shell variables of form $var and ${var}. Unknown variables
are left unchanged."""
path = os.fspath(path)
global _varprog, _varprogb
if isinstance(path, bytes):
if b'$' not in path:
return path
if not _varprogb:
import re
_varprogb = re.compile(br'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprogb.search
start = b'{'
end = b'}'
environ = getattr(os, 'environb', None)
else:
if '$' not in path:
return path
if not _varprog:
import re
_varprog = re.compile(r'\$(\w+|\{[^}]*\})', re.ASCII)
search = _varprog.search
start = '{'
end = '}'
environ = os.environ
i = 0
while True:
m = search(path, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith(start) and name.endswith(end):
name = name[1:-1]
try:
if environ is None:
value = os.fsencode(os.environ[os.fsdecode(name)])
else:
value = environ[name]
except KeyError:
i = j
else:
tail = path[j:]
path = path[:i] + value
i = len(path)
path += tail
return path
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
path = os.fspath(path)
if isinstance(path, bytes):
sep = b'/'
empty = b''
dot = b'.'
dotdot = b'..'
else:
sep = '/'
empty = ''
dot = '.'
dotdot = '..'
if path == empty:
return dot
initial_slashes = path.startswith(sep)
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith(sep*2) and not path.startswith(sep*3)):
initial_slashes = 2
comps = path.split(sep)
new_comps = []
for comp in comps:
if comp in (empty, dot):
continue
if (comp != dotdot or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == dotdot)):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = sep.join(comps)
if initial_slashes:
path = sep*initial_slashes + path
return path or dot
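# A few illustrative cases for the rules above:
#
#   >>> normpath('A//B/./C/../D')
#   'A/B/D'
#   >>> normpath('../A')
#   '../A'
#   >>> normpath('//A')   # exactly two leading slashes are kept, per POSIX
#   '//A'
#   >>> normpath('///A')  # three or more collapse to one
#   '/A'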
def abspath(path):
"""Return an absolute path."""
path = os.fspath(path)
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# Return a canonical path (i.e. the absolute location of a file on the
# filesystem).
def realpath(filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
filename = os.fspath(filename)
path, ok = _joinrealpath(filename[:0], filename, {})
return abspath(path)
# Join two paths, normalizing and eliminating any symbolic links
# encountered in the second path.
def _joinrealpath(path, rest, seen):
if isinstance(path, bytes):
sep = b'/'
curdir = b'.'
pardir = b'..'
else:
sep = '/'
curdir = '.'
pardir = '..'
if isabs(rest):
rest = rest[1:]
path = sep
while rest:
name, _, rest = rest.partition(sep)
if not name or name == curdir:
# current dir
continue
if name == pardir:
# parent dir
if path:
path, name = split(path)
if name == pardir:
path = join(path, pardir, pardir)
else:
path = pardir
continue
newpath = join(path, name)
if not islink(newpath):
path = newpath
continue
# Resolve the symbolic link
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
# Return already resolved part + rest of the path unchanged.
return join(newpath, rest), False
seen[newpath] = None # not resolved symlink
path, ok = _joinrealpath(path, os.readlink(newpath), seen)
if not ok:
return join(path, rest), False
seen[newpath] = path # resolved symlink
return path, True
supports_unicode_filenames = (sys.platform == 'darwin')
def relpath(path, start=None):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
path = os.fspath(path)
if isinstance(path, bytes):
curdir = b'.'
sep = b'/'
pardir = b'..'
else:
curdir = '.'
sep = '/'
pardir = '..'
if start is None:
start = curdir
else:
start = os.fspath(start)
try:
start_list = [x for x in abspath(start).split(sep) if x]
path_list = [x for x in abspath(path).split(sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
except (TypeError, AttributeError, BytesWarning, DeprecationWarning):
genericpath._check_arg_types('relpath', path, start)
raise
# Return the longest common sub-path of the sequence of paths given as input.
# The paths are not normalized before comparing them (this is the
# responsibility of the caller). Any trailing separator is stripped from the
# returned path.
def commonpath(paths):
"""Given a sequence of path names, returns the longest common sub-path."""
if not paths:
raise ValueError('commonpath() arg is an empty sequence')
paths = tuple(map(os.fspath, paths))
if isinstance(paths[0], bytes):
sep = b'/'
curdir = b'.'
else:
sep = '/'
curdir = '.'
try:
split_paths = [path.split(sep) for path in paths]
try:
isabs, = set(p[:1] == sep for p in paths)
except ValueError:
raise ValueError("Can't mix absolute and relative paths") from None
split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
s1 = min(split_paths)
s2 = max(split_paths)
common = s1
for i, c in enumerate(s1):
if c != s2[i]:
common = s1[:i]
break
prefix = sep if isabs else sep[:0]
return prefix + sep.join(common)
except (TypeError, AttributeError):
genericpath._check_arg_types('commonpath', *paths)
raise
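# Illustrative cases for relpath() and commonpath() (paths are hypothetical):
#
#   >>> relpath('/usr/var/log', '/usr/lib')
#   '../var/log'
#   >>> commonpath(['/usr/lib', '/usr/lib/python3', '/usr/include'])
#   '/usr'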
cosmopolitan/third_party/python/Lib/functools.py
"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>,
# Raymond Hettinger <python at rcn.com>,
# and Łukasz Langa <lukasz at langa.pl>.
# Copyright (C) 2006-2013 Python Software Foundation.
# See C source code for _functools credits/copyright
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial',
'partialmethod', 'singledispatch']
try:
from _functools import reduce
except ImportError:
pass
from abc import get_cache_token
from collections import namedtuple
from types import MappingProxyType
from weakref import WeakKeyDictionary
from reprlib import recursive_repr
try:
from _thread import RLock
except ImportError:
class RLock:
'Dummy reentrant lock for builds without threads'
def __enter__(self): pass
def __exit__(self, exctype, excinst, exctb): pass
################################################################################
### update_wrapper() and wraps() decorator
################################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
'__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Issue #17482: set __wrapped__ last so we don't inadvertently copy it
# from the wrapped function when updating __dict__
wrapper.__wrapped__ = wrapped
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
################################################################################
### total_ordering class decorator
################################################################################
# The total ordering functions all invoke the root magic method directly
# rather than using the corresponding operator. This avoids possible
# infinite recursion that could occur when the operator dispatch logic
# detects a NotImplemented result and then calls a reflected method.
def _gt_from_lt(self, other, NotImplemented=NotImplemented):
'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).'
op_result = self.__lt__(other)
if op_result is NotImplemented:
return op_result
return not op_result and self != other
def _le_from_lt(self, other, NotImplemented=NotImplemented):
'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).'
op_result = self.__lt__(other)
return op_result or self == other
def _ge_from_lt(self, other, NotImplemented=NotImplemented):
'Return a >= b. Computed by @total_ordering from (not a < b).'
op_result = self.__lt__(other)
if op_result is NotImplemented:
return op_result
return not op_result
def _ge_from_le(self, other, NotImplemented=NotImplemented):
'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).'
op_result = self.__le__(other)
if op_result is NotImplemented:
return op_result
return not op_result or self == other
def _lt_from_le(self, other, NotImplemented=NotImplemented):
'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).'
op_result = self.__le__(other)
if op_result is NotImplemented:
return op_result
return op_result and self != other
def _gt_from_le(self, other, NotImplemented=NotImplemented):
'Return a > b. Computed by @total_ordering from (not a <= b).'
op_result = self.__le__(other)
if op_result is NotImplemented:
return op_result
return not op_result
def _lt_from_gt(self, other, NotImplemented=NotImplemented):
'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).'
op_result = self.__gt__(other)
if op_result is NotImplemented:
return op_result
return not op_result and self != other
def _ge_from_gt(self, other, NotImplemented=NotImplemented):
'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).'
op_result = self.__gt__(other)
return op_result or self == other
def _le_from_gt(self, other, NotImplemented=NotImplemented):
'Return a <= b. Computed by @total_ordering from (not a > b).'
op_result = self.__gt__(other)
if op_result is NotImplemented:
return op_result
return not op_result
def _le_from_ge(self, other, NotImplemented=NotImplemented):
'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).'
op_result = self.__ge__(other)
if op_result is NotImplemented:
return op_result
return not op_result or self == other
def _gt_from_ge(self, other, NotImplemented=NotImplemented):
'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).'
op_result = self.__ge__(other)
if op_result is NotImplemented:
return op_result
return op_result and self != other
def _lt_from_ge(self, other, NotImplemented=NotImplemented):
'Return a < b. Computed by @total_ordering from (not a >= b).'
op_result = self.__ge__(other)
if op_result is NotImplemented:
return op_result
return not op_result
_convert = {
'__lt__': [('__gt__', _gt_from_lt),
('__le__', _le_from_lt),
('__ge__', _ge_from_lt)],
'__le__': [('__ge__', _ge_from_le),
('__lt__', _lt_from_le),
('__gt__', _gt_from_le)],
'__gt__': [('__lt__', _lt_from_gt),
('__ge__', _ge_from_gt),
('__le__', _le_from_gt)],
'__ge__': [('__le__', _le_from_ge),
('__gt__', _gt_from_ge),
('__lt__', _lt_from_ge)]
}
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
# Find user-defined comparisons (not those inherited from object).
roots = [op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)]
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in _convert[root]:
if opname not in roots:
opfunc.__name__ = opname
setattr(cls, opname, opfunc)
return cls
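# A usage sketch: defining __eq__ and one ordering method lets the decorator
# above supply the rest.
#
#   >>> @total_ordering
#   ... class Version:
#   ...     def __init__(self, n):
#   ...         self.n = n
#   ...     def __eq__(self, other):
#   ...         return self.n == other.n
#   ...     def __lt__(self, other):
#   ...         return self.n < other.n
#   >>> Version(1) <= Version(2), Version(3) > Version(2)
#   (True, True)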
################################################################################
### cmp_to_key() function converter
################################################################################
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
__hash__ = None
return K
try:
from _functools import cmp_to_key
except ImportError:
pass
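# A usage sketch: adapting a three-way cmp function (here one that sorts in
# descending order) to a key= callable.
#
#   >>> def reverse_cmp(a, b):
#   ...     return (a < b) - (a > b)
#   >>> sorted([3, 1, 2], key=cmp_to_key(reverse_cmp))
#   [3, 2, 1]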
################################################################################
### partial() argument application
################################################################################
# Purely functional, no descriptor behaviour
class partial:
"""New function with partial application of the given arguments
and keywords.
"""
__slots__ = "func", "args", "keywords", "__dict__", "__weakref__"
def __new__(*args, **keywords):
if not args:
raise TypeError("descriptor '__new__' of partial needs an argument")
if len(args) < 2:
raise TypeError("type 'partial' takes at least one argument")
cls, func, *args = args
if not callable(func):
raise TypeError("the first argument must be callable")
args = tuple(args)
if hasattr(func, "func"):
args = func.args + args
tmpkw = func.keywords.copy()
tmpkw.update(keywords)
keywords = tmpkw
del tmpkw
func = func.func
self = super(partial, cls).__new__(cls)
self.func = func
self.args = args
self.keywords = keywords
return self
def __call__(*args, **keywords):
if not args:
raise TypeError("descriptor '__call__' of partial needs an argument")
self, *args = args
newkeywords = self.keywords.copy()
newkeywords.update(keywords)
return self.func(*self.args, *args, **newkeywords)
@recursive_repr()
def __repr__(self):
qualname = type(self).__qualname__
args = [repr(self.func)]
args.extend(repr(x) for x in self.args)
args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items())
if type(self).__module__ == "functools":
return f"functools.{qualname}({', '.join(args)})"
return f"{qualname}({', '.join(args)})"
def __reduce__(self):
return type(self), (self.func,), (self.func, self.args,
self.keywords or None, self.__dict__ or None)
def __setstate__(self, state):
if not isinstance(state, tuple):
raise TypeError("argument to __setstate__ must be a tuple")
if len(state) != 4:
raise TypeError(f"expected 4 items in state, got {len(state)}")
func, args, kwds, namespace = state
if (not callable(func) or not isinstance(args, tuple) or
(kwds is not None and not isinstance(kwds, dict)) or
(namespace is not None and not isinstance(namespace, dict))):
raise TypeError("invalid partial state")
args = tuple(args) # just in case it's a subclass
if kwds is None:
kwds = {}
elif type(kwds) is not dict: # XXX does it need to be *exactly* dict?
kwds = dict(kwds)
if namespace is None:
namespace = {}
self.__dict__ = namespace
self.func = func
self.args = args
self.keywords = kwds
try:
from _functools import partial
except ImportError:
pass
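# A usage sketch for partial(): freezing a keyword argument of int() yields
# a base-2 parser.
#
#   >>> int2 = partial(int, base=2)
#   >>> int2('1010')
#   10
#   >>> int2
#   functools.partial(<class 'int'>, base=2)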
# Descriptor version
class partialmethod(object):
"""Method descriptor with partial application of the given arguments
and keywords.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func, *args, **keywords):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError("{!r} is not callable or a descriptor"
.format(func))
# func could be a descriptor like classmethod which isn't callable,
# so we can't inherit from partial (it verifies func is callable)
if isinstance(func, partialmethod):
# flattening is mandatory in order to place cls/self before all
# other arguments
# it's also more efficient since only one function will be called
self.func = func.func
self.args = func.args + args
self.keywords = func.keywords.copy()
self.keywords.update(keywords)
else:
self.func = func
self.args = args
self.keywords = keywords
def __repr__(self):
args = ", ".join(map(repr, self.args))
keywords = ", ".join("{}={!r}".format(k, v)
for k, v in self.keywords.items())
format_string = "{module}.{cls}({func}, {args}, {keywords})"
return format_string.format(module=self.__class__.__module__,
cls=self.__class__.__qualname__,
func=self.func,
args=args,
keywords=keywords)
def _make_unbound_method(self):
def _method(*args, **keywords):
call_keywords = self.keywords.copy()
call_keywords.update(keywords)
cls_or_self, *rest = args
call_args = (cls_or_self,) + self.args + tuple(rest)
return self.func(*call_args, **call_keywords)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method._partialmethod = self
return _method
def __get__(self, obj, cls):
get = getattr(self.func, "__get__", None)
result = None
if get is not None:
new_func = get(obj, cls)
if new_func is not self.func:
# Assume __get__ returning something new indicates the
# creation of an appropriate callable
result = partial(new_func, *self.args, **self.keywords)
try:
result.__self__ = new_func.__self__
except AttributeError:
pass
if result is None:
# If the underlying descriptor didn't do anything, treat this
# like an instance method
result = self._make_unbound_method().__get__(obj, cls)
return result
@property
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
################################################################################
### LRU Cache function decorator
################################################################################
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
""" This class guarantees that hash() will be called no more than once
per element. This is important because the lru_cache() will hash
the key multiple times on a cache miss.
"""
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
tuple=tuple, type=type, len=len):
"""Make a cache key from optionally typed positional and keyword arguments
    The key is constructed in a way that is as flat as possible rather than
as a nested structure that would take more memory.
If there is only a single argument and its data type is known to cache
its hash value, then that argument is returned without a wrapper. This
saves space and improves lookup speed.
"""
key = args
if kwds:
key += kwd_mark
for item in kwds.items():
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for v in kwds.values())
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=128, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
# Early detection of an erroneous call to @lru_cache without any arguments
# resulting in the inner function being passed to maxsize instead of an
# integer or None.
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
def decorating_function(user_function):
wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
return update_wrapper(wrapper, user_function)
return decorating_function
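# A usage sketch for the decorator above; the memoized Fibonacci function is
# the classic demonstration.
#
#   >>> @lru_cache(maxsize=None)
#   ... def fib(n):
#   ...     return n if n < 2 else fib(n - 1) + fib(n - 2)
#   >>> fib(30)
#   832040
#   >>> fib.cache_info()
#   CacheInfo(hits=28, misses=31, maxsize=None, currsize=31)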
def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
cache = {}
hits = misses = 0
full = False
cache_get = cache.get # bound method to lookup a key or return None
cache_len = cache.__len__ # get cache size without calling len()
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
nonlocal misses
result = user_function(*args, **kwds)
misses += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
nonlocal hits, misses
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses += 1
return result
else:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
nonlocal root, hits, misses, full
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
hits += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full:
# Use the old root to store the new key and result.
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
root = oldroot[NEXT]
oldkey = root[KEY]
oldresult = root[RESULT]
root[KEY] = root[RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
# Use the cache_len bound method instead of the len() function
# which could potentially be wrapped in an lru_cache itself.
full = (cache_len() >= maxsize)
misses += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits, misses, maxsize, cache_len())
def cache_clear():
"""Clear the cache and cache statistics"""
nonlocal hits, misses, full
with lock:
cache.clear()
root[:] = [root, root, None, None]
hits = misses = 0
full = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
try:
from _functools import _lru_cache_wrapper
except ImportError:
pass
################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################
def _c3_merge(sequences):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if candidate is None:
raise RuntimeError("Inconsistent hierarchy")
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
def _c3_mro(cls, abcs=None):
"""Computes the method resolution order using extended C3 linearization.
If no *abcs* are given, the algorithm works exactly like the built-in C3
linearization used for method resolution.
If given, *abcs* is a list of abstract base classes that should be inserted
into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
result. The algorithm inserts ABCs where their functionality is introduced,
i.e. issubclass(cls, abc) returns True for the class itself but returns
False for all its direct base classes. Implicit ABCs for a given class
(either registered or inferred from the presence of a special method like
__len__) are inserted directly after the last ABC explicitly listed in the
MRO of said class. If two implicit ABCs end up next to each other in the
resulting MRO, their ordering depends on the order of types in *abcs*.
"""
for i, base in enumerate(reversed(cls.__bases__)):
if hasattr(base, '__abstractmethods__'):
boundary = len(cls.__bases__) - i
break # Bases up to the last explicit ABC are considered first.
else:
boundary = 0
abcs = list(abcs) if abcs else []
explicit_bases = list(cls.__bases__[:boundary])
abstract_bases = []
other_bases = list(cls.__bases__[boundary:])
for base in abcs:
if issubclass(cls, base) and not any(
issubclass(b, base) for b in cls.__bases__
):
# If *cls* is the class that introduces behaviour described by
# an ABC *base*, insert said ABC to its MRO.
abstract_bases.append(base)
for base in abstract_bases:
abcs.remove(base)
explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
return _c3_merge(
[[cls]] +
explicit_c3_mros + abstract_c3_mros + other_c3_mros +
[explicit_bases] + [abstract_bases] + [other_bases]
)
def _compose_mro(cls, types):
"""Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm.
"""
bases = set(cls.__mro__)
# Remove entries which are already present in the __mro__ or unrelated.
def is_related(typ):
return (typ not in bases and hasattr(typ, '__mro__')
and issubclass(cls, typ))
types = [n for n in types if is_related(n)]
# Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
def is_strict_base(typ):
for other in types:
if typ != other and typ in other.__mro__:
return True
return False
types = [n for n in types if not is_strict_base(n)]
# Subclasses of the ABCs in *types* which are also implemented by
# *cls* can be used to stabilize ABC ordering.
type_set = set(types)
mro = []
for typ in types:
found = []
for sub in typ.__subclasses__():
if sub not in bases and issubclass(cls, sub):
found.append([s for s in sub.__mro__ if s in type_set])
if not found:
mro.append(typ)
continue
# Favor subclasses with the biggest number of useful bases
found.sort(key=len, reverse=True)
for sub in found:
for subcls in sub:
if subcls not in mro:
mro.append(subcls)
return _c3_mro(cls, abcs=mro)
def _find_impl(cls, registry):
"""Returns the best matching implementation from *registry* for type *cls*.
Where there is no registered implementation for a specific type, its method
resolution order is used to find a more generic implementation.
Note: if *registry* does not contain an implementation for the base
*object* type, this function may return None.
"""
mro = _compose_mro(cls, registry.keys())
match = None
for t in mro:
if match is not None:
# If *match* is an implicit ABC but there is another unrelated,
# equally matching implicit ABC, refuse the temptation to guess.
if (t in registry and t not in cls.__mro__
and match not in cls.__mro__
and not issubclass(match, t)):
raise RuntimeError("Ambiguous dispatch: {} or {}".format(
match, t))
break
if t in registry:
match = t
return registry.get(match)
def singledispatch(func):
"""Single-dispatch generic function decorator.
Transforms a function into a generic function, which can have different
behaviours depending upon the type of its first argument. The decorated
function acts as the default implementation, and additional
implementations can be registered using the register() attribute of the
generic function.
"""
registry = {}
dispatch_cache = WeakKeyDictionary()
cache_token = None
def dispatch(cls):
"""generic_func.dispatch(cls) -> <function implementation>
Runs the dispatch algorithm to return the best available implementation
for the given *cls* registered on *generic_func*.
"""
nonlocal cache_token
if cache_token is not None:
current_token = get_cache_token()
if cache_token != current_token:
dispatch_cache.clear()
cache_token = current_token
try:
impl = dispatch_cache[cls]
except KeyError:
try:
impl = registry[cls]
except KeyError:
impl = _find_impl(cls, registry)
dispatch_cache[cls] = impl
return impl
def register(cls, func=None):
"""generic_func.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_func*.
"""
nonlocal cache_token
if func is None:
return lambda f: register(cls, f)
registry[cls] = func
if cache_token is None and hasattr(cls, '__abstractmethods__'):
cache_token = get_cache_token()
dispatch_cache.clear()
return func
def wrapper(*args, **kw):
if not args:
raise TypeError(f'{funcname} requires at least '
'1 positional argument')
return dispatch(args[0].__class__)(*args, **kw)
funcname = getattr(func, '__name__', 'singledispatch function')
registry[object] = func
wrapper.register = register
wrapper.dispatch = dispatch
wrapper.registry = MappingProxyType(registry)
wrapper._clear_cache = dispatch_cache.clear
update_wrapper(wrapper, func)
return wrapper
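# Usage sketch (editor's addition, not part of the original module; assumes
# this file is importable as functools):
#
#   >>> @singledispatch
#   ... def describe(obj):
#   ...     return 'something else'
#   >>> @describe.register(int)
#   ... def _(obj):
#   ...     return 'an int'
#   >>> describe(42)
#   'an int'
#   >>> describe('spam')
#   'something else'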
if __name__ == 'PYOBJ.COM':
import _functools
# cosmopolitan/third_party/python/Lib/socket.py
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io, selectors
from enum import IntEnum, IntFlag
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection",
"AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert(
'AddressFamily',
__name__,
lambda C: C.isupper() and C.startswith('AF_'))
IntEnum._convert(
'SocketKind',
__name__,
lambda C: C.isupper() and C.startswith('SOCK_'))
IntFlag._convert(
'MsgFlag',
__name__,
lambda C: C.isupper() and C.startswith('MSG_'))
IntFlag._convert(
'AddressInfo',
__name__,
lambda C: C.isupper() and C.startswith('AI_'))
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
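# For example, on most Unix platforms _intenum_converter(1, AddressFamily)
# yields <AddressFamily.AF_UNIX: 1>, while an unrecognized value such as
# 12345 is passed back unchanged (editor's note; the exact numeric values
# vary by platform).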
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network." # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class _GiveupOnSendfile(Exception): pass
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
# For user code address family and type values are IntEnum members, but
# for the underlying _socket.socket they're just integers. The
# constructor of _socket.socket converts the given argument to an
# integer automatically.
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name and socket
address(es).
"""
closed = getattr(self, '_closed', False)
s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
% (self.__class__.__module__,
self.__class__.__qualname__,
" [closed]" if closed else "",
self.fileno(),
self.family,
self.type,
self.proto)
if not closed:
try:
laddr = self.getsockname()
if laddr:
s += ", laddr=%s" % str(laddr)
except error:
pass
try:
raddr = self.getpeername()
if raddr:
s += ", raddr=%s" % str(raddr)
except error:
pass
s += '>'
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Duplicate the socket. Return a new socket object connected to the same
system resource. The new socket is non-inheritable.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
# If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
# new socket. We do not currently allow passing SOCK_NONBLOCK to
# accept4, so the returned socket is always blocking.
type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
sock = socket(self.family, type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
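    # Usage sketch (editor's addition, not in the original): a minimal
    # blocking accept; *conn* below is reused in later sketches.
    #
    #   >>> srv = socket(AF_INET, SOCK_STREAM)
    #   >>> srv.bind(('127.0.0.1', 0))
    #   >>> srv.listen()
    #   >>> conn, addr = srv.accept()   # blocks until a client connects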
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename, except the only
supported mode values are 'r' (default), 'w' and 'b'.
"""
# XXX refactor to share code?
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
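    # Usage sketch (editor's addition, not in the original): wrap a
    # connected socket in a buffered text stream.
    #
    #   >>> f = conn.makefile('r', encoding='utf-8')
    #   >>> line = f.readline()
    #   >>> f.close()   # the socket itself stays open until conn.close()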
if hasattr(os, 'sendfile'):
def _sendfile_use_sendfile(self, file, offset=0, count=None):
self._check_sendfile_params(file, offset, count)
sockno = self.fileno()
try:
fileno = file.fileno()
except (AttributeError, io.UnsupportedOperation) as err:
raise _GiveupOnSendfile(err) # not a regular file
try:
fsize = os.fstat(fileno).st_size
except OSError as err:
raise _GiveupOnSendfile(err) # not a regular file
if not fsize:
return 0 # empty file
blocksize = fsize if not count else count
timeout = self.gettimeout()
if timeout == 0:
raise ValueError("non-blocking sockets are not supported")
# poll/select have the advantage of not requiring any
# extra file descriptor, contrarily to epoll/kqueue
# (also, they require a single syscall).
if hasattr(selectors, 'PollSelector'):
selector = selectors.PollSelector()
else:
selector = selectors.SelectSelector()
selector.register(sockno, selectors.EVENT_WRITE)
total_sent = 0
# localize variable access to minimize overhead
selector_select = selector.select
os_sendfile = os.sendfile
try:
while True:
if timeout and not selector_select(timeout):
raise _socket.timeout('timed out')
if count:
blocksize = count - total_sent
if blocksize <= 0:
break
try:
sent = os_sendfile(sockno, fileno, offset, blocksize)
except BlockingIOError:
if not timeout:
# Block until the socket is ready to send some
# data; avoids hogging CPU resources.
selector_select()
continue
except OSError as err:
if total_sent == 0:
# We can get here for different reasons, the main
# one being 'file' is not a regular mmap(2)-like
# file, in which case we'll fall back on using
# plain send().
raise _GiveupOnSendfile(err)
raise err from None
else:
if sent == 0:
break # EOF
offset += sent
total_sent += sent
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset)
else:
def _sendfile_use_sendfile(self, file, offset=0, count=None):
raise _GiveupOnSendfile(
"os.sendfile() not available on this platform")
def _sendfile_use_send(self, file, offset=0, count=None):
self._check_sendfile_params(file, offset, count)
if self.gettimeout() == 0:
raise ValueError("non-blocking sockets are not supported")
if offset:
file.seek(offset)
blocksize = min(count, 8192) if count else 8192
total_sent = 0
# localize variable access to minimize overhead
file_read = file.read
sock_send = self.send
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
data = memoryview(file_read(blocksize))
if not data:
break # EOF
while True:
try:
sent = sock_send(data)
except BlockingIOError:
continue
else:
total_sent += sent
if sent < len(data):
data = data[sent:]
else:
break
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not self.type & SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
def sendfile(self, file, offset=0, count=None):
"""sendfile(file[, offset[, count]]) -> sent
Send a file until EOF is reached by using high-performance
os.sendfile() and return the total number of bytes which
were sent.
*file* must be a regular file object opened in binary mode.
If os.sendfile() is not available (e.g. Windows) or file is
not a regular file socket.send() will be used instead.
*offset* tells from where to start reading the file.
If specified, *count* is the total number of bytes to transmit
as opposed to sending the file until EOF is reached.
File position is updated on return or also in case of error in
which case file.tell() can be used to figure out the number of
bytes which were sent.
The socket must be of SOCK_STREAM type.
Non-blocking sockets are not supported.
"""
try:
return self._sendfile_use_sendfile(file, offset, count)
except _GiveupOnSendfile:
return self._sendfile_use_send(file, offset, count)
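    # Usage sketch (editor's addition, not in the original; the file name
    # is hypothetical):
    #
    #   >>> with open('payload.bin', 'rb') as f:
    #   ...     sent = conn.sendfile(f)  # zero-copy where os.sendfile exists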
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
@property
def family(self):
"""Read-only access to the address family for this socket.
"""
return _intenum_converter(super().family, AddressFamily)
@property
def type(self):
"""Read-only access to the socket type.
"""
return _intenum_converter(super().type, SocketKind)
if os.name == 'nt':
def get_inheritable(self):
return os.get_handle_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_handle_inheritable(self.fileno(), inheritable)
else:
def get_inheritable(self):
return os.get_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_inheritable(self.fileno(), inheritable)
get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
Create a socket object from the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
__all__.append("fromshare")
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
else:
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
if family == AF_INET:
host = _LOCALHOST
elif family == AF_INET6:
host = _LOCALHOST_V6
else:
raise ValueError("Only AF_INET and AF_INET6 socket address families "
"are supported")
if type != SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with
# setblocking(False) that prevents us from having to create a thread.
lsock = socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen()
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
__all__.append("socketpair")
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't use read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
        If *b* is non-empty, a 0 return value indicates that the connection
        was shut down at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise OSError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
if hasattr(self._sock, '_decref_socketios'):
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases. In case no FQDN is available, the hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
# Break explicitly a reference cycle
err = None
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
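# Usage sketch (editor's addition, not in the original; the host is
# hypothetical):
#
#   >>> with create_connection(('example.com', 80), timeout=5.0) as conn:
#   ...     conn.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
#   ...     reply = conn.recv(1024)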
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
"""
# We override this function since we want to translate the numeric family
# and socket type values to enum constants.
addrlist = []
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
af, socktype, proto, canonname, sa = res
addrlist.append((_intenum_converter(af, AddressFamily),
_intenum_converter(socktype, SocketKind),
proto, canonname, sa))
return addrlist
if __name__ == 'PYOBJ.COM':
AF_APPLETALK = 0
AF_ASH = 0
AF_ATMPVC = 0
AF_ATMSVC = 0
AF_AX25 = 0
AF_BRIDGE = 0
AF_CAN = 0
AF_ECONET = 0
AF_INET = 0
AF_INET6 = 0
AF_IPX = 0
AF_IRDA = 0
AF_KEY = 0
AF_LLC = 0
AF_NETBEUI = 0
AF_NETROM = 0
AF_PACKET = 0
AF_PPPOX = 0
AF_ROSE = 0
AF_ROUTE = 0
AF_SECURITY = 0
AF_SNA = 0
AF_UNIX = 0
AF_UNSPEC = 0
AF_X25 = 0
AI_ADDRCONFIG = 0
AI_ALL = 0
AI_CANONNAME = 0
AI_NUMERICHOST = 0
AI_NUMERICSERV = 0
AI_PASSIVE = 0
AI_V4MAPPED = 0
CAPI = 0
EAI_ADDRFAMILY = 0
EAI_AGAIN = 0
EAI_BADFLAGS = 0
EAI_FAIL = 0
EAI_FAMILY = 0
EAI_MEMORY = 0
EAI_NODATA = 0
EAI_NONAME = 0
EAI_OVERFLOW = 0
EAI_SERVICE = 0
EAI_SOCKTYPE = 0
EAI_SYSTEM = 0
INADDR_ALLHOSTS_GROUP = 0
INADDR_ANY = 0
INADDR_BROADCAST = 0
INADDR_LOOPBACK = 0
INADDR_MAX_LOCAL_GROUP = 0
INADDR_NONE = 0
INADDR_UNSPEC_GROUP = 0
IPPORT_RESERVED = 0
IPPORT_USERRESERVED = 0
IPPROTO_AH = 0
IPPROTO_DSTOPTS = 0
IPPROTO_EGP = 0
IPPROTO_ESP = 0
IPPROTO_FRAGMENT = 0
IPPROTO_GRE = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 0
IPPROTO_ICMPV6 = 0
IPPROTO_IDP = 0
IPPROTO_IGMP = 0
IPPROTO_IP = 0
IPPROTO_IPIP = 0
IPPROTO_IPV6 = 0
IPPROTO_MAX = 0
IPPROTO_NONE = 0
IPPROTO_PIM = 0
IPPROTO_PUP = 0
IPPROTO_RAW = 0
IPPROTO_ROUTING = 0
IPPROTO_RSVP = 0
IPPROTO_SCTP = 0
IPPROTO_TCP = 0
IPPROTO_TP = 0
IPPROTO_UDP = 0
IP_ADD_MEMBERSHIP = 0
IP_DEFAULT_MULTICAST_LOOP = 0
IP_DEFAULT_MULTICAST_TTL = 0
IP_DROP_MEMBERSHIP = 0
IP_HDRINCL = 0
IP_MAX_MEMBERSHIPS = 0
IP_MULTICAST_IF = 0
IP_MULTICAST_LOOP = 0
IP_MULTICAST_TTL = 0
IP_OPTIONS = 0
IP_RECVOPTS = 0
IP_RECVRETOPTS = 0
IP_RETOPTS = 0
IP_TOS = 0
IP_TRANSPARENT = 0
IP_TTL = 0
MSG_CMSG_CLOEXEC = 0
MSG_CONFIRM = 0
MSG_CTRUNC = 0
MSG_DONTROUTE = 0
MSG_DONTWAIT = 0
MSG_EOF = 0
MSG_EOR = 0
MSG_ERRQUEUE = 0
MSG_FASTOPEN = 0
MSG_MORE = 0
MSG_NOSIGNAL = 0
MSG_NOTIFICATION = 0
MSG_OOB = 0
MSG_PEEK = 0
MSG_TRUNC = 0
MSG_WAITALL = 0
NI_DGRAM = 0
NI_MAXHOST = 0
NI_MAXSERV = 0
NI_NAMEREQD = 0
NI_NOFQDN = 0
NI_NUMERICHOST = 0
NI_NUMERICSERV = 0
PF_CAN = 0
PF_PACKET = 0
PF_RDS = 0
SHUT_RD = 0
SHUT_RDWR = 0
SHUT_WR = 0
SOCK_CLOEXEC = 0
SOCK_DGRAM = 0
SOCK_NONBLOCK = 0
SOCK_RAW = 0
SOCK_RDM = 0
SOCK_SEQPACKET = 0
SOCK_STREAM = 0
SOL_IP = 0
SOL_RDS = 0
SOL_SOCKET = 0
SOL_TCP = 0
SOL_UDP = 0
SOMAXCONN = 0
SO_ACCEPTCONN = 0
SO_BINDTODEVICE = 0
SO_BROADCAST = 0
SO_DEBUG = 0
SO_DOMAIN = 0
SO_DONTROUTE = 0
SO_ERROR = 0
SO_KEEPALIVE = 0
SO_LINGER = 0
SO_MARK = 0
SO_OOBINLINE = 0
SO_PASSCRED = 0
SO_PASSSEC = 0
SO_PEERCRED = 0
SO_PEERSEC = 0
SO_PRIORITY = 0
SO_PROTOCOL = 0
SO_RCVBUF = 0
SO_RCVLOWAT = 0
SO_RCVTIMEO = 0
SO_REUSEADDR = 0
SO_REUSEPORT = 0
SO_SNDBUF = 0
SO_SNDLOWAT = 0
SO_SNDTIMEO = 0
SO_TYPE = 0
SocketType = 0
TCP_CONGESTION = 0
TCP_CORK = 0
TCP_DEFER_ACCEPT = 0
TCP_FASTOPEN = 0
TCP_FASTOPEN_CONNECT = 0
TCP_INFO = 0
TCP_KEEPCNT = 0
TCP_KEEPIDLE = 0
TCP_KEEPINTVL = 0
TCP_LINGER2 = 0
TCP_MAXSEG = 0
TCP_NODELAY = 0
TCP_QUICKACK = 0
TCP_SAVED_SYN = 0
TCP_SAVE_SYN = 0
TCP_SYNCNT = 0
TCP_USER_TIMEOUT = 0
TCP_WINDOW_CLAMP = 0
dup = 0
error = 0
gaierror = 0
getaddrinfo = 0
getdefaulttimeout = 0
gethostbyaddr = 0
gethostbyname = 0
gethostbyname_ex = 0
gethostname = 0
getnameinfo = 0
getprotobyname = 0
getservbyname = 0
getservbyport = 0
has_ipv6 = 0
herror = 0
htonl = 0
htons = 0
inet_aton = 0
inet_ntoa = 0
inet_ntop = 0
inet_pton = 0
ntohl = 0
ntohs = 0
setdefaulttimeout = 0
sethostname = 0
socket = 0
socketpair = 0
timeout = 0
# cosmopolitan/third_party/python/Lib/__future__.py
"""Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Feature instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
all_feature_names = [
"nested_scopes",
"generators",
"division",
"absolute_import",
"with_statement",
"print_function",
"unicode_literals",
"barry_as_FLUFL",
"generator_stop",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals
CO_FUTURE_BARRY_AS_BDFL = 0x40000
CO_FUTURE_GENERATOR_STOP = 0x80000 # StopIteration becomes RuntimeError in generators
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional,
self.mandatory,
self.compiler_flag))
nested_scopes = _Feature((2, 1, 0, "beta", 1),
(2, 2, 0, "alpha", 0),
CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1),
(2, 3, 0, "final", 0),
CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
(3, 0, 0, "alpha", 0),
CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
(2, 6, 0, "alpha", 0),
CO_FUTURE_WITH_STATEMENT)
print_function = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_PRINT_FUNCTION)
unicode_literals = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_UNICODE_LITERALS)
barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
(3, 9, 0, "alpha", 0),
CO_FUTURE_BARRY_AS_BDFL)
generator_stop = _Feature((3, 5, 0, "beta", 1),
(3, 7, 0, "alpha", 0),
CO_FUTURE_GENERATOR_STOP)
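# Usage sketch (editor's addition, not part of the original module): the
# compiler_flag attribute feeds the *flags* argument of the builtin
# compile().
#
#   >>> import __future__
#   >>> __future__.division.getOptionalRelease()
#   (2, 2, 0, 'alpha', 2)
#   >>> flags = __future__.generator_stop.compiler_flag
#   >>> code = compile(source, '<string>', 'exec', flags)  # source: any str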
# cosmopolitan/third_party/python/Lib/datetime.py
"""Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
import sys
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
# -1 is a placeholder for indexing purposes.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes.
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
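# Worked example (editor's addition, not in the original): for 1-Mar-2016,
# _days_before_year(2016) = 2015*365 + 2015//4 - 2015//100 + 2015//400
#                         = 735475 + 503 - 20 + 5 = 735963,
# _days_before_month(2016, 3) = 59 + 1 (leap year) = 60, and so
# _ymd2ord(2016, 3, 1) = 735963 + 60 + 1 = 736024.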
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us, timespec='auto'):
specs = {
'hours': '{:02d}',
'minutes': '{:02d}:{:02d}',
'seconds': '{:02d}:{:02d}:{:02d}',
'milliseconds': '{:02d}:{:02d}:{:02d}.{:03d}',
'microseconds': '{:02d}:{:02d}:{:02d}.{:06d}'
}
if timespec == 'auto':
# Skip trailing microseconds when us==0.
timespec = 'microseconds' if us else 'seconds'
elif timespec == 'milliseconds':
us //= 1000
try:
fmt = specs[timespec]
except KeyError:
raise ValueError('Unknown timespec value')
else:
return fmt.format(hh, mm, ss, us)
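# For example (editor's addition): _format_time(12, 34, 56, 0) gives
# '12:34:56' (timespec 'auto' drops zero microseconds), while
# _format_time(12, 34, 56, 789000, 'milliseconds') gives '12:34:56.789'.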
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, m = divmod(offset, timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
# strftime is going to have at this: escape %
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range and for being a whole number
# of seconds; if it isn't, ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of seconds, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be strictly between "
"-timedelta(hours=24) and timedelta(hours=24)" %
(name, offset))
def _check_int_field(value):
if isinstance(value, int):
return value
if not isinstance(value, float):
try:
value = value.__int__()
except AttributeError:
pass
else:
if isinstance(value, int):
return value
raise TypeError('__int__ returned non-int (type %s)' %
type(value).__name__)
raise TypeError('an integer is required (got type %s)' %
type(value).__name__)
raise TypeError('integer argument expected, got float')
def _check_date_fields(year, month, day):
year = _check_int_field(year)
month = _check_int_field(month)
day = _check_int_field(day)
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
return year, month, day
def _check_time_fields(hour, minute, second, microsecond, fold):
hour = _check_int_field(hour)
minute = _check_int_field(minute)
second = _check_int_field(second)
microsecond = _check_int_field(microsecond)
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
if fold not in (0, 1):
raise ValueError('fold must be either 0 or 1', fold)
return hour, minute, second, microsecond, fold
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
# Based on the reference implementation for divmod_near
# in Objects/longobject.c.
q, r = divmod(a, b)
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
# positive, 2 * r < b if b negative.
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
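# For example (editor's addition): _divide_and_round(7, 2) == 4 and
# _divide_and_round(5, 2) == 2 -- both 3.5 and 2.5 round to the nearest
# even integer.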
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds = round(microseconds + usdouble)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
else:
microseconds = int(microseconds)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
microseconds = round(microseconds + usdouble)
assert isinstance(s, int)
assert isinstance(microseconds, int)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
seconds, us = divmod(microseconds, 1000000)
s += seconds
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
self._hashcode = -1
return self
def __repr__(self):
if self._microseconds:
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._days,
self._seconds,
self._microseconds)
if self._seconds:
return "%s.%s(%d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._days,
self._seconds)
return "%s.%s(%d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._days)
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds) * 10**6 +
self.microseconds) / 10**6
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
usec = self._to_microseconds()
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(usec * a, b))
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, _divide_and_round(usec, other))
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(b * usec, a))
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
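# Usage sketch (editor's addition, not in the original):
#
#   >>> timedelta(days=1, hours=12) / timedelta(hours=6)
#   6.0
#   >>> timedelta(minutes=90) == timedelta(hours=1.5)
#   True
#   >>> str(timedelta(seconds=3661))
#   '1:01:01'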
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day', '_hashcode'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (month is None and
isinstance(year, (bytes, str)) and len(year) == 4 and
1 <= ord(year[2:3]) <= 12):
# Pickle support
if isinstance(year, str):
try:
year = year.encode('latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a date object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(year)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hashcode = -1
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Construct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
# __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
return type(self)(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return date.fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
(used with permission)
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
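# Usage sketch (editor's addition, not in the original):
#
#   >>> d = date(2016, 3, 1)
#   >>> d.isoformat()
#   '2016-03-01'
#   >>> d.toordinal()
#   736024
#   >>> d + timedelta(days=30)
#   datetime.date(2016, 3, 31)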
class tzinfo:
"""Abstract base class for time zone info classes.
    Subclasses must override the tzname(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> minutes east of UTC (negative for west of UTC)"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo, fold
"""
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode', '_fold'
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, *, fold=0):
"""Constructor.
Arguments:
        hour, minute (default to zero)
second, microsecond (default to zero)
tzinfo (default to None)
fold (keyword only, default to zero)
"""
if (isinstance(hour, (bytes, str)) and len(hour) == 6 and
ord(hour[0:1])&0x7F < 24):
# Pickle support
if isinstance(hour, str):
try:
hour = hour.encode('latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a time object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(hour, minute or None)
self._hashcode = -1
return self
hour, minute, second, microsecond, fold = _check_time_fields(
hour, minute, second, microsecond, fold)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
self._fold = fold
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@property
def fold(self):
return self._fold
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return False
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
if self._hashcode == -1:
if self.fold:
t = self.replace(fold=0)
else:
t = self
tzoff = t.utcoffset()
if not tzoff: # zero or None
self._hashcode = hash(t._getstate()[0])
else:
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
self._hashcode = hash(time(h, m, self.second, self.microsecond))
else:
self._hashcode = hash((h, m, self.second, self.microsecond))
return self._hashcode
# Conversion to string
def _tzstr(self, sep=":"):
"""Return formatted timezone offset (+xx:xx) or None."""
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
mm, ss = divmod(mm, timedelta(minutes=1))
assert 0 <= hh < 24
off = "%s%02d%s%02d" % (sign, hh, sep, mm)
if ss:
off += ':%02d' % ss.seconds
return off
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
s= "%s.%s(%d, %d%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def isoformat(self, timespec='auto'):
"""Return the time formatted according to ISO.
The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional
part is omitted if self.microsecond == 0.
The optional argument timespec specifies the number of additional
terms of the time to include.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond, timespec)
tz = self._tzstr()
if tz:
s += tz
return s
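    # For example, time(12, 34, 56, 789000).isoformat() returns
    # '12:34:56.789000', while isoformat(timespec='minutes') truncates
    # the same value to '12:34'.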
__str__ = isoformat
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True, *, fold=None):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self._fold
return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)
# Pickle support.
def _getstate(self, protocol=3):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
h = self._hour
if self._fold and protocol > 3:
h += 128
basestate = bytes([h, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
h, self._minute, self._second, us1, us2, us3 = string
if h > 127:
self._fold = 1
self._hour = h - 128
else:
self._fold = 0
self._hour = h
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
return (time, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + time.__slots__
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None, *, fold=0):
if (isinstance(year, (bytes, str)) and len(year) == 10 and
1 <= ord(year[2:3])&0x7F <= 12):
# Pickle support
if isinstance(year, str):
try:
year = bytes(year, 'latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a datetime object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(year, month)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
hour, minute, second, microsecond, fold = _check_time_fields(
hour, minute, second, microsecond, fold)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
self._fold = fold
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@property
def fold(self):
return self._fold
@classmethod
def _fromtimestamp(cls, t, utc, tz):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
frac, t = _math.modf(t)
us = round(frac * 1e6)
if us >= 1000000:
t += 1
us -= 1000000
elif us < 0:
t -= 1
us += 1000000
converter = _time.gmtime if utc else _time.localtime
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is None:
# As of version 2015f max fold in IANA database is
# 23 hours at 1969-09-30 13:00:00 in Kwajalein.
# Let's probe 24 hours in the past to detect a transition:
max_fold_seconds = 24 * 3600
# On Windows localtime_s throws an OSError for negative values,
# thus we can't perform fold detection for values of time less
# than the max time fold. See comments in _datetimemodule's
# version of this method for more details.
if t < max_fold_seconds and sys.platform.startswith("win"):
return result
y, m, d, hh, mm, ss = converter(t - max_fold_seconds)[:6]
probe1 = cls(y, m, d, hh, mm, ss, us, tz)
trans = result - probe1 - timedelta(0, max_fold_seconds)
if trans.days < 0:
y, m, d, hh, mm, ss = converter(t + trans // timedelta(0, 1))[:6]
probe2 = cls(y, m, d, hh, mm, ss, us, tz)
if probe2 == result:
result._fold = 1
else:
result = tz.fromutc(result)
return result
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
return cls._fromtimestamp(t, tz is not None, tz)
@classmethod
def utcfromtimestamp(cls, t):
"""Construct a naive UTC datetime from a POSIX timestamp."""
return cls._fromtimestamp(t, True, None)
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time, tzinfo=True):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
if tzinfo is True:
tzinfo = time.tzinfo
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
tzinfo, fold=time.fold)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def _mktime(self):
"""Return integer POSIX timestamp."""
epoch = datetime(1970, 1, 1)
max_fold_seconds = 24 * 3600
t = (self - epoch) // timedelta(0, 1)
def local(u):
y, m, d, hh, mm, ss = _time.localtime(u)[:6]
return (datetime(y, m, d, hh, mm, ss) - epoch) // timedelta(0, 1)
# Our goal is to solve t = local(u) for u.
a = local(t) - t
u1 = t - a
t1 = local(u1)
if t1 == t:
# We found one solution, but it may not be the one we need.
# Look for an earlier solution (if `fold` is 0), or a
# later one (if `fold` is 1).
u2 = u1 + (-max_fold_seconds, max_fold_seconds)[self.fold]
b = local(u2) - u2
if a == b:
return u1
else:
b = t1 - u1
assert a != b
u2 = t - b
t2 = local(u2)
if t2 == t:
return u2
if t1 == t:
return u1
# We have found both offsets a and b, but neither t - a nor t - b is
# a solution. This means t is in the gap.
return (max, min)[self.fold](u1, u2)
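    # Around a fall-back transition, local() maps two different u values
    # to the same local time t; per PEP 495, fold selects between them
    # (fold=0 -> the earlier instant, fold=1 -> the later one).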
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
s = self._mktime()
return s + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond, fold=self.fold)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo, fold=self.fold)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True,
*, fold=None):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self.fold
return type(self)(year, month, day, hour, minute, second,
microsecond, tzinfo, fold=fold)
def _local_timezone(self):
if self.tzinfo is None:
ts = self._mktime()
else:
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
delta = local - datetime(*_time.gmtime(ts)[:6])
zone = _time.strftime('%Z', localtm)
tz = timezone(delta, zone)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
return tz
def astimezone(self, tz=None):
if tz is None:
tz = self._local_timezone()
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
mytz = self._local_timezone()
myoffset = mytz.utcoffset(self)
else:
myoffset = mytz.utcoffset(self)
if myoffset is None:
mytz = self.replace(tzinfo=None)._local_timezone()
myoffset = mytz.utcoffset(self)
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
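    # For example, converting between fixed offsets preserves the instant:
    # datetime(2000, 1, 1, 12, tzinfo=timezone.utc).astimezone(
    # timezone(timedelta(hours=-5))) yields 2000-01-01 07:00:00-05:00,
    # the same moment as 12:00 UTC.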
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T', timespec='auto'):
"""Return the time formatted according to ISO.
The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.
By default, the fractional part is omitted if self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
        a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
The optional argument timespec specifies the number of additional
terms of the time to include.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond, timespec))
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
mm, ss = divmod(mm, timedelta(minutes=1))
s += "%s%02d:%02d" % (sign, hh, mm)
if ss:
assert not ss.microseconds
s += ":%02d" % ss.seconds
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
", ".join(map(str, L)))
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
if self._fold:
assert s[-1:] == ")"
s = s[:-1] + ", fold=1)"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
# Assume that allow_mixed means that we are called from __eq__
if allow_mixed:
if myoff != self.replace(fold=not self.fold).utcoffset():
return 2
if otoff != other.replace(fold=not other.fold).utcoffset():
return 2
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return datetime.combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
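    # For example, subtracting aware datetimes compares absolute times:
    # 12:00+00:00 minus 07:00-05:00 on the same day is timedelta(0),
    # since both name the same instant.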
def __hash__(self):
if self._hashcode == -1:
if self.fold:
t = self.replace(fold=0)
else:
t = self
tzoff = t.utcoffset()
if tzoff is None:
self._hashcode = hash(t._getstate()[0])
else:
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
return self._hashcode
# Pickle support.
def _getstate(self, protocol=3):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
m = self._month
if self._fold and protocol > 3:
m += 128
basestate = bytes([yhi, ylo, m, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
(yhi, ylo, m, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
if m > 127:
self._fold = 1
self._month = m - 128
else:
self._fold = 0
self._month = m
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce_ex__(self, protocol):
return (self.__class__, self._getstate(protocol))
def __reduce__(self):
return self.__reduce_ex__(2)
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
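# For example, 2016 starts on a Friday (firstweekday == 4 > THURSDAY),
# so week1monday is pushed forward a week: the first ISO week of 2016
# begins on Monday, January 4.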
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta "
"strictly between -timedelta(hours=24) and "
"timedelta(hours=24).")
if (offset.microseconds != 0 or offset.seconds % 60 != 0):
raise ValueError("offset must be a timedelta "
"representing a whole number of minutes")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s.%s(%r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset)
return "%s.%s(%r, %r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
if not delta:
return 'UTC'
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes = rest // timedelta(minutes=1)
return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
# Some time zone algebra. For a datetime x, let
# x.n = x stripped of its timezone -- its naive time.
# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
# return None
# x.d = x.dst(), and assuming that doesn't raise an exception or
# return None
# x.s = x's standard offset, x.o - x.d
#
# Now some derived rules, where k is a duration (timedelta).
#
# 1. x.o = x.s + x.d
# This follows from the definition of x.s.
#
# 2. If x and y have the same tzinfo member, x.s = y.s.
# This is actually a requirement, an assumption we need to make about
# sane tzinfo classes.
#
# 3. The naive UTC time corresponding to x is x.n - x.o.
# This is again a requirement for a sane tzinfo class.
#
# 4. (x+k).s = x.s
# This follows from #2, and that datetime+timedelta preserves tzinfo.
#
# 5. (x+k).n = x.n + k
# Again follows from how arithmetic is defined.
#
# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
# (meaning that the various tzinfo methods exist, and don't blow up or return
# None when called).
#
# The function wants to return a datetime y with timezone tz, equivalent to x.
# x is already in UTC.
#
# By #3, we want
#
# y.n - y.o = x.n [1]
#
# The algorithm starts by attaching tz to x.n, and calling that y. So
# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
# becomes true; in effect, we want to solve [2] for k:
#
# (y+k).n - (y+k).o = x.n [2]
#
# By #1, this is the same as
#
# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
#
# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
# Substituting that into [3],
#
# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
# k - (y+k).s - (y+k).d = 0; rearranging,
# k = (y+k).s + (y+k).d; by #4, (y+k).s == y.s, so
# k = y.s + (y+k).d
#
# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
# approximate k by ignoring the (y+k).d term at first. Note that k can't be
# very large, since all offset-returning methods return a duration of magnitude
# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
# be 0, so ignoring it has no consequence then.
#
# In any case, the new value is
#
# z = y + y.s [4]
#
# It's helpful to step back and look at [4] from a higher level: it's simply
# mapping from UTC to tz's standard time.
#
# At this point, if
#
# z.n - z.o = x.n [5]
#
# we have an equivalent time, and are almost done. The insecurity here is
# at the start of daylight time. Picture US Eastern for concreteness. The wall
# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
# sense then. The docs ask that an Eastern tzinfo class consider such a time to
# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
# on the day DST starts. We want to return the 1:MM EST spelling because that's
# the only spelling that makes sense on the local wall clock.
#
# In fact, if [5] holds at this point, we do have the standard-time spelling,
# but that takes a bit of proof. We first prove a stronger result. What's the
# difference between the LHS and RHS of [5]? Let
#
# diff = x.n - (z.n - z.o) [6]
#
# Now
#     z.n =            by [4]
#     (y + y.s).n =    by #5
#     y.n + y.s =      since y.n = x.n
#     x.n + y.s =      since z and y have the same tzinfo member,
#                          y.s = z.s by #2
#     x.n + z.s
#
# Plugging that back into [6] gives
#
#     diff =
#     x.n - ((x.n + z.s) - z.o) =    expanding
#     x.n - x.n - z.s + z.o =        cancelling
#     - z.s + z.o =                  by #1
#     z.d
#
# So diff = z.d.
#
# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
# spelling we wanted in the endcase described above. We're done. Contrarily,
# if z.d = 0, then we have a UTC equivalent, and are also done.
#
# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
# add to z (in effect, z is in tz's standard time, and we need to shift the
# local clock into tz's daylight time).
#
# Let
#
# z' = z + z.d = z + diff [7]
#
# and we can again ask whether
#
# z'.n - z'.o = x.n [8]
#
# If so, we're done. If not, the tzinfo class is insane, according to the
# assumptions we've made. This also requires a bit of proof. As before, let's
# compute the difference between the LHS and RHS of [8] (and skipping some of
# the justifications for the kinds of substitutions we've done several times
# already):
#
#     diff' = x.n - (z'.n - z'.o) =                replacing z'.n via [7]
#     x.n - (z.n + diff - z'.o) =                  replacing diff via [6]
#     x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
#     x.n - z.n - x.n + z.n - z.o + z'.o =         cancel x.n
#     - z.n + z.n - z.o + z'.o =                   cancel z.n
#     - z.o + z'.o =                               #1 twice
#     -z.s - z.d + z'.s + z'.d =                   z and z' have same tzinfo
#     z'.d - z.d
#
# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
# return z', not bothering to compute z'.d.
#
# How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z by
# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
# would have to change the result dst() returns: we start in DST, and moving
# a little further into it takes us out of DST.
#
# There isn't a sane case where this can happen. The closest it gets is at
# the end of DST, where there's an hour in UTC with no spelling in a hybrid
# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
# standard time. Since that's what the local clock *does*, we want to map both
# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
# in local time, but so it goes -- it's the way the local clock works.
#
# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
# (correctly) concludes that z' is not UTC-equivalent to x.
#
# Because we know z.d said z was in daylight time (else [5] would have held and
# we would have stopped then), and we know z.d != z'.d (else [8] would have held
# and we have stopped then), and there are only 2 possible values dst() can
# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
# but the reasoning doesn't depend on the example -- it depends on there being
# two possible dst() outcomes, one zero and the other non-zero). Therefore
# z' must be in standard time, and is the spelling we want in this case.
#
# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
# concerned (because it takes z' as being in standard time rather than the
# daylight time we intend here), but returning it gives the real-life "local
# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
# tz.
#
# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
# the 1:MM standard time spelling we want.
#
# So how can this break? One of the assumptions must be violated. Two
# possibilities:
#
# 1) [2] effectively says that y.s is invariant across all y belonging to a given
# time zone. This isn't true if, for political reasons or continental drift,
# a region decides to change its base offset from UTC.
#
# 2) There may be versions of "double daylight" time where the tail end of
# the analysis gives up a step too early. I haven't thought about that
# enough to say.
#
# In any case, it's clear that the default fromutc() is strong enough to handle
# "almost all" time zones: so long as the standard offset is invariant, it
# doesn't matter if daylight time transition points change from year to year, or
# if daylight time is skipped in some years; it doesn't matter how large or
# small dst() may get within its bounds; and it doesn't even matter if some
# perverse time zone returns a negative dst(). So a breaking case must be
# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
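# As a concrete, commented-out sketch of the algorithm above (the class
# and its DST rule here are hypothetical, not part of this module):
#
#   class Eastern(tzinfo):
#       def utcoffset(self, dt):
#           return timedelta(hours=-5) + self.dst(dt)
#       def dst(self, dt):
#           # Hypothetical rule: DST in effect April through October.
#           return timedelta(hours=1) if 4 <= dt.month <= 10 else timedelta(0)
#       def tzname(self, dt):
#           return "EDT" if self.dst(dt) else "EST"
#
# For x = July 1 05:00 UTC: y.s = -5h, so z = y + y.s = 00:00; z.d = 1h,
# and fromutc() returns z + z.d = 01:00 EDT, which is again 05:00 UTC.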
try:
from _datetime import *
except ImportError:
pass
else:
# Clean up unused names
del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
_DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
_check_date_fields, _check_int_field, _check_time_fields,
_check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
_date_class, _days_before_month, _days_before_year, _days_in_month,
_format_time, _is_leap, _isoweek1monday, _math, _ord2ymd,
_time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord,
_divide_and_round)
# XXX Since import * above excludes names that start with _,
# docstring does not get overwritten. In the future, it may be
# appropriate to maintain a single module level docstring and
# remove the following line.
try:
from _datetime import __doc__
except ImportError:
pass
cosmopolitan/third_party/python/Lib/filecmp.py
"""Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=True) -> int
cmpfiles(a, b, common) -> ([], [], [])
clear_cache()
"""
import os
import stat
from itertools import filterfalse
__all__ = ['clear_cache', 'cmp', 'dircmp', 'cmpfiles', 'DEFAULT_IGNORES']
_cache = {}
BUFSIZE = 8*1024
DEFAULT_IGNORES = [
'RCS', 'CVS', 'tags', '.git', '.hg', '.bzr', '_darcs', '__pycache__']
def clear_cache():
"""Clear the filecmp cache."""
_cache.clear()
def cmp(f1, f2, shallow=True):
"""Compare two files.
Arguments:
f1 -- First file name
f2 -- Second file name
shallow -- Just check stat signature (do not read the files).
defaults to True.
Return value:
True if the files are the same, False otherwise.
This function uses a cache for past comparisons and the results,
with cache entries invalidated if their stat information
changes. The cache may be cleared by calling clear_cache().
"""
s1 = _sig(os.stat(f1))
s2 = _sig(os.stat(f2))
if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
return False
if shallow and s1 == s2:
return True
if s1[1] != s2[1]:
return False
outcome = _cache.get((f1, f2, s1, s2))
if outcome is None:
outcome = _do_cmp(f1, f2)
if len(_cache) > 100: # limit the maximum size of the cache
clear_cache()
_cache[f1, f2, s1, s2] = outcome
return outcome
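# For example, cmp('a.txt', 'b.txt') returns True as soon as both stat
# signatures (file type, size, mtime) match; pass shallow=False to force
# a byte-by-byte comparison even when the signatures agree.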
def _sig(st):
return (stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
bufsize = BUFSIZE
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
# Directory comparison class.
#
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a, b, ignore=None, hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to DEFAULT_IGNORES.
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = DEFAULT_IGNORES
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
def phase1(self): # Compute common names
a = dict(zip(map(os.path.normcase, self.left_list), self.left_list))
b = dict(zip(map(os.path.normcase, self.right_list), self.right_list))
self.common = list(map(a.__getitem__, filter(b.__contains__, a)))
self.left_only = list(map(a.__getitem__, filterfalse(b.__contains__, a)))
self.right_only = list(map(b.__getitem__, filterfalse(a.__contains__, b)))
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = os.stat(a_path)
except OSError as why:
# print('Can\'t stat', a_path, ':', why.args[1])
ok = 0
try:
b_stat = os.stat(b_path)
except OSError as why:
# print('Can\'t stat', b_path, ':', why.args[1])
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat.st_mode)
b_type = stat.S_IFMT(b_stat.st_mode)
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp object is created for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for sd in self.subdirs.values():
sd.phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print('diff', self.left, self.right)
if self.left_only:
self.left_only.sort()
print('Only in', self.left, ':', self.left_only)
if self.right_only:
self.right_only.sort()
print('Only in', self.right, ':', self.right_only)
if self.same_files:
self.same_files.sort()
print('Identical files :', self.same_files)
if self.diff_files:
self.diff_files.sort()
print('Differing files :', self.diff_files)
if self.funny_files:
self.funny_files.sort()
print('Trouble with common files :', self.funny_files)
if self.common_dirs:
self.common_dirs.sort()
print('Common subdirectories :', self.common_dirs)
if self.common_funny:
self.common_funny.sort()
print('Common funny cases :', self.common_funny)
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for sd in self.subdirs.values():
print()
sd.report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for sd in self.subdirs.values():
print()
sd.report_full_closure()
methodmap = dict(subdirs=phase4,
same_files=phase3, diff_files=phase3, funny_files=phase3,
common_dirs = phase2, common_files=phase2, common_funny=phase2,
common=phase1, left_only=phase1, right_only=phase1,
left_list=phase0, right_list=phase0)
def __getattr__(self, attr):
if attr not in self.methodmap:
raise AttributeError(attr)
self.methodmap[attr](self)
return getattr(self, attr)
def cmpfiles(a, b, common, shallow=True):
"""Compare common files in two directories.
a, b -- directory names
common -- list of file names found in both directories
shallow -- if true, do comparison based solely on stat() information
Returns a tuple of three lists:
files that compare equal
files that are different
filenames that aren't regular files.
"""
res = ([], [], [])
for x in common:
ax = os.path.join(a, x)
bx = os.path.join(b, x)
res[_cmp(ax, bx, shallow)].append(x)
return res
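# For example, cmpfiles('dir1', 'dir2', ['a.py', 'b.py']) might return
# (['a.py'], ['b.py'], []): a.py compares equal in both trees, b.py
# differs, and no name failed to stat.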
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, abs=abs, cmp=cmp):
try:
return not abs(cmp(a, b, sh))
except OSError:
return 2
# Return a copy with items that occur in skip removed.
#
def _filter(flist, skip):
return list(filterfalse(skip.__contains__, flist))
# Demonstration and testing.
#
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.GetoptError('need exactly two args', None)
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
if __name__ == '__main__':
demo()
cosmopolitan/third_party/python/Lib/_bootlocale.py
def getpreferredencoding(do_setlocale=True):
return 'UTF-8'
cosmopolitan/third_party/python/Lib/compileall.py
"""Module/script to byte-compile all .py files to .pyc files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into subdirectories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import importlib.util
import py_compile
import struct
from functools import partial
__all__ = ["compile_dir","compile_file","compile_path"]
def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0):
if quiet < 2 and isinstance(dir, os.PathLike):
dir = os.fspath(dir)
if not quiet:
print('Listing {!r}...'.format(dir))
try:
names = os.listdir(dir)
except OSError:
if quiet < 2:
print("Can't list {!r}".format(dir))
names = []
names.sort()
for name in names:
if name == '__pycache__':
continue
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if not os.path.isdir(fullname):
yield fullname
elif (maxlevels > 0 and name != os.curdir and name != os.pardir and
os.path.isdir(fullname) and not os.path.islink(fullname)):
yield from _walk_dir(fullname, ddir=dfile,
maxlevels=maxlevels - 1, quiet=quiet)
def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None,
quiet=0, legacy=False, optimize=-1, workers=1):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
force: if True, force compilation, even if timestamps are up-to-date
quiet: full output with False or 0, errors only with 1,
no output with 2
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
optimize: optimization level or -1 for level of the interpreter
workers: maximum number of parallel workers
"""
ProcessPoolExecutor = None
if workers is not None:
if workers < 0:
            raise ValueError('workers must be greater than or equal to 0')
elif workers != 1:
try:
# Only import when needed, as low resource platforms may
# fail to import it
from concurrent.futures import ProcessPoolExecutor
except ImportError:
workers = 1
files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels,
ddir=ddir)
success = True
if workers is not None and workers != 1 and ProcessPoolExecutor is not None:
workers = workers or None
with ProcessPoolExecutor(max_workers=workers) as executor:
results = executor.map(partial(compile_file,
ddir=ddir, force=force,
rx=rx, quiet=quiet,
legacy=legacy,
optimize=optimize),
files)
success = min(results, default=True)
else:
for file in files:
if not compile_file(file, ddir, force, rx, quiet,
legacy, optimize):
success = False
return success
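# For example, compile_dir('Lib/', quiet=1) walks the tree and
# byte-compiles every .py file, printing error messages only; with
# workers=4 the files are compiled in parallel via ProcessPoolExecutor
# when that module is importable.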
def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0,
legacy=False, optimize=-1):
"""Byte-compile one file.
Arguments (only fullname is required):
fullname: the file to byte-compile
ddir: if given, the directory name compiled in to the
byte-code file.
force: if True, force compilation, even if timestamps are up-to-date
quiet: full output with False or 0, errors only with 1,
no output with 2
legacy: if True, produce legacy pyc paths instead of PEP 3147 paths
optimize: optimization level or -1 for level of the interpreter
"""
success = True
if quiet < 2 and isinstance(fullname, os.PathLike):
fullname = os.fspath(fullname)
name = os.path.basename(fullname)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
if os.path.isfile(fullname):
if legacy:
cfile = fullname + 'c'
else:
if optimize >= 0:
opt = optimize if optimize >= 1 else ''
cfile = importlib.util.cache_from_source(
fullname, optimization=opt)
else:
cfile = importlib.util.cache_from_source(fullname)
cache_dir = os.path.dirname(cfile)
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
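                # A 3.6-era .pyc starts with an 8-byte header: the 4-byte
                # import magic followed by the source mtime as a 32-bit
                # little-endian int. If the header on disk matches, the
                # cached file is current and recompilation is skipped.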
try:
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sl', importlib.util.MAGIC_NUMBER,
mtime)
with open(cfile, 'rb') as chandle:
actual = chandle.read(8)
if expect == actual:
return success
except OSError:
pass
if not quiet:
print('Compiling {!r}...'.format(fullname))
try:
ok = py_compile.compile(fullname, cfile, dfile, True,
optimize=optimize)
except py_compile.PyCompileError as err:
success = False
if quiet >= 2:
return success
elif quiet:
print('*** Error compiling {!r}...'.format(fullname))
else:
print('*** ', end='')
# escape non-printable characters in msg
msg = err.msg.encode(sys.stdout.encoding,
errors='backslashreplace')
msg = msg.decode(sys.stdout.encoding)
print(msg)
except (SyntaxError, UnicodeError, OSError) as e:
success = False
if quiet >= 2:
return success
elif quiet:
print('*** Error compiling {!r}...'.format(fullname))
else:
print('*** ', end='')
print(e.__class__.__name__ + ':', e)
else:
if ok == 0:
success = False
return success
def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0,
legacy=False, optimize=-1):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default True)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default False)
quiet: as for compile_dir() (default 0)
legacy: as for compile_dir() (default False)
optimize: as for compile_dir() (default -1)
"""
success = True
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
if quiet < 2:
print('Skipping current directory')
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet,
legacy=legacy, optimize=optimize)
return success
def main():
"""Script main program."""
import argparse
parser = argparse.ArgumentParser(
description='Utilities to support installing Python libraries.')
parser.add_argument('-l', action='store_const', const=0,
default=10, dest='maxlevels',
help="don't recurse into subdirectories")
parser.add_argument('-r', type=int, dest='recursion',
help=('control the maximum recursion level. '
'if `-l` and `-r` options are specified, '
'then `-r` takes precedence.'))
parser.add_argument('-f', action='store_true', dest='force',
help='force rebuild even if timestamps are up to date')
parser.add_argument('-q', action='count', dest='quiet', default=0,
help='output only error messages; -qq will suppress '
'the error messages as well.')
parser.add_argument('-b', action='store_true', dest='legacy',
help='use legacy (pre-PEP3147) compiled file locations')
parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None,
help=('directory to prepend to file paths for use in '
'compile-time tracebacks and in runtime '
'tracebacks in cases where the source file is '
'unavailable'))
parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None,
help=('skip files matching the regular expression; '
'the regexp is searched for in the full path '
'of each file considered for compilation'))
parser.add_argument('-i', metavar='FILE', dest='flist',
help=('add all the files and directories listed in '
'FILE to the list considered for compilation; '
'if "-", names are read from stdin'))
parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*',
help=('zero or more file and directory names '
'to compile; if no arguments given, defaults '
'to the equivalent of -l sys.path'))
parser.add_argument('-j', '--workers', default=1,
type=int, help='Run compileall concurrently')
args = parser.parse_args()
compile_dests = args.compile_dest
if args.rx:
import re
args.rx = re.compile(args.rx)
if args.recursion is not None:
maxlevels = args.recursion
else:
maxlevels = args.maxlevels
# if flist is provided then load it
if args.flist:
try:
with (sys.stdin if args.flist=='-' else open(args.flist)) as f:
for line in f:
compile_dests.append(line.strip())
except OSError:
if args.quiet < 2:
print("Error reading file list {}".format(args.flist))
return False
if args.workers is not None:
args.workers = args.workers or None
success = True
try:
if compile_dests:
for dest in compile_dests:
if os.path.isfile(dest):
if not compile_file(dest, args.ddir, args.force, args.rx,
args.quiet, args.legacy):
success = False
else:
if not compile_dir(dest, maxlevels, args.ddir,
args.force, args.rx, args.quiet,
args.legacy, workers=args.workers):
success = False
return success
else:
return compile_path(legacy=args.legacy, force=args.force,
quiet=args.quiet)
except KeyboardInterrupt:
if args.quiet < 2:
print("\n[interrupted]")
return False
return True
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
cosmopolitan/third_party/python/Lib/_sysconfigdata_m_cosmo_x86_64_cosmo.py
# system configuration generated and used by the sysconfig module
build_time_vars = {'ABIFLAGS': 'm',
'AC_APPLE_UNIVERSAL_BUILD': 0,
'AIX_GENUINE_CPLUSPLUS': 0,
'ANDROID_API_LEVEL': 0,
'AR': 'ar',
'ARFLAGS': 'rcs',
'BASECFLAGS': '-Wno-unused-result -Wsign-compare',
'BASECPPFLAGS': '',
'BASEMODLIBS': '',
'BINDIR': '/bin',
'BINLIBDEST': '/lib/python3.6',
'BLDLIBRARY': 'libpython3.6m.a',
'BLDSHARED': 'gcc -shared -static -nostdlib -nostdinc -fno-pie '
'-mno-red-zone -include '
'/cpython36/libcosmo/cosmopolitan.h',
'BUILDEXE': '.com.dbg',
'BUILDPYTHON': 'python.com.dbg',
'BUILD_GNU_TYPE': 'x86_64-pc-cosmo-gnu',
'BYTESTR_DEPS': '\\',
'CC': 'gcc',
'CCSHARED': '-fPIC',
'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -Os -Wall '
'-Wno-strict-prototypes -Wno-unused-value -std=c99 '
'-static -fno-pie -fno-omit-frame-pointer '
'-ffunction-sections -fdata-sections -mno-red-zone -pg '
'-nostdinc -nostdlib '
'-I/cpython36/header_stubs -include '
'/cpython36/libcosmo/cosmopolitan.h',
'CFLAGSFORSHARED': '',
'CFLAGS_ALIASING': '',
'CFLAGS_NODIST': '',
'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in '
'Makefile.pre.in',
'CONFIGURE_CFLAGS': '-Os -Wall -Wno-strict-prototypes -Wno-unused-value '
'-std=c99 -static -fno-pie '
'-fno-omit-frame-pointer -ffunction-sections '
'-fdata-sections -mno-red-zone -pg -nostdinc '
'-nostdlib '
'-I/cpython36/header_stubs '
'-include '
'/cpython36/libcosmo/cosmopolitan.h',
'CONFIGURE_CFLAGS_NODIST': '-Os -Wall -Wno-strict-prototypes '
'-Wno-unused-value -std=c99 -static '
'-fno-pie -fno-omit-frame-pointer '
'-mno-red-zone -pg -nostdinc -nostdlib '
'-I/cpython36/header_stubs '
'-include '
'/cpython36/libcosmo/cosmopolitan.h '
'-std=c99 -Wextra -Wno-unused-result '
'-Wno-unused-parameter '
'-Wno-missing-field-initializers '
'-Wno-cast-function-type',
'CONFIGURE_CPPFLAGS': '',
'CONFIGURE_LDFLAGS': '-static -nostdlib -nostdinc -fno-pie '
'-mno-red-zone -include '
'/cpython36/libcosmo/cosmopolitan.h',
'CONFIGURE_LDFLAGS_NODIST': '',
'CONFIG_ARGS': "'--prefix=' '--disable-shared' '--disable-profiling' "
"'--disable-ipv6' '--disable-optimizations' "
"'--without-pydebug' '--without-threads' '--without-lto' "
"'--with-pymalloc' '--without-ensurepip' "
"'--with-tcltk-includes=' '--with-tcltk-libs=' '--with-libm=' "
"'OPT=' 'CCSHARED=' 'LINKFORSHARED=' 'PKG_CONFIG=' 'CFLAGS=-Os "
'-Wall -Wno-strict-prototypes -Wno-unused-value -std=c99 '
'-static -fno-pie -fno-omit-frame-pointer '
'-ffunction-sections -fdata-sections -mno-red-zone '
'-pg -nostdinc -nostdlib '
'-I/cpython36/header_stubs '
'-include '
"/cpython36/libcosmo/cosmopolitan.h' "
"'LDFLAGS=-static -nostdlib -nostdinc -fno-pie "
'-mno-red-zone -include '
"/cpython36/libcosmo/cosmopolitan.h' "
"'LIBS= -Wl,--gc-sections -fuse-ld=bfd "
'-Wl,-T,/cpython36/libcosmo/ape.lds '
'-include '
'/cpython36/libcosmo/cosmopolitan.h '
'/cpython36/libcosmo/crt.o '
'/cpython36/libcosmo/ape.o '
"/cpython36/libcosmo/cosmopolitan.a' "
"'CFLAGS_NODIST=-Os -Wall -Wno-strict-prototypes "
'-Wno-unused-value -std=c99 -static -fno-pie '
'-fno-omit-frame-pointer -mno-red-zone -pg '
'-nostdinc -nostdlib '
'-I/cpython36/header_stubs '
'-include '
"/cpython36/libcosmo/cosmopolitan.h'",
'CONFINCLUDEDIR': '/include',
'CONFINCLUDEPY': '/include/python3.6m',
'COREPYTHONPATH': ':',
'COVERAGE_INFO': '/cpython36/coverage.info',
'COVERAGE_REPORT': '/cpython36/lcov-report',
'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov '
'report"',
'CPPFLAGS': '-I. -I./Include',
'CXX': 'g++',
'DECIMAL_USE_ASM': "'ASM=1'",
'DECIMAL_USE_X64': "'CONFIG_64=1'",
'DESTDIRS': '/lib /lib/python3.6 /lib/python3.6/lib-dynload',
'DESTLIB': '/lib/python3.6',
'DESTPATH': '',
'DESTSHARED': '/lib/python3.6/lib-dynload',
'DFLAGS': '',
'DIRMODE': 755,
'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in '
'Makefile.pre.in Include Lib Misc Ext-dummy',
'DISTDIRS': 'Include Lib Misc Ext-dummy',
'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h '
'pyconfig.h.in Makefile.pre.in',
'DLINCLDIR': '.',
'DLLLIBRARY': '',
'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0,
'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0,
'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1,
'DTRACE': '',
'DTRACE_DEPS': '\\',
'DTRACE_HEADERS': '',
'DTRACE_OBJS': '',
'DYNLOADFILE': 'dynload_shlib.o',
'ENABLE_IPV6': 0,
'ENSUREPIP': 'no',
'EXE': '.com',
'EXEMODE': 755,
'EXTRAMACHDEPPATH': '',
'EXTRATESTOPTS': '',
'EXTRA_CFLAGS': '',
'EXT_SUFFIX': '.cpython-36m-x86_64-cosmo-gnu.so',
'FILEMODE': 644,
'FLOCK_NEEDS_LIBBSD': 0,
'GETPGRP_HAVE_ARG': 0,
'GETTIMEOFDAY_NO_TZ': 0,
'GITBRANCH': 'git --git-dir ./.git name-rev --name-only HEAD',
'GITTAG': 'git --git-dir ./.git describe --all --always --dirty',
'GITVERSION': 'git --git-dir ./.git rev-parse --short HEAD',
'GNULD': 'yes',
'HAVE_ACCEPT4': 1,
'HAVE_ACOSH': 1,
'HAVE_ADDRINFO': 1,
'HAVE_ALARM': 1,
'HAVE_ALIGNED_REQUIRED': 0,
'HAVE_ALTZONE': 0,
'HAVE_ASINH': 1,
'HAVE_ATANH': 1,
'HAVE_BIND_TEXTDOMAIN_CODESET': 0,
'HAVE_BROKEN_MBSTOWCS': 0,
'HAVE_BROKEN_NICE': 0,
'HAVE_BROKEN_PIPE_BUF': 0,
'HAVE_BROKEN_POLL': 0,
'HAVE_BROKEN_POSIX_SEMAPHORES': 0,
'HAVE_BROKEN_PTHREAD_SIGMASK': 0,
'HAVE_BROKEN_SEM_GETVALUE': 1,
'HAVE_BROKEN_UNSETENV': 0,
'HAVE_BUILTIN_ATOMIC': 1,
'HAVE_CHFLAGS': 0,
'HAVE_CHOWN': 1,
'HAVE_CHROOT': 1,
'HAVE_CLOCK': 1,
'HAVE_CLOCK_GETRES': 1,
'HAVE_CLOCK_GETTIME': 1,
'HAVE_CLOCK_SETTIME': 1,
'HAVE_COMPUTED_GOTOS': 1,
'HAVE_CONFSTR': 0,
'HAVE_COPYSIGN': 1,
'HAVE_COSMO_CAN_RAW_FD_FRAMES': 0,
'HAVE_CTERMID': 0,
'HAVE_CTERMID_R': 0,
'HAVE_CURSES_FILTER': 1,
'HAVE_CURSES_HAS_KEY': 1,
'HAVE_CURSES_IMMEDOK': 1,
'HAVE_CURSES_IS_PAD': 1,
'HAVE_CURSES_IS_TERM_RESIZED': 1,
'HAVE_CURSES_RESIZETERM': 1,
'HAVE_CURSES_RESIZE_TERM': 1,
'HAVE_CURSES_SYNCOK': 1,
'HAVE_CURSES_TYPEAHEAD': 1,
'HAVE_CURSES_USE_ENV': 1,
'HAVE_CURSES_WCHGAT': 1,
'HAVE_DECL_ISFINITE': 1,
'HAVE_DECL_ISINF': 1,
'HAVE_DECL_ISNAN': 1,
'HAVE_DECL_RTLD_DEEPBIND': 0,
'HAVE_DECL_RTLD_GLOBAL': 1,
'HAVE_DECL_RTLD_LAZY': 1,
'HAVE_DECL_RTLD_LOCAL': 1,
'HAVE_DECL_RTLD_NODELETE': 0,
'HAVE_DECL_RTLD_NOLOAD': 0,
'HAVE_DECL_RTLD_NOW': 1,
'HAVE_DECL_TZNAME': 0,
'HAVE_DEVICE_MACROS': 0,
'HAVE_DEV_PTC': 0,
'HAVE_DEV_PTMX': 1,
'HAVE_DIRENT_D_TYPE': 1,
'HAVE_DIRFD': 1,
'HAVE_DLOPEN': 1,
'HAVE_DUP2': 1,
'HAVE_DUP3': 1,
'HAVE_DYNAMIC_LOADING': 1,
'HAVE_EPOLL': 0,
'HAVE_EPOLL_CREATE1': 0,
'HAVE_ERF': 1,
'HAVE_ERFC': 1,
'HAVE_EXECV': 1,
'HAVE_EXPM1': 1,
'HAVE_FACCESSAT': 1,
'HAVE_FCHDIR': 1,
'HAVE_FCHMOD': 1,
'HAVE_FCHMODAT': 1,
'HAVE_FCHOWN': 1,
'HAVE_FCHOWNAT': 1,
'HAVE_FDATASYNC': 1,
'HAVE_FDOPENDIR': 1,
'HAVE_FEXECVE': 0,
'HAVE_FINITE': 1,
'HAVE_FLOCK': 1,
'HAVE_FORK': 1,
'HAVE_FORKPTY': 1,
'HAVE_FPATHCONF': 1,
'HAVE_FSEEK64': 0,
'HAVE_FSEEKO': 1,
'HAVE_FSTATAT': 1,
'HAVE_FSTATVFS': 0,
'HAVE_FSYNC': 1,
'HAVE_FTELL64': 0,
'HAVE_FTELLO': 1,
'HAVE_FTIME': 0,
'HAVE_FTRUNCATE': 1,
'HAVE_FUTIMENS': 1,
'HAVE_FUTIMES': 1,
'HAVE_FUTIMESAT': 1,
'HAVE_GAI_STRERROR': 1,
'HAVE_GAMMA': 0,
'HAVE_GCC_ASM_FOR_MC68881': 0,
'HAVE_GCC_ASM_FOR_X64': 1,
'HAVE_GCC_ASM_FOR_X87': 1,
'HAVE_GCC_UINT128_T': 1,
'HAVE_GETADDRINFO': 0,
'HAVE_GETC_UNLOCKED': 1,
'HAVE_GETENTROPY': 1,
'HAVE_GETGROUPLIST': 1,
'HAVE_GETGROUPS': 1,
'HAVE_GETHOSTBYNAME': 1,
'HAVE_GETHOSTBYNAME_R': 0,
'HAVE_GETHOSTBYNAME_R_3_ARG': 0,
'HAVE_GETHOSTBYNAME_R_5_ARG': 0,
'HAVE_GETHOSTBYNAME_R_6_ARG': 0,
'HAVE_GETITIMER': 1,
'HAVE_GETLOADAVG': 0,
'HAVE_GETLOGIN': 1,
'HAVE_GETNAMEINFO': 1,
'HAVE_GETPAGESIZE': 1,
'HAVE_GETPEERNAME': 1,
'HAVE_GETPGID': 1,
'HAVE_GETPGRP': 1,
'HAVE_GETPID': 1,
'HAVE_GETPRIORITY': 1,
'HAVE_GETPWENT': 1,
'HAVE_GETRANDOM': 1,
'HAVE_GETRANDOM_SYSCALL': 0,
'HAVE_GETRESGID': 1,
'HAVE_GETRESUID': 1,
'HAVE_GETSID': 1,
'HAVE_GETSPENT': 0,
'HAVE_GETSPNAM': 0,
'HAVE_GETTIMEOFDAY': 1,
'HAVE_GETWD': 0,
'HAVE_HSTRERROR': 0,
'HAVE_HTOLE64': 1,
'HAVE_HYPOT': 1,
'HAVE_IF_NAMEINDEX': 0,
'HAVE_INET_ATON': 1,
'HAVE_INET_PTON': 1,
'HAVE_INITGROUPS': 1,
'HAVE_IPA_PURE_CONST_BUG': 1,
'HAVE_KILL': 1,
'HAVE_KILLPG': 1,
'HAVE_KQUEUE': 0,
'HAVE_LARGEFILE_SUPPORT': 0,
'HAVE_LCHFLAGS': 0,
'HAVE_LCHMOD': 0,
'HAVE_LCHOWN': 1,
'HAVE_LGAMMA': 1,
'HAVE_LIBDL': 0,
'HAVE_LIBDLD': 0,
'HAVE_LIBIEEE': 0,
'HAVE_LIBREADLINE': 0,
'HAVE_LIBRESOLV': 0,
'HAVE_LIBSENDFILE': 0,
'HAVE_LINK': 1,
'HAVE_LINKAT': 1,
'HAVE_LOCKF': 0,
'HAVE_LOG1P': 1,
'HAVE_LOG2': 1,
'HAVE_LONG_DOUBLE': 1,
'HAVE_LSTAT': 1,
'HAVE_LUTIMES': 0,
'HAVE_MAKEDEV': 1,
'HAVE_MBRTOWC': 1,
'HAVE_MEMMOVE': 1,
'HAVE_MEMRCHR': 1,
'HAVE_MKDIRAT': 1,
'HAVE_MKFIFO': 1,
'HAVE_MKFIFOAT': 1,
'HAVE_MKNOD': 1,
'HAVE_MKNODAT': 1,
'HAVE_MKTIME': 1,
'HAVE_MMAP': 1,
'HAVE_MREMAP': 1,
'HAVE_NICE': 1,
'HAVE_OPENAT': 1,
'HAVE_OPENPTY': 1,
'HAVE_PATHCONF': 1,
'HAVE_PAUSE': 1,
'HAVE_PIPE2': 1,
'HAVE_PLOCK': 0,
'HAVE_POLL': 1,
'HAVE_POSIX_FADVISE': 1,
'HAVE_POSIX_FALLOCATE': 0,
'HAVE_PREAD': 1,
'HAVE_PRLIMIT': 0,
'HAVE_PROTOTYPES': 1,
'HAVE_PTHREAD_ATFORK': 1,
'HAVE_PTHREAD_DESTRUCTOR': 1,
'HAVE_PTHREAD_KILL': 1,
'HAVE_PTHREAD_SIGMASK': 1,
'HAVE_PUTENV': 1,
'HAVE_PWRITE': 1,
'HAVE_READLINK': 1,
'HAVE_READLINKAT': 1,
'HAVE_READV': 1,
'HAVE_REALPATH': 1,
'HAVE_RENAMEAT': 1,
'HAVE_RL_APPEND_HISTORY': 0,
'HAVE_RL_CALLBACK': 0,
'HAVE_RL_CATCH_SIGNAL': 1,
'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1,
'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1,
'HAVE_RL_COMPLETION_MATCHES': 0,
'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1,
'HAVE_RL_PRE_INPUT_HOOK': 1,
'HAVE_RL_RESIZE_TERMINAL': 0,
'HAVE_ROUND': 1,
'HAVE_SCHED_GET_PRIORITY_MAX': 0,
'HAVE_SCHED_RR_GET_INTERVAL': 0,
'HAVE_SCHED_SETAFFINITY': 1,
'HAVE_SCHED_SETPARAM': 1,
'HAVE_SCHED_SETSCHEDULER': 1,
'HAVE_SELECT': 1,
'HAVE_SEM_GETVALUE': 0,
'HAVE_SEM_OPEN': 0,
'HAVE_SEM_TIMEDWAIT': 0,
'HAVE_SEM_UNLINK': 0,
'HAVE_SENDFILE': 1,
'HAVE_SETEGID': 1,
'HAVE_SETEUID': 1,
'HAVE_SETGID': 1,
'HAVE_SETGROUPS': 1,
'HAVE_SETHOSTNAME': 1,
'HAVE_SETITIMER': 1,
'HAVE_SETLOCALE': 1,
'HAVE_SETPGID': 1,
'HAVE_SETPGRP': 0,
'HAVE_SETPRIORITY': 1,
'HAVE_SETREGID': 1,
'HAVE_SETRESGID': 1,
'HAVE_SETRESUID': 1,
'HAVE_SETREUID': 1,
'HAVE_SETSID': 1,
'HAVE_SETUID': 1,
'HAVE_SETVBUF': 1,
'HAVE_SIGACTION': 1,
'HAVE_SIGALTSTACK': 0,
'HAVE_SIGINTERRUPT': 1,
'HAVE_SIGPENDING': 0,
'HAVE_SIGRELSE': 0,
'HAVE_SIGTIMEDWAIT': 0,
'HAVE_SIGWAIT': 0,
'HAVE_SIGWAITINFO': 0,
'HAVE_SNPRINTF': 1,
'HAVE_SOCKADDR_ALG': 0,
'HAVE_SOCKADDR_SA_LEN': 0,
'HAVE_SOCKADDR_STORAGE': 1,
'HAVE_SOCKETPAIR': 1,
'HAVE_SSIZE_T': 1,
'HAVE_STATVFS': 0,
'HAVE_STAT_TV_NSEC': 1,
'HAVE_STAT_TV_NSEC2': 0,
'HAVE_STDARG_PROTOTYPES': 1,
'HAVE_STD_ATOMIC': 0,
'HAVE_STRDUP': 1,
'HAVE_STRFTIME': 1,
'HAVE_STRLCPY': 1,
'HAVE_STRUCT_PASSWD_PW_GECOS': 1,
'HAVE_STRUCT_PASSWD_PW_PASSWD': 1,
'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0,
'HAVE_STRUCT_STAT_ST_BLKSIZE': 1,
'HAVE_STRUCT_STAT_ST_BLOCKS': 1,
'HAVE_STRUCT_STAT_ST_FLAGS': 0,
'HAVE_STRUCT_STAT_ST_GEN': 0,
'HAVE_STRUCT_STAT_ST_RDEV': 1,
'HAVE_STRUCT_TM_TM_ZONE': 1,
'HAVE_SYMLINK': 1,
'HAVE_SYMLINKAT': 1,
'HAVE_SYNC': 1,
'HAVE_SYSCONF': 1,
'HAVE_TCGETPGRP': 1,
'HAVE_TCSETPGRP': 1,
'HAVE_TEMPNAM': 1,
'HAVE_TGAMMA': 1,
'HAVE_TIMEGM': 1,
'HAVE_TIMES': 1,
'HAVE_TMPFILE': 1,
'HAVE_TMPNAM': 0,
'HAVE_TMPNAM_R': 0,
'HAVE_TM_ZONE': 1,
'HAVE_TRUNCATE': 1,
'HAVE_TZNAME': 0,
'HAVE_UCS4_TCL': 0,
'HAVE_UNAME': 1,
'HAVE_UNLINKAT': 1,
'HAVE_UNSETENV': 1,
'HAVE_USABLE_WCHAR_T': 1,
'HAVE_UTIMENSAT': 1,
'HAVE_UTIMES': 1,
'HAVE_WAIT': 1,
'HAVE_WAIT3': 1,
'HAVE_WAIT4': 1,
'HAVE_WAITID': 0,
'HAVE_WAITPID': 1,
'HAVE_WCSCOLL': 0,
'HAVE_WCSFTIME': 0,
'HAVE_WCSXFRM': 0,
'HAVE_WMEMCMP': 1,
'HAVE_WORKING_TZSET': 1,
'HAVE_WRITEV': 1,
'HAVE_ZLIB_COPY': 1,
'HAVE__GETPTY': 0,
'HAVE_ALLOCA_H': 1,
'HAVE_ASM_TYPES_H': 0,
'HAVE_BLUETOOTH_BLUETOOTH_H': 0,
'HAVE_BLUETOOTH_H': 0,
'HAVE_CONIO_H': 0,
'HAVE_COSMO_CAN_BCM_H': 0,
'HAVE_COSMO_CAN_H': 0,
'HAVE_COSMO_CAN_RAW_H': 0,
'HAVE_COSMO_NETLINK_H': 0,
'HAVE_COSMO_RANDOM_H': 0,
'HAVE_COSMO_TIPC_H': 0,
'HAVE_CRYPT_H': 0,
'HAVE_CURSES_H': 1,
'HAVE_DIRECT_H': 0,
'HAVE_DIRENT_H': 1,
'HAVE_DLFCN_H': 1,
'HAVE_ENDIAN_H': 1,
'HAVE_ERRNO_H': 1,
'HAVE_FCNTL_H': 1,
'HAVE_GRP_H': 1,
'HAVE_IEEEFP_H': 0,
'HAVE_INTTYPES_H': 1,
'HAVE_IO_H': 0,
'HAVE_LANGINFO_H': 0,
'HAVE_LIBINTL_H': 0,
'HAVE_LIBUTIL_H': 0,
'HAVE_MEMORY_H': 1,
'HAVE_NCURSES_H': 1,
'HAVE_NDIR_H': 0,
'HAVE_NETPACKET_PACKET_H': 0,
'HAVE_NET_IF_H': 0,
'HAVE_PROCESS_H': 0,
'HAVE_PTHREAD_H': 1,
'HAVE_PTY_H': 1,
'HAVE_SCHED_H': 1,
'HAVE_SHADOW_H': 0,
'HAVE_SIGNAL_H': 1,
'HAVE_SPAWN_H': 1,
'HAVE_STDINT_H': 1,
'HAVE_STDLIB_H': 1,
'HAVE_STRINGS_H': 1,
'HAVE_STRING_H': 1,
'HAVE_STROPTS_H': 0,
'HAVE_SYSEXITS_H': 1,
'HAVE_SYS_AUDIOIO_H': 0,
'HAVE_SYS_BSDTTY_H': 0,
'HAVE_SYS_DEVPOLL_H': 0,
'HAVE_SYS_DIR_H': 1,
'HAVE_SYS_ENDIAN_H': 0,
'HAVE_SYS_EPOLL_H': 1,
'HAVE_SYS_EVENT_H': 0,
'HAVE_SYS_FILE_H': 1,
'HAVE_SYS_IOCTL_H': 1,
'HAVE_SYS_KERN_CONTROL_H': 0,
'HAVE_SYS_LOADAVG_H': 0,
'HAVE_SYS_LOCK_H': 0,
'HAVE_SYS_MKDEV_H': 0,
'HAVE_SYS_MODEM_H': 0,
'HAVE_SYS_NDIR_H': 0,
'HAVE_SYS_PARAM_H': 1,
'HAVE_SYS_POLL_H': 1,
'HAVE_SYS_RANDOM_H': 1,
'HAVE_SYS_RESOURCE_H': 1,
'HAVE_SYS_SELECT_H': 1,
'HAVE_SYS_SENDFILE_H': 1,
'HAVE_SYS_SOCKET_H': 1,
'HAVE_SYS_STATVFS_H': 1,
'HAVE_SYS_STAT_H': 1,
'HAVE_SYS_SYSCALL_H': 1,
'HAVE_SYS_SYSMACROS_H': 1,
'HAVE_SYS_SYS_DOMAIN_H': 0,
'HAVE_SYS_TERMIO_H': 0,
'HAVE_SYS_TIMES_H': 1,
'HAVE_SYS_TIME_H': 1,
'HAVE_SYS_TYPES_H': 1,
'HAVE_SYS_UIO_H': 0,
'HAVE_SYS_UN_H': 1,
'HAVE_SYS_UTSNAME_H': 1,
'HAVE_SYS_WAIT_H': 1,
'HAVE_SYS_XATTR_H': 1,
'HAVE_TERMIOS_H': 1,
'HAVE_TERM_H': 1,
'HAVE_UNISTD_H': 1,
'HAVE_UTIL_H': 0,
'HAVE_UTIME_H': 1,
'HAVE_WCHAR_H': 1,
'HOST_GNU_TYPE': 'x86_64-pc-cosmo-gnu',
'INCLDIRSTOMAKE': '/include /include /include/python3.6m /include/python3.6m',
'INCLUDEDIR': '/include',
'INCLUDEPY': '/include/python3.6m',
'INSTALL': '/usr/bin/install -c',
'INSTALL_DATA': '/usr/bin/install -c -m 644',
'INSTALL_PROGRAM': '/usr/bin/install -c',
'INSTALL_SCRIPT': '/usr/bin/install -c',
'INSTALL_SHARED': '/usr/bin/install -c -m 555',
'INSTSONAME': 'libpython3.6m.a',
'IO_OBJS': '\\',
'LDCXXSHARED': 'g++ -shared',
'LDFLAGS': '-static -nostdlib -nostdinc -fno-pie -mno-red-zone '
'-include '
'/cpython36/libcosmo/cosmopolitan.h',
'LDFLAGS_NODIST': '',
'LDLAST': '',
'LDLIBRARY': 'libpython3.6m.a',
'LDLIBRARYDIR': '',
'LDSHARED': 'gcc -shared -static -nostdlib -nostdinc -fno-pie '
'-mno-red-zone -include '
'/cpython36/libcosmo/cosmopolitan.h',
'LDVERSION': '3.6m',
'LIBC': '',
'LIBDEST': '/lib/python3.6',
'LIBDIR': '/lib',
'LIBFFI_INCLUDEDIR': '',
'LIBM': '',
'LIBOBJDIR': 'Python/',
'LIBOBJS': '',
'LIBPC': '/lib/pkgconfig',
'LIBPL': '/lib/python3.6/config-3.6m-x86_64-cosmo-gnu',
'LIBRARY': 'libpython3.6m.a',
'LIBRARY_OBJS': '\\',
'LIBRARY_OBJS_OMIT_FROZEN': '\\',
'LIBS': '-Wl,--gc-sections -fuse-ld=bfd '
'-Wl,-T,/cpython36/libcosmo/ape.lds '
'-include '
'/cpython36/libcosmo/cosmopolitan.h '
'/cpython36/libcosmo/crt.o '
'/cpython36/libcosmo/ape.o '
'/cpython36/libcosmo/cosmopolitan.a',
'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\',
'LINKCC': 'gcc',
'LINKFORSHARED': '',
'LIPO_32BIT_FLAGS': '',
'LLVM_PROF_ERR': 'no',
'LLVM_PROF_FILE': '',
'LLVM_PROF_MERGER': 'true',
'LN': 'ln',
'LOCALMODLIBS': '',
'LOG1P_DROPS_ZERO_SIGN': 0,
'MACHDEP': 'cosmo',
'MACHDEPPATH': ':',
'MACHDEP_OBJS': '',
'MACHDESTLIB': '/lib/python3.6',
'MACOSX_DEPLOYMENT_TARGET': '',
'MAINCC': 'gcc',
'MAJOR_IN_MKDEV': 0,
'MAJOR_IN_SYSMACROS': 0,
'MAKESETUP': './Modules/makesetup',
'MANDIR': '/share/man',
'MKDIR_P': '/usr/bin/mkdir -p',
'MODLIBS': '',
'MODNAMES': '_decimal posix errno pwd _sre '
'_codecs _functools _operator _collections itertools atexit '
'_signal _stat time _locale _io zipimport faulthandler '
'_symtable array cmath math _struct _weakref '
'_testcapi _random _elementtree _pickle _datetime _bisect _heapq '
'unicodedata fcntl grp select mmap _csv _socket resource '
'_posixsubprocess _md5 _sha1 _sha256 _sha512 _sha3 syslog '
'binascii parser fpectl zlib pyexpat _multibytecodec _codecs_cn '
'_codecs_hk _codecs_iso2022 _codecs_jp _codecs_kr _codecs_tw '
'_json _lsprof _opcode',
'MODOBJS': 'Modules/_decimal.o Modules/basearith.o Modules/constants.o '
'Modules/context.o Modules/convolute.o Modules/crt.o '
'Modules/difradix2.o Modules/fnt.o Modules/fourstep.o Modules/io.o '
'Modules/memory.o Modules/mpdecimal.o Modules/numbertheory.o '
'Modules/sixstep.o Modules/transpose.o '
'Modules/posixmodule.o Modules/errnomodule.o '
'Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o '
'Modules/_functoolsmodule.o Modules/_operator.o '
'Modules/_collectionsmodule.o Modules/itertoolsmodule.o '
'Modules/atexitmodule.o Modules/signalmodule.o Modules/_stat.o '
'Modules/timemodule.o Modules/_localemodule.o '
'Modules/_iomodule.o Modules/iobase.o Modules/fileio.o '
'Modules/bytesio.o Modules/bufferedio.o Modules/textio.o '
'Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o '
'Modules/hashtable.o '
'Modules/symtablemodule.o Modules/arraymodule.o '
'Modules/cmathmodule.o Modules/mathmodule.o Modules/_math.o '
'Modules/_struct.o Modules/_weakref.o Modules/_testcapimodule.o '
'Modules/_randommodule.o Modules/_elementtree.o '
'Modules/_pickle.o Modules/_datetimemodule.o '
'Modules/_bisectmodule.o Modules/_heapqmodule.o '
'Modules/unicodedata.o Modules/fcntlmodule.o '
'Modules/grpmodule.o Modules/selectmodule.o '
'Modules/mmapmodule.o Modules/_csv.o Modules/socketmodule.o '
'Modules/resource.o Modules/_posixsubprocess.o '
'Modules/md5module.o Modules/sha1module.o '
'Modules/sha256module.o Modules/sha512module.o '
'Modules/_sha3.o '
'Modules/syslogmodule.o Modules/binascii.o '
'Modules/parsermodule.o Modules/fpectlmodule.o '
'Modules/zlibmodule.o Modules/xmlparse.o Modules/xmlrole.o '
'Modules/xmltok.o Modules/pyexpat.o Modules/multibytecodec.o '
'Modules/_codecs_cn.o Modules/_codecs_hk.o '
'Modules/_codecs_iso2022.o Modules/_codecs_jp.o '
'Modules/_codecs_kr.o Modules/_codecs_tw.o '
'Modules/_json.o Modules/_lsprof.o Modules/rotatingtree.o '
'Modules/_opcode.o',
'MODULE_OBJS': '\\',
'MULTIARCH': 'x86_64-cosmo-gnu',
'MULTIARCH_CPPFLAGS': '-DMULTIARCH=\\"x86_64-cosmo-gnu\\"',
'MVWDELCH_IS_EXPRESSION': 1,
'NO_AS_NEEDED': '-Wl,--no-as-needed',
'OBJECT_OBJS': '\\',
'OPT': '-DNDEBUG',
'OTHER_LIBTOOL_OPT': '',
'PACKAGE_BUGREPORT': 0,
'PACKAGE_NAME': 0,
'PACKAGE_STRING': 0,
'PACKAGE_TARNAME': 0,
'PACKAGE_URL': 0,
'PACKAGE_VERSION': 0,
'PARSER_HEADERS': '\\',
'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o',
'PGEN': 'Parser/pgen.com.dbg',
'PGENOBJS': '\\ \\',
'PGOBJS': '\\',
'PGO_PROF_GEN_FLAG': '-fprofile-generate',
'PGO_PROF_USE_FLAG': '-fprofile-use -fprofile-correction',
'PLATDIR': '',
'POBJS': '\\',
'POSIX_SEMAPHORES_NOT_ENABLED': 1,
'PROFILE_TASK': '-m test.regrtest --pgo',
'PTHREAD_SYSTEM_SCHED_SUPPORTED': 0,
'PURIFY': '',
'PY3LIBRARY': '',
'PYEXPAT_ENTROPY': "'XML_POOR_ENTROPY=1'",
'PYLONG_BITS_IN_DIGIT': 0,
'PYTHON': 'python.com.dbg',
'PYTHONFRAMEWORK': '',
'PYTHONFRAMEWORKDIR': 'no-framework',
'PYTHONFRAMEWORKINSTALLDIR': '',
'PYTHONFRAMEWORKPREFIX': '',
'PYTHONPATH': ':',
'PYTHON_APE': 'python.com',
'PYTHON_FOR_BUILD': './python.com.dbg -E',
'PYTHON_FOR_REGEN': 'python3',
'PYTHON_HEADERS': '\\',
'PYTHON_OBJS': '\\',
'PY_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -Os -Wall '
'-Wno-strict-prototypes -Wno-unused-value -std=c99 '
'-static -fno-pie -fno-omit-frame-pointer '
'-ffunction-sections -fdata-sections -mno-red-zone '
'-pg -nostdinc -nostdlib '
'-I/cpython36/header_stubs '
'-include '
'/cpython36/libcosmo/cosmopolitan.h',
'PY_CFLAGS_NODIST': '-Os -Wall -Wno-strict-prototypes -Wno-unused-value '
'-std=c99 -static -fno-pie '
'-fno-omit-frame-pointer -mno-red-zone -pg '
'-nostdinc -nostdlib '
'-I/cpython36/header_stubs '
'-include '
'/cpython36/libcosmo/cosmopolitan.h '
'-std=c99 -Wextra -Wno-unused-result '
'-Wno-unused-parameter -Wno-missing-field-initializers '
'-Wno-cast-function-type',
'PY_CORE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -Os -Wall '
'-Wno-strict-prototypes -Wno-unused-value -std=c99 '
'-static -fno-pie -fno-omit-frame-pointer '
'-ffunction-sections -fdata-sections -mno-red-zone '
'-pg -nostdinc -nostdlib '
'-I/cpython36/header_stubs '
'-include '
'/cpython36/libcosmo/cosmopolitan.h '
'-Os -Wall -Wno-strict-prototypes -Wno-unused-value '
'-std=c99 -static -fno-pie '
'-fno-omit-frame-pointer -mno-red-zone -pg '
'-nostdinc -nostdlib '
'-I/cpython36/header_stubs '
'-include '
'/cpython36/libcosmo/cosmopolitan.h '
'-std=c99 -Wextra -Wno-unused-result -Wno-unused-parameter '
'-Wno-missing-field-initializers -Wno-cast-function-type '
'-I. -I./Include -DPy_BUILD_CORE',
'PY_CORE_LDFLAGS': '-static -nostdlib -nostdinc -fno-pie '
'-mno-red-zone -include '
'/cpython36/libcosmo/cosmopolitan.h',
'PY_CPPFLAGS': '-I. -I./Include',
'PY_FORMAT_SIZE_T': '"z"',
'PY_LDFLAGS': '-static -nostdlib -nostdinc -fno-pie -mno-red-zone '
'-include '
'/cpython36/libcosmo/cosmopolitan.h',
'PY_LDFLAGS_NODIST': '',
'Py_DEBUG': 0,
'Py_ENABLE_SHARED': 0,
'Py_HASH_ALGORITHM': 0,
'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\',
'READELF': 'readelf',
'RESSRCDIR': 'Mac/Resources/framework',
'RETSIGTYPE': 'void',
'RUNSHARED': '',
'SCRIPTDIR': '/lib',
'SETPGRP_HAVE_ARG': 0,
'SGI_ABI': '',
'SHELL': '/bin/sh',
'SHLIBS': '-Wl,--gc-sections -fuse-ld=bfd '
'-Wl,-T,/cpython36/libcosmo/ape.lds '
'-include '
'/cpython36/libcosmo/cosmopolitan.h '
'/cpython36/libcosmo/crt.o '
'/cpython36/libcosmo/ape.o '
'/cpython36/libcosmo/cosmopolitan.a',
'SHLIB_SUFFIX': '.so',
'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0,
'SITEPATH': '',
'SIZEOF_DOUBLE': 8,
'SIZEOF_FLOAT': 4,
'SIZEOF_FPOS_T': 8,
'SIZEOF_INT': 4,
'SIZEOF_LONG': 8,
'SIZEOF_LONG_DOUBLE': 16,
'SIZEOF_LONG_LONG': 8,
'SIZEOF_OFF_T': 8,
'SIZEOF_PID_T': 4,
'SIZEOF_PTHREAD_T': 8,
'SIZEOF_SHORT': 2,
'SIZEOF_SIZE_T': 8,
'SIZEOF_TIME_T': 8,
'SIZEOF_UINTPTR_T': 8,
'SIZEOF_VOID_P': 8,
'SIZEOF_WCHAR_T': 4,
'SIZEOF__BOOL': 1,
'SOABI': 'cpython-36m-x86_64-cosmo',
'SRCDIRS': 'Parser Objects Python Modules Programs',
'SRC_GDB_HOOKS': './Tools/gdb/libpython.py',
'STDC_HEADERS': 1,
'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */",
'STRIPFLAG': '-s',
'SUBDIRS': '',
'SUBDIRSTOO': 'Include Lib Misc',
'SYSLIBS': '',
'SYS_SELECT_WITH_SYS_TIME': 1,
'TANH_PRESERVES_ZERO_SIGN': 1,
'TCLTK_INCLUDES': '',
'TCLTK_LIBS': '',
'TESTOPTS': '',
'TESTPATH': '',
'TESTPYTHON': './python.com.dbg',
'TESTPYTHONOPTS': '',
'TESTRUNNER': './python.com.dbg ./Tools/scripts/run_tests.py',
'TESTTIMEOUT': 1200,
'THREADOBJ': '',
'TIMEMODULE_LIB': 0,
'TIME_WITH_SYS_TIME': 1,
'TM_IN_SYS_TIME': 0,
'UNICODE_DEPS': '\\',
'UNIVERSALSDK': '',
'UPDATE_FILE': 'python3 ./Tools/scripts/update_file.py',
'USE_COMPUTED_GOTOS': 1,
'USE_INLINE': 1,
'VERSION': '3.6',
'WANT_SIGFPE_HANDLER': 0,
'WINDOW_HAS_FLAGS': 1,
'WITH_DOC_STRINGS': 1,
'WITH_DTRACE': 0,
'WITH_DYLD': 0,
'WITH_LIBINTL': 0,
'WITH_NEXT_FRAMEWORK': 0,
'WITH_PYMALLOC': 1,
'WITH_THREAD': 1,
'WITH_VALGRIND': 0,
'X87_DOUBLE_ROUNDING': 0,
'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax',
'abs_builddir': '/cpython36',
'abs_srcdir': '/cpython36',
'datarootdir': '/share',
'exec_prefix': '',
'prefix': '',
'srcdir': '.'}
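# Example (illustrative): this module is not meant to be imported directly;
# sysconfig loads it by name and serves the dict above through its query API:
#
#     import sysconfig
#     sysconfig.get_config_var('CC')             # -> 'gcc'
#     sysconfig.get_config_var('SIZEOF_VOID_P')  # -> 8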
cosmopolitan/third_party/python/Lib/_threading_local.py
"""Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... def __init__(self, **kw):
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red')], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled into the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
"""A class managing thread-local dicts"""
__slots__ = "key", "dicts", "localargs", "locallock", "__weakref__"
def __init__(self):
# The key used in the Thread objects' attribute dicts.
# We keep it a string for speed but make it unlikely to clash with
# a "real" attribute.
self.key = "_threading_local._localimpl." + str(id(self))
# { id(Thread) -> (ref(Thread), thread-local dict) }
self.dicts = {}
def get_dict(self):
"""Return the dict for the current thread. Raises KeyError if none
defined."""
thread = current_thread()
return self.dicts[id(thread)][1]
def create_dict(self):
"""Create a new dict for the current thread, and return it."""
localdict = {}
key = self.key
thread = current_thread()
idt = id(thread)
def local_deleted(_, key=key):
# When the localimpl is deleted, remove the thread attribute.
thread = wrthread()
if thread is not None:
del thread.__dict__[key]
def thread_deleted(_, idt=idt):
# When the thread is deleted, remove the local dict.
# Note that this is suboptimal if the thread object gets
# caught in a reference loop. We would like to be called
# as soon as the OS-level thread ends instead.
local = wrlocal()
if local is not None:
dct = local.dicts.pop(idt)
wrlocal = ref(self, local_deleted)
wrthread = ref(thread, thread_deleted)
thread.__dict__[key] = wrlocal
self.dicts[idt] = wrthread, localdict
return localdict
@contextmanager
def _patch(self):
impl = object.__getattribute__(self, "_local__impl")
try:
dct = impl.get_dict()
except KeyError:
dct = impl.create_dict()
args, kw = impl.localargs
self.__init__(*args, **kw)
with impl.locallock:
object.__setattr__(self, "__dict__", dct)
yield
class DummyList(list):
    # like list, but pop() on an empty list returns None instead of raising
    def pop(self, index=-1):
        try:
            return super().pop(index)
        except IndexError:
            return None
class local:
__slots__ = "_local__impl", "__dict__"
def __new__(cls, *args, **kw):
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
self = object.__new__(cls)
impl = _localimpl()
impl.localargs = (args, kw)
impl.locallock = RLock()
stack = DummyList()
object.__setattr__(self, "_local__impl", impl)
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
impl.create_dict()["stack"] = stack
return self
def __getattribute__(self, name):
with _patch(self):
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name == "__dict__":
raise AttributeError(
"%r object attribute '__dict__' is read-only" % self.__class__.__name__
)
with _patch(self):
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name == "__dict__":
raise AttributeError(
"%r object attribute '__dict__' is read-only" % self.__class__.__name__
)
with _patch(self):
return object.__delattr__(self, name)
from threading import current_thread, RLock
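# Example (illustrative): user code should not import this module directly;
# `threading` re-exports the fastest available implementation, so the usual
# pattern is simply:
#
#     import threading
#     ctx = threading.local()
#     ctx.user = 'alice'    # visible only in the thread that set it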
cosmopolitan/third_party/python/Lib/heapq.py
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs; the merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
return returnitem
return lastelt
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
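# Example (illustrative): heappushpop() returns the pushed item unchanged
# when it is no larger than the current minimum, otherwise it swaps and sifts:
#
#     h = [1, 3, 5]
#     heappushpop(h, 0)   # -> 0; h is unchanged
#     heappushpop(h, 4)   # -> 1; h is now the heap [3, 4, 5]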
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt
def _heapreplace_max(heap, item):
"""Maxheap version of a heappop followed by a heappush."""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup_max(heap, 0)
return returnitem
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but it turns out that's not a good idea, even though
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
def merge(*iterables, key=None, reverse=False):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
If *key* is not None, applies a key function to each element to determine
its sort order.
>>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
['dog', 'cat', 'fish', 'horse', 'kangaroo']
'''
h = []
h_append = h.append
if reverse:
_heapify = _heapify_max
_heappop = _heappop_max
_heapreplace = _heapreplace_max
direction = -1
else:
_heapify = heapify
_heappop = heappop
_heapreplace = heapreplace
direction = 1
if key is None:
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), order * direction, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
value, order, next = s = h[0]
yield value
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
value, order, next = h[0]
yield value
yield from next.__self__
return
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
value = next()
h_append([key(value), order * direction, value, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
key_value, order, value, next = s = h[0]
yield value
value = next()
s[0] = key(value)
s[2] = value
_heapreplace(h, s)
except StopIteration:
_heappop(h)
if h:
key_value, order, value, next = h[0]
yield value
yield from next.__self__
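# Example (illustrative): merge() also accepts reverse=True, in which case
# every input stream must already be sorted largest-to-smallest:
#
#     list(merge([9, 5, 1], [8, 2], reverse=True))   # -> [9, 8, 5, 2, 1]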
# Algorithm notes for nlargest() and nsmallest()
# ==============================================
#
# Make a single pass over the data while keeping the k most extreme values
# in a heap. Memory consumption is limited to keeping k values in a list.
#
# Measured performance for random inputs:
#
# number of comparisons
# n inputs k-extreme values (average of 5 trials) % more than min()
# ------------- ---------------- --------------------- -----------------
# 1,000 100 3,317 231.7%
# 10,000 100 14,046 40.5%
# 100,000 100 105,749 5.7%
# 1,000,000 100 1,007,751 0.8%
# 10,000,000 100 10,009,401 0.1%
#
# Theoretical number of comparisons for k smallest of n random inputs:
#
# Step Comparisons Action
# ---- -------------------------- ---------------------------
# 1 1.66 * k heapify the first k-inputs
# 2 n - k compare remaining elements to top of heap
# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
#
# Combining and simplifying for a rough estimate gives:
#
# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
#
# Computing the number of comparisons for step 3:
# -----------------------------------------------
# * For the i-th new value from the iterable, the probability of being in the
# k most extreme values is k/i. For example, the probability of the 101st
# value seen being in the 100 most extreme values is 100/101.
# * If the value is a new extreme value, the cost of inserting it into the
# heap is 1 + log(k, 2).
# * The probability times the cost gives:
# (k/i) * (1 + log(k, 2))
# * Summing across the remaining n-k elements gives:
# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
# * This reduces to:
# (H(n) - H(k)) * k * (1 + log(k, 2))
# * Where H(n) is the n-th harmonic number estimated by:
# gamma = 0.5772156649
# H(n) = log(n, e) + gamma + 1 / (2 * n)
# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
# * Substituting the H(n) formula:
# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
#
# Worst-case for step 3:
# ----------------------
# In the worst case, the input data is reverse-sorted, so that every new element
# must be inserted in the heap:
#
# comparisons = 1.66 * k + log(k, 2) * (n - k)
#
# Alternative Algorithms
# ----------------------
# Other algorithms were not used because they:
# 1) Took much more auxiliary memory,
# 2) Made multiple passes over the data.
# 3) Made more comparisons in common cases (small k, large n, semi-random input).
# See a more detailed comparison of approaches at:
# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = min(it, default=sentinel)
else:
result = min(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
# put the range(n) first so that zip() doesn't
# consume one too many elements from the iterator
result = [(elem, i) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
if elem < top:
_heapreplace(result, (elem, order))
top = result[0][0]
order += 1
result.sort()
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
k = key(elem)
if k < top:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order += 1
result.sort()
return [r[2] for r in result]
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = max(it, default=sentinel)
else:
result = max(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
if top < elem:
_heapreplace(result, (elem, order))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
k = key(elem)
if top < k:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[2] for r in result]
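# Example (illustrative): both helpers accept a key function, mirroring
# sorted():
#
#     nlargest(2, [{'n': 3}, {'n': 9}, {'n': 5}], key=lambda d: d['n'])
#     # -> [{'n': 9}, {'n': 5}]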
try:
from _heapq import *
from _heapq import _heapreplace_max
from _heapq import _heapify_max
from _heapq import _heappop_max
except ImportError:
pass
if __name__ == "PYOBJ.COM":
import _heapq
if __name__ == "__main__":
try:
import sys
import doctest
except ImportError:
sys.exit(1)
print(doctest.testmod())
cosmopolitan/third_party/python/Lib/hashlib.py
# Copyright (C) 2005-2010 Gregory P. Smith ([email protected])
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, data=b'', **kwargs) - returns a new hash object implementing
the given hash function; initializing
the hash using the given binary data.
Named constructor functions are also available; these are faster
than using new(name):
md5(), sha1(), sha224(), sha256(), sha384(), sha512(), sha3_224(),
sha3_256(), sha3_384(), sha3_512(), shake_128(), shake_256(), and
finally blake2b256(), which is an Actually Portable Python feature:
- zlib.crc32 n=22851 46 ps/byte 20 GB/s
- hashlib.md5 n=22851 1 ns/byte 676 mb/s
- hashlib.sha1 n=22851 516 ps/byte 1,892 mb/s
- hashlib.sha256 n=22851 537 ps/byte 1,818 mb/s
- hashlib.sha384 n=22851 1 ns/byte 800 mb/s
- hashlib.sha512 n=22851 1 ns/byte 802 mb/s
- hashlib.blake2b256 n=22851 1 ns/byte 712 mb/s
More algorithms may be available on your platform but the above are
guaranteed to exist. See algorithms_guaranteed/algorithms_available
to find out what algorithm names can be passed to new().
NOTE: If you want the adler32 or crc32 hash functions they are available
in the zlib module.
Hash objects have these methods:
- update(data): Update the hash object with the bytes in data. Repeated
calls are equivalent to a single call with the
concatenation of all the arguments.
- digest(): Return the digest of the bytes passed to the update()
method so far as a bytes object.
- hexdigest(): Like digest() except the digest is returned as a string
of double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be
               used to efficiently compute the digests of inputs that
share a common initial substring.
For example, to obtain the digest of the byte string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.blake2b256()
>>> m.update(b"Science is what we understand well enough to explain ")
>>> m.update(b"to a computer; art is everything else. -D.E. Knuth")
>>> m.digest().hex()
'e246f77a8c37bd2f601a47273846f085ec3000e1c1a692b82e76921410386e56'
More condensed:
>>> hashlib.sha224(b"Nobody inspects the spammish repetition").digest().hex()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
import _hashlib as _prevent_recursive_loading
del _prevent_recursive_loading
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = (
'md5',
'sha1',
'sha224', 'sha256', 'sha384', 'sha512',
# 'sha3_224', 'sha3_256', 'sha3_384',
# 'sha3_512', 'shake_128', 'shake_256',
'blake2b256',
)
algorithms_guaranteed = set(__always_supported)
algorithms_available = set(__always_supported)
__all__ = __always_supported + ('new', 'algorithms_guaranteed',
'algorithms_available', 'pbkdf2_hmac')
__builtin_constructor_cache = {}
def __get_builtin_constructor(name):
cache = __builtin_constructor_cache
constructor = cache.get(name)
if constructor is not None:
return constructor
try:
if name in ('SHA1', 'sha1'):
import _sha1
cache['SHA1'] = cache['sha1'] = _sha1.sha1
elif name in ('MD5', 'md5'):
import _md5
cache['MD5'] = cache['md5'] = _md5.md5
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
cache['SHA224'] = cache['sha224'] = _sha256.sha224
cache['SHA256'] = cache['sha256'] = _sha256.sha256
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
cache['SHA384'] = cache['sha384'] = _sha512.sha384
cache['SHA512'] = cache['sha512'] = _sha512.sha512
elif name in {'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512',
'shake_128', 'shake_256'}:
import _sha3
cache['sha3_224'] = _sha3.sha3_224
cache['sha3_256'] = _sha3.sha3_256
cache['sha3_384'] = _sha3.sha3_384
cache['sha3_512'] = _sha3.sha3_512
cache['shake_128'] = _sha3.shake_128
cache['shake_256'] = _sha3.shake_256
except ImportError:
pass # no extension module, this hash is unsupported.
constructor = cache.get(name)
if constructor is not None:
return constructor
raise ValueError('unsupported hash type ' + name)
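# Example (illustrative): this builtin lookup is what backs new() whenever
# the accelerated _hashlib module lacks an algorithm, so these two calls
# always agree:
#
#     hashlib.new('md5', b'abc').hexdigest() == hashlib.md5(b'abc').hexdigest()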
def __get_mbedtls_constructor(name):
try:
f = getattr(_hashlib, 'mbedtls_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to Mbedtls.
f()
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
return __get_builtin_constructor(name)
def __py_new(name, data=b'', **kwargs):
"""new(name, data=b'', **kwargs) - Return a new hashing object using the
named algorithm; optionally initialized with data (which must be
a bytes-like object).
"""
return __get_builtin_constructor(name)(data, **kwargs)
def __hash_new(name, data=b'', **kwargs):
"""new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be a bytes-like object).
"""
try:
return _hashlib.new(name, data)
except ValueError:
# If the _hashlib module (Mbedtls) doesn't support the named
# hash, try using our builtin implementations.
        # This allows for SHA224/256 and SHA384/512 support even when
        # the linked crypto library doesn't provide them.
return __get_builtin_constructor(name)(data)
try:
import _hashlib
new = __hash_new
__get_hash = __get_mbedtls_constructor
algorithms_available = algorithms_available.union(
_hashlib.mbedtls_md_meth_names)
except ImportError as e:
new = __py_new
__get_hash = __get_builtin_constructor
try:
        # scrypt is optional; it is only exported when the linked crypto library provides it
from _hashlib import scrypt
except ImportError:
pass
md5 = _hashlib.mbedtls_md5
sha1 = _hashlib.mbedtls_sha1
sha224 = _hashlib.mbedtls_sha224
sha256 = _hashlib.mbedtls_sha256
sha384 = _hashlib.mbedtls_sha384
sha512 = _hashlib.mbedtls_sha512
blake2b256 = _hashlib.mbedtls_blake2b256
pbkdf2_hmac = _hashlib.pbkdf2_hmac
try:
sha3_224 = __get_builtin_constructor('sha3_224')
sha3_256 = __get_builtin_constructor('sha3_256')
sha3_384 = __get_builtin_constructor('sha3_384')
sha3_512 = __get_builtin_constructor('sha3_512')
shake_128 = __get_builtin_constructor('shake_128')
shake_256 = __get_builtin_constructor('shake_256')
except (ImportError, ValueError):
pass # [jart] modified to not force using sha3
# Cleanup locals()
del __always_supported, __get_hash
del __py_new, __hash_new, __get_mbedtls_constructor
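# Example (illustrative): besides the digest constructors, pbkdf2_hmac is
# always exported for password-based key derivation; dklen defaults to the
# digest size of the chosen hash:
#
#     import hashlib
#     dk = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000)
#     dk.hex()   # 64 hex digits, i.e. a 32-byte derived key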
cosmopolitan/third_party/python/Lib/pydoc.py
#!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
At the Python interactive prompt, calling help(thing) on a Python object
documents the object, and calling help() starts up an interactive
help session.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
https://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - synopsis() cannot be prevented from clobbering existing
# loaded modules.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import cosmo
import builtins
import importlib._bootstrap
import importlib._bootstrap_external
import importlib.machinery
import importlib.util
import inspect
import io
import os
import pkgutil
import platform
import re
import sys
import time
import tokenize
import urllib.parse
import warnings
from collections import deque
from reprlib import Repr
from traceback import format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', result.rstrip()) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = doc.strip().split('\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not lines[1].rstrip():
return lines[0], '\n'.join(lines[2:])
return '', '\n'.join(lines)
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = pairs[1].join(text.split(pairs[0]))
pairs = pairs[2:]
return text
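# Illustrative behavior: the pairs apply left to right, so
# replace('a b', ' ', '_', 'a', 'A') == 'A_b'.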
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
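# Illustrative behavior: cram('0123456789', 9) == '012...789'; strings at
# or under maxlen come back unchanged.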
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
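# Illustrative behavior (the address is hypothetical):
#   stripid('<function f at 0x7f3a2c0b5e18>') == '<function f>'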
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def _is_bound_method(fn):
"""
Returns True if fn is a bound method, regardless of whether
fn was implemented in Python or in C.
"""
if inspect.ismethod(fn):
return True
if inspect.isbuiltin(fn):
self = getattr(fn, '__self__', None)
return not (inspect.ismodule(self) or (self is None))
return False
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
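# Illustrative behavior: _split_list([1, 2, 3, 4], lambda x: x % 2)
# returns ([1, 3], [2, 4]).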
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant or internal.
# XXX Remove __initializing__?
if name in {'__author__', '__builtins__', '__cached__', '__credits__',
'__date__', '__doc__', '__file__', '__spec__',
'__loader__', '__module__', '__name__', '__package__',
'__path__', '__qualname__', '__slots__', '__version__'}:
return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return True
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
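# Illustrative behavior: visiblename('_helper') returns False, but
# visiblename('_helper', all=['_helper']) returns True; __all__ wins.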
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
results = []
for (name, kind, cls, value) in inspect.classify_class_attrs(object):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
results.append((name, kind, cls, value))
return results
def sort_attributes(attrs, object):
'Sort the attrs list in-place by _fields and then alphabetically by name'
# This allows data descriptors to be ordered according
# to a _fields attribute if present.
fields = getattr(object, '_fields', [])
try:
field_order = {name : i-len(fields) for (i, name) in enumerate(fields)}
except TypeError:
field_order = {}
keyfunc = lambda attr: (field_order.get(attr[0], 0), attr[0])
attrs.sort(key=keyfunc)
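# Illustrative behavior: with _fields == ('x', 'y'), the attributes named
# 'x' and 'y' get sort keys -2 and -1 and so precede everything else (key 0).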
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not line.strip():
line = file.readline()
if not line: break
line = line.strip()
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not line.strip():
line = file.readline()
if not line: break
result = line.split('"""')[0].strip()
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
# Look for binary suffixes first, falling back to source.
if filename.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
loader_cls = importlib.machinery.SourcelessFileLoader
elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
loader_cls = importlib.machinery.ExtensionFileLoader
else:
loader_cls = None
# Now handle the choice.
if loader_cls is None:
# Must be a source file.
try:
file = tokenize.open(filename)
except OSError:
# module can't be opened, so skip it
return None
# text modules can be directly examined
with file:
result = source_synopsis(file)
else:
# Must be a binary module, which has to be imported.
loader = loader_cls('__temp__', filename)
# XXX We probably don't need to pass in the loader here.
spec = importlib.util.spec_from_file_location('__temp__', filename,
loader=loader)
try:
module = importlib._bootstrap._load(spec)
except:
return None
del sys.modules['__temp__']
result = module.__doc__.splitlines()[0] if module.__doc__ else None
# Cache the result.
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
self.filename = filename
self.exc, self.value, self.tb = exc_info
def __str__(self):
exc = self.exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = importlib.util.MAGIC_NUMBER
with open(path, 'rb') as file:
is_bytecode = magic == file.read(len(magic))
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
if is_bytecode:
loader = importlib._bootstrap_external.SourcelessFileLoader(name, path)
else:
loader = importlib._bootstrap_external.SourceFileLoader(name, path)
# XXX We probably don't need to pass in the loader here.
spec = importlib.util.spec_from_file_location(name, path, loader=loader)
try:
return importlib._bootstrap._load(spec)
except:
raise ErrorDuringImport(path, sys.exc_info())
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Remove the module from sys.modules and re-import to try
# and avoid problems with partially loaded modules.
# Also remove any submodules because they won't appear
# in the newly loaded module's namespace if they're already
# in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif issubclass(exc, ImportError) and value.name == path:
# No such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in path.split('.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
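# Illustrative behavior: safeimport('xml.dom') returns the xml.dom submodule
# (the end of the dotted path), and a nonexistent name yields None.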
# ---------------------------------------------------- formatter base class
class Doc:
PYTHONDOCS = os.environ.get("PYTHONDOCS",
"https://docs.python.org/%d.%d/library"
% sys.version_info[:2])
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError(message)
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object,
basedir=os.path.join(sys.base_exec_prefix, "lib",
"python%d.%d" % sys.version_info[:2])):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
basedir = os.path.normcase(basedir)
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'_thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith(("http://", "https://")):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
else:
docloc = os.path.join(docloc, object.__name__.lower() + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
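    # Layout sketch: with cols=4 and 10 items, rows == 3, so the columns
    # hold items (0, 1, 2), (3, 4, 5), (6, 7, 8) and (9,) top to bottom.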
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif selfdot:
# Create a link for methods like 'self.method(...)'
# and use <strong> for attributes like 'self.attr'
if text[end:end+1] == '(':
results.append('self.' + self.namelink(name, methods))
else:
results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + ', '.join(parents) + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = name.split('.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
('.'.join(parts[:i+1]), parts[i]))
linkedname = '.'.join(links + parts[-1:])
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = urllib.parse.quote(path)
filelink = self.filelink(url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % ', '.join(info)
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda t: self.modulelink(t[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = [value for (key, value) in classes]
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
sort_attributes(attrs, object)
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if _is_bound_method(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % self.classlink(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if cl and inspect.getattr_static(cl, realname, []) is object:
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
argspec = None
if inspect.isroutine(object):
try:
signature = inspect.signature(object)
except (ValueError, TypeError):
signature = None
if signature:
argspec = str(signature)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
                    # XXX lambdas won't usually have func_annotations['return']
                    # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
if not argspec:
argspec = '(...)'
decl = title + self.escape(argspec) + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):
# ignore a module if its name contains a surrogate character
continue
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + '_'.join(type(x).__name__.split())
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join(ch + '\b' + ch for ch in text)
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = [prefix + line for line in text.split('\n')]
if lines: lines[-1] = lines[-1].rstrip()
return '\n'.join(lines)
def section(self, title, contents):
"""Format a section with a given heading."""
clean_contents = self.indent(contents).rstrip()
return self.bold(title) + '\n' + clean_contents + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = (classname(c, modname) for c in bases)
result = result + '(%s)' % ', '.join(parents)
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
all = getattr(object, '__all__', None)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE REFERENCE', docloc + """
The following documentation is automatically generated from the Python
source files. It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations. When in doubt, consult the module reference at the
location listed above.
""")
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', '\n'.join(modpkgs))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', '\n'.join(submodules))
if classes:
classlist = [value for key, value in classes]
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', '\n'.join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', '\n'.join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', '\n'.join(contents))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = version[11:-1].strip()
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % ', '.join(parents)
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
try:
obj = getattr(object, name)
except AttributeError:
obj = homecls.__dict__[name]
push(self.docother(obj, name, mod, maxlen=70, doc=doc) +
'\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
sort_attributes(attrs, object)
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
        return title + '\n' + self.indent(contents.rstrip(), ' |  ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if _is_bound_method(object):
imclass = object.__self__.__class__
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.__self__ is not None:
note = ' method of %s instance' % classname(
object.__self__.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
if name == realname:
title = self.bold(realname)
else:
if cl and inspect.getattr_static(cl, realname, []) is object:
skipdocs = 1
title = self.bold(name) + ' = ' + realname
argspec = None
if inspect.isroutine(object):
try:
signature = inspect.signature(object)
except (ValueError, TypeError):
signature = None
if signature:
argspec = str(signature)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
                    # XXX lambdas won't usually have func_annotations['return']
                    # since the syntax doesn't support it, but it is possible.
# So removing parentheses isn't truly safe.
argspec = argspec[1:-1] # remove parentheses
if not argspec:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
class _PlainTextDoc(TextDoc):
"""Subclass of TextDoc which overrides string styling"""
def bold(self, text):
return text
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
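# Note: the first call rebinds the module-level name 'pager' to whatever
# concrete pager getpager() chooses, so the detection logic runs only once.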
def getpager():
"""Decide what method to use for paging through text."""
if not hasattr(sys.stdin, "isatty"):
return plainpager
if not hasattr(sys.stdout, "isatty"):
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
use_pager = os.environ.get('MANPAGER') or os.environ.get('PAGER')
platform = sys.platform
if cosmo.kernel == 'nt':
platform = 'win32'
if use_pager:
if platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), use_pager)
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), use_pager)
else:
return lambda text: pipepager(text, use_pager)
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if platform == 'win32':
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
import subprocess
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
try:
with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
try:
pipe.write(text)
except KeyboardInterrupt:
# We've hereby abandoned whatever text hasn't been written,
# but the pager is still in control of the terminal.
pass
except OSError:
pass # Ignore broken pipes caused by quitting the pager program.
while True:
try:
proc.wait()
break
except KeyboardInterrupt:
# Ignore ctl-c like the pager itself does. Otherwise the pager is
# left running and the terminal is in raw mode and unusable.
pass
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
with open(filename, 'w', errors='backslashreplace') as file:
file.write(text)
quoted = filename
# [jart] bug fix: cmd.exe doesn't work if quotes are used when not needed
if ' ' in quoted or '\t' in quoted:
quoted = '"' + filename + '"'
try:
        os.system(cmd + ' ' + quoted)
finally:
os.unlink(filename)
def _escape_stdout(text):
# Escape non-encodable characters to avoid encoding errors later
encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
return text.encode(encoding, 'backslashreplace').decode(encoding)
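# Illustrative behavior: with an ASCII-encoded stdout,
# _escape_stdout('caf\xe9') returns 'caf\\xe9' instead of raising.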
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(_escape_stdout(text)).split('\n')
import tty
try:
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError, io.UnsupportedOperation):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
try:
h = int(os.environ.get('LINES', 0))
except ValueError:
h = 0
if h <= 1:
h = 25
r = inc = h - 1
sys.stdout.write('\n'.join(lines[:inc]) + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(_escape_stdout(text)))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in path.split('.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport('.'.join(parts[:n+1]), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = builtins
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
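# Illustrative behavior: locate('json.dumps') imports json and returns its
# dumps function; locate('len') falls back to builtins and returns len.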
# --------------------------------------- interactive interpreter interface
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if object is None:
raise ImportError('''\
No Python documentation found for %r.
Use help() to get the interactive help utility.
Use help(str) for help on the str class.''' % thing)
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
renderer=None):
"""Render text documentation, given an object or a path to an object."""
if renderer is None:
renderer = text
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
output=None):
"""Display text documentation, given an object or a path to an object."""
try:
if output is None:
pager(render_doc(thing, title, forceload))
else:
output.write(render_doc(thing, title, forceload, plaintext))
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
with open(name + '.html', 'w', encoding='utf-8') as file:
file.write(page)
print('wrote', name + '.html')
except (ImportError, ErrorDuringImport) as value:
print(value)
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_strprefixes = [p + q for p in ('b', 'f', 'r', 'u') for q in ("'", '"')]
_symbols_inverse = {
'STRINGS' : ("'", "'''", '"', '"""', *_strprefixes),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<%s.%s instance>' % (self.__class__.__module__,
self.__class__.__qualname__)
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = request.strip()
# Make sure significant trailing quoting marks of literals don't
# get deleted while cleaning input
if (len(request) > 2 and request[0] == request[-1] in ("'", '"')
and request[0] not in request[1:-1]):
request = request[1:-1]
if request.lower() in ('q', 'quit'): break
if request == 'help':
self.intro()
else:
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
else: doc(str, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python {0}'s help utility!
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at https://docs.python.org/{0}/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, symbols, or topics, type
"modules", "keywords", "symbols", or "topics". Each module also comes
with a one-line summary of what it does; to list the modules whose name
or summary contain a given string such as "spam", type "modules spam".
'''.format('%d.%d' % sys.version_info[:2]))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
doc = doc.strip() + '\n'
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import textwrap
text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n'
wrapped_text = textwrap.wrap(text, 72)
doc += '\n%s\n' % '\n'.join(wrapped_text)
pager(doc)
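# A minimal sketch (not part of the original source) of the lookup chain
# showtopic() just performed, using the LOOPING entry from the topics table
# above; it assumes the optional pydoc_data package is importable:
#
#     >>> import pydoc_data.topics
#     >>> label, xrefs = Helper.topics['LOOPING']
#     >>> label, xrefs
#     ('compound', 'for while break continue')
#     >>> doc = pydoc_data.topics.topics[label]   # the text handed to pager()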
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return ('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''', '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of modules whose name or summary contains '{}'.
If there are any, enter a module name to get more help.
'''.format(key))
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if modname.find('.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose name or summary contain the string "spam".
''')
help = Helper()
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = key.lower()
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
name = __import__(modname).__doc__ or ''
desc = name.split('\n')[0]
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
try:
spec = pkgutil._get_spec(importer, modname)
except SyntaxError:
# raised by tests for bad coding cookies or BOM
continue
loader = spec.loader
if hasattr(loader, 'get_source'):
try:
source = loader.get_source(modname)
except Exception:
if onerror:
onerror(modname)
continue
desc = source_synopsis(io.StringIO(source)) or ''
if hasattr(loader, 'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
try:
module = importlib._bootstrap._load(spec)
except ImportError:
if onerror:
onerror(modname)
continue
desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
name = modname + ' - ' + desc
if name.lower().find(key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print(modname, desc and '- ' + desc)
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port, host='localhost'):
"""Start an HTTP server thread on a specific port.
Start an HTML/text server thread, so HTML or text documents can be
browsed dynamically and interactively with a Web browser. Example use:
>>> import time
>>> import pydoc
Define a URL handler. To determine what the client is asking
for, check the URL and content_type.
Then get or generate some text or HTML code and return it.
>>> def my_url_handler(url, content_type):
... text = 'the URL sent was: (%s, %s)' % (url, content_type)
... return text
Start server thread on port 0.
If you use port 0, the server will pick a random port number.
You can then use serverthread.port to get the port number.
>>> port = 0
>>> serverthread = pydoc._start_server(my_url_handler, port)
Check that the server is really started. If it is, open browser
and get first page. Use serverthread.url as the starting page.
>>> if serverthread.serving:
... import webbrowser
The next two lines are commented out so a browser doesn't open if
doctest is run on this module.
#... webbrowser.open(serverthread.url)
#True
Let the server do its thing. We just need to monitor its status.
Use time.sleep so the loop doesn't hog the CPU.
>>> starttime = time.time()
>>> timeout = 1 #seconds
This is a short timeout for testing purposes.
>>> while serverthread.serving:
... time.sleep(.01)
... if serverthread.serving and time.time() - starttime > timeout:
... serverthread.stop()
... break
Print any errors that may have occurred.
>>> print(serverthread.error)
None
"""
try:
import http.server
import email.message
import select
import threading
except ImportError:
sys.exit(1)
class DocHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""Process a request from an HTML browser.
The URL received is in self.path.
Get an HTML page from self.urlhandler and send it.
"""
if self.path.endswith('.css'):
content_type = 'text/css'
else:
content_type = 'text/html'
self.send_response(200)
self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
self.end_headers()
self.wfile.write(self.urlhandler(
self.path, content_type).encode('utf-8'))
def log_message(self, *args):
# Don't log messages.
pass
class DocServer(http.server.HTTPServer):
def __init__(self, host, port, callback):
self.host = host
self.address = (self.host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
self.quit = False
def serve_until_quit(self):
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd:
self.handle_request()
self.server_close()
def server_activate(self):
self.base.server_activate(self)
if self.callback:
self.callback(self)
class ServerThread(threading.Thread):
def __init__(self, urlhandler, host, port):
self.urlhandler = urlhandler
self.host = host
self.port = int(port)
threading.Thread.__init__(self)
self.serving = False
self.error = None
def run(self):
"""Start the server."""
try:
DocServer.base = http.server.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = email.message.Message
DocHandler.urlhandler = staticmethod(self.urlhandler)
docsvr = DocServer(self.host, self.port, self.ready)
self.docserver = docsvr
docsvr.serve_until_quit()
except Exception as e:
self.error = e
def ready(self, server):
self.serving = True
self.host = server.host
self.port = server.server_port
self.url = 'http://%s:%d/' % (self.host, self.port)
def stop(self):
"""Stop the server and this thread nicely"""
self.docserver.quit = True
self.join()
# explicitly break a reference cycle: DocServer.callback
# has indirectly a reference to ServerThread.
self.docserver = None
self.serving = False
self.url = None
thread = ServerThread(urlhandler, host, port)
thread.start()
# Wait until thread.serving is True to make sure we are
# really up before returning.
while not thread.error and not thread.serving:
time.sleep(.01)
return thread
def _url_handler(url, content_type="text/html"):
"""The pydoc url handler for use with the pydoc server.
If the content_type is 'text/css', the _pydoc.css style
sheet is read and returned if it exists.
If the content_type is 'text/html', then the result of
get_html_page(url) is returned.
"""
class _HTMLDoc(HTMLDoc):
def page(self, title, contents):
"""Format an HTML page."""
css_path = "pydoc_data/_pydoc.css"
css_link = (
'<link rel="stylesheet" type="text/css" href="%s">' %
css_path)
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)
html = _HTMLDoc()
def html_navbar():
version = html.escape("%s [%s, %s]" % (platform.python_version(),
platform.python_build()[0],
platform.python_compiler()))
return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))
def html_index():
"""Module Index page."""
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
names = [name for name in sys.builtin_module_names
if name != '__main__']
contents = html.multicolumn(names, bltinlink)
contents = [heading, '<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
contents.append(html.index(dir, seen))
contents.append(
'<p align=right><font color="#909090" face="helvetica,'
'arial"><strong>pydoc</strong> by Ka-Ping Yee'
'&lt;[email protected]&gt;</font>')
return 'Index of Modules', ''.join(contents)
def html_search(key):
"""Search results page."""
# scan for modules
search_result = []
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
search_result.append((modname, desc and '- ' + desc))
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
def onerror(modname):
pass
ModuleScanner().run(callback, key, onerror=onerror)
# format page
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
results = []
heading = html.heading(
'<big><big><strong>Search Results</strong></big></big>',
'#ffffff', '#7799ee')
for name, desc in search_result:
results.append(bltinlink(name) + desc)
contents = heading + html.bigsection(
'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
return 'Search Results', contents
def html_topics():
"""Index of topic texts available."""
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.topics.keys())
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Topics', '#ffffff', '#ee77aa', contents)
return 'Topics', contents
def html_keywords():
"""Index of keywords."""
heading = html.heading(
'<big><big><strong>INDEX</strong></big></big>',
'#ffffff', '#7799ee')
names = sorted(Helper.keywords.keys())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
contents = html.multicolumn(names, bltinlink)
contents = heading + html.bigsection(
'Keywords', '#ffffff', '#ee77aa', contents)
return 'Keywords', contents
def html_topicpage(topic):
"""Topic or keyword help page."""
buf = io.StringIO()
htmlhelp = Helper(buf, buf)
contents, xrefs = htmlhelp._gettopic(topic)
if topic in htmlhelp.keywords:
title = 'KEYWORD'
else:
title = 'TOPIC'
heading = html.heading(
'<big><big><strong>%s</strong></big></big>' % title,
'#ffffff', '#7799ee')
contents = '<pre>%s</pre>' % html.markup(contents)
contents = html.bigsection(topic, '#ffffff', '#ee77aa', contents)
if xrefs:
xrefs = sorted(xrefs.split())
def bltinlink(name):
return '<a href="topic?key=%s">%s</a>' % (name, name)
xrefs = html.multicolumn(xrefs, bltinlink)
xrefs = html.section('Related help topics: ',
'#ffffff', '#ee77aa', xrefs)
return ('%s %s' % (title, topic),
''.join((heading, contents, xrefs)))
def html_getobj(url):
obj = locate(url, forceload=1)
if obj is None and url != 'None':
raise ValueError('could not find object')
title = describe(obj)
content = html.document(obj, url)
return title, content
def html_error(url, exc):
heading = html.heading(
'<big><big><strong>Error</strong></big></big>',
'#ffffff', '#7799ee')
contents = '<br>'.join(html.escape(line) for line in
format_exception_only(type(exc), exc))
contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
contents)
return "Error - %s" % url, contents
def get_html_page(url):
"""Generate an HTML page for url."""
complete_url = url
if url.endswith('.html'):
url = url[:-5]
try:
if url in ("", "index"):
title, content = html_index()
elif url == "topics":
title, content = html_topics()
elif url == "keywords":
title, content = html_keywords()
elif '=' in url:
op, _, url = url.partition('=')
if op == "search?key":
title, content = html_search(url)
elif op == "topic?key":
# try topics first, then objects.
try:
title, content = html_topicpage(url)
except ValueError:
title, content = html_getobj(url)
elif op == "get?key":
# try objects first, then topics.
if url in ("", "index"):
title, content = html_index()
else:
try:
title, content = html_getobj(url)
except ValueError:
title, content = html_topicpage(url)
else:
raise ValueError('bad pydoc url')
else:
title, content = html_getobj(url)
except Exception as exc:
# Catch any errors and display them in an error page.
title, content = html_error(complete_url, exc)
return html.page(title, content)
if url.startswith('/'):
url = url[1:]
if content_type == 'text/css':
path_here = os.path.dirname(os.path.realpath(__file__))
css_path = os.path.join(path_here, url)
with open(css_path) as fp:
return ''.join(fp.readlines())
elif content_type == 'text/html':
return get_html_page(url)
# Errors outside the url handler are caught by the server.
raise TypeError('unknown content type %r for url %s' % (content_type, url))
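# Summary of the URL forms dispatched above (illustrative; derived from
# get_html_page() rather than stated anywhere in the original source):
#
#     /                  -> module index
#     /topics.html       -> index of help topics
#     /keywords.html     -> index of keywords
#     /search?key=spam   -> search results for "spam"
#     /topic?key=FLOAT   -> topic/keyword page, falling back to an object page
#     /get?key=os.path   -> object page, falling back to a topic page
#     /os.path.html      -> documentation page for the named object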
def browse(port=0, *, open_browser=True, host='localhost', have_threads=True):
"""Start the enhanced pydoc Web server and open a Web browser.
Use port '0' to start the server on an arbitrary port.
Set open_browser to False to suppress opening a browser.
"""
try:
import webbrowser
except ImportError:
webbrowser = None
if not have_threads:
print('starting server on http://%s:%s/' % (host, port))
serverthread = _start_server(_url_handler, port, host)
if serverthread.error:
print(serverthread.error)
return
if not have_threads:
return
if serverthread.serving:
server_help_msg = 'Server commands: [b]rowser, [q]uit'
if open_browser and webbrowser is not None:
webbrowser.open(serverthread.url)
try:
print('Server ready at', serverthread.url)
print(server_help_msg)
while serverthread.serving:
cmd = input('server> ')
cmd = cmd.lower()
if cmd == 'q':
break
elif cmd == 'b':
if webbrowser is not None:
webbrowser.open(serverthread.url)
else:
print(server_help_msg)
except (KeyboardInterrupt, EOFError):
print()
finally:
if serverthread.serving:
serverthread.stop()
print('Server stopped')
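# Illustrative invocation (this is what the -b/-p command-line options below
# end up calling; port 0 picks an arbitrary free port):
#
#     >>> import pydoc
#     >>> pydoc.browse(port=0, open_browser=False)   # serves until 'q' or EOF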
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and x.find(os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
try:
import _thread
have_threads = True
except ImportError:
have_threads = False
class BadUsage(Exception):
pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'bk:p:h:w')
writing = False
start_server = False
open_browser = False
host = None
port = None
for opt, val in opts:
if opt == '-b':
start_server = True
open_browser = True
if opt == '-k':
apropos(val)
return
if opt == '-p':
start_server = True
port = val
if opt == '-h':
host = val
if opt == '-w':
writing = True
if start_server:
if port is None:
port = 0
if host is None:
host = 'localhost'
browse(port, open_browser=open_browser, host=host,
have_threads=have_threads)
return
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print('file %r does not exist' % arg)
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport as value:
print(value)
except (getopt.error, BadUsage):
cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print("""pydoc - the Python documentation tool
{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.
{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
cosmopolitan/third_party/python/Lib/cgitb.py
"""More comprehensive traceback formatting for Python scripts.
To enable this module, do:
import cgitb; cgitb.enable()
at the top of your script. The optional arguments to enable() are:
display - if true, tracebacks are displayed in the web browser
logdir - if set, tracebacks are written to files in this directory
context - number of lines of source code to show for each stack frame
format - 'text' or 'html' controls the output format
By default, tracebacks are displayed but not saved, the context is 5 lines
and the output format is 'html' (for backwards compatibility with the
original use of this module).
Alternatively, if you have caught an exception and want cgitb to display it
for you, call cgitb.handler(). The optional argument to handler() is a
3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
The default handler displays output as HTML.
"""
import inspect
import keyword
import linecache
import os
import pydoc
import sys
import tempfile
import time
import tokenize
import traceback
def reset():
"""Return a string that resets the CGI and browser to a known state."""
return '''<!--: spam
Content-Type: text/html
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
</font> </font> </font> </script> </object> </blockquote> </pre>
</table> </table> </table> </table> </table> </font> </font> </font>'''
__UNDEF__ = [] # a special sentinel object
def small(text):
if text:
return '<small>' + text + '</small>'
else:
return ''
def strong(text):
if text:
return '<strong>' + text + '</strong>'
else:
return ''
def grey(text):
if text:
return '<font color="#909090">' + text + '</font>'
else:
return ''
def lookup(name, frame, locals):
"""Find the value for a given name in the given environment."""
if name in locals:
return 'local', locals[name]
if name in frame.f_globals:
return 'global', frame.f_globals[name]
if '__builtins__' in frame.f_globals:
builtins = frame.f_globals['__builtins__']
if type(builtins) is type({}):
if name in builtins:
return 'builtin', builtins[name]
else:
if hasattr(builtins, name):
return 'builtin', getattr(builtins, name)
return None, __UNDEF__
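# Resolution order used above (illustrative example, not in the original
# source): locals win over the frame's globals, which win over __builtins__;
# names found nowhere come back as (None, __UNDEF__).
#
#     >>> import sys
#     >>> lookup('len', sys._getframe(), {})
#     ('builtin', <built-in function len>)
#     >>> lookup('no_such_name', sys._getframe(), {})[0] is None
#     True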
def scanvars(reader, frame, locals):
"""Scan one logical line of Python and look up values of variables used."""
vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
for ttype, token, start, end, line in tokenize.generate_tokens(reader):
if ttype == tokenize.NEWLINE: break
if ttype == tokenize.NAME and token not in keyword.kwlist:
if lasttoken == '.':
if parent is not __UNDEF__:
value = getattr(parent, token, __UNDEF__)
vars.append((prefix + token, prefix, value))
else:
where, value = lookup(token, frame, locals)
vars.append((token, where, value))
elif token == '.':
prefix += lasttoken + '.'
parent = value
else:
parent, prefix = None, ''
lasttoken = token
return vars
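# Shape of the result (illustrative, not in the original source): for the
# logical line "x = os.path", scanvars() yields triples roughly like
#
#     [('x', 'local', ...),
#      ('os', 'global', <module 'os'>),
#      ('os.path', 'os.', <module 'posixpath'>)]
#
# where the middle element is 'local'/'global'/'builtin' from lookup(), or
# the dotted prefix for attribute accesses.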
def html(einfo, context=5):
"""Return a nice HTML document describing a given traceback."""
etype, evalue, etb = einfo
if isinstance(etype, type):
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
'<big><big>%s</big></big>' %
strong(pydoc.html.escape(str(etype))),
'#ffffff', '#6622aa', pyver + '<br>' + date) + '''
<p>A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.</p>'''
indent = '<tt>' + small(' ' * 5) + ' </tt>'
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
if file:
file = os.path.abspath(file)
link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
else:
file = link = '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + strong(pydoc.html.escape(func)) + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.html.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
('<big> </big>', link, call)]
if index is not None:
i = lnum - index
for line in lines:
num = small(' ' * (5-len(str(i))) + str(i)) + ' '
if i in highlight:
line = '<tt>=>%s%s</tt>' % (num, pydoc.html.preformat(line))
rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
else:
line = '<tt> %s%s</tt>' % (num, pydoc.html.preformat(line))
rows.append('<tr><td>%s</td></tr>' % grey(line))
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where in ('global', 'builtin'):
name = ('<em>%s</em> ' % where) + strong(name)
elif where == 'local':
name = strong(name)
else:
name = where + strong(name.split('.')[-1])
dump.append('%s = %s' % (name, pydoc.html.repr(value)))
else:
dump.append(name + ' <em>undefined</em>')
rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
frames.append('''
<table width="100%%" cellspacing=0 cellpadding=0 border=0>
%s</table>''' % '\n'.join(rows))
exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
pydoc.html.escape(str(evalue)))]
for name in dir(evalue):
if name[:1] == '_': continue
value = pydoc.html.repr(getattr(evalue, name))
exception.append('\n<br>%s%s =\n%s' % (indent, name, value))
return head + ''.join(frames) + ''.join(exception) + '''
<!-- The above is a description of an error in a Python program, formatted
for a Web browser because the 'cgitb' module was enabled. In case you
are not reading this in a Web browser, here is the original traceback:
%s
-->
''' % pydoc.html.escape(
''.join(traceback.format_exception(etype, evalue, etb)))
def text(einfo, context=5):
"""Return a plain text document describing a given traceback."""
etype, evalue, etb = einfo
if isinstance(etype, type):
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.
'''
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + func + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.text.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = [' %s %s' % (file, call)]
if index is not None:
i = lnum - index
for line in lines:
num = '%5d ' % i
rows.append(num+line.rstrip())
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where == 'global': name = 'global ' + name
elif where != 'local': name = where + name.split('.')[-1]
dump.append('%s = %s' % (name, pydoc.text.repr(value)))
else:
dump.append(name + ' undefined')
rows.append('\n'.join(dump))
frames.append('\n%s\n' % '\n'.join(rows))
exception = ['%s: %s' % (str(etype), str(evalue))]
for name in dir(evalue):
value = pydoc.text.repr(getattr(evalue, name))
exception.append('\n%s%s = %s' % (" "*4, name, value))
return head + ''.join(frames) + ''.join(exception) + '''
The above is a description of an error in a Python program. Here is
the original traceback:
%s
''' % ''.join(traceback.format_exception(etype, evalue, etb))
class Hook:
"""A hook to replace sys.excepthook that shows tracebacks in HTML."""
def __init__(self, display=1, logdir=None, context=5, file=None,
format="html"):
self.display = display # send tracebacks to browser if true
self.logdir = logdir # log tracebacks to files if not None
self.context = context # number of source code lines per frame
self.file = file or sys.stdout # place to send the output
self.format = format
def __call__(self, etype, evalue, etb):
self.handle((etype, evalue, etb))
def handle(self, info=None):
info = info or sys.exc_info()
if self.format == "html":
self.file.write(reset())
formatter = (self.format=="html") and html or text
plain = False
try:
doc = formatter(info, self.context)
except: # just in case something goes wrong
doc = ''.join(traceback.format_exception(*info))
plain = True
if self.display:
if plain:
doc = pydoc.html.escape(doc)
self.file.write('<pre>' + doc + '</pre>\n')
else:
self.file.write(doc + '\n')
else:
self.file.write('<p>A problem occurred in a Python script.\n')
if self.logdir is not None:
suffix = ['.txt', '.html'][self.format=="html"]
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
try:
with os.fdopen(fd, 'w') as file:
file.write(doc)
msg = '%s contains the description of this error.' % path
except:
msg = 'Tried to save traceback to %s, but failed.' % path
if self.format == 'html':
self.file.write('<p>%s</p>\n' % msg)
else:
self.file.write(msg + '\n')
try:
self.file.flush()
except: pass
handler = Hook().handle
def enable(display=1, logdir=None, context=5, format="html"):
"""Install an exception handler that formats tracebacks as HTML.
The optional argument 'display' can be set to 0 to suppress sending the
traceback to the browser, and 'logdir' can be set to a directory to cause
tracebacks to be written to files there."""
sys.excepthook = Hook(display=display, logdir=logdir,
context=context, format=format)
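# Illustrative usage (consistent with the module docstring above): save
# tracebacks as plain-text files instead of rendering HTML to stdout; the
# directory name is just an example.
#
#     import cgitb
#     cgitb.enable(display=0, logdir='/tmp', format='text')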
cosmopolitan/third_party/python/Lib/bisect.py
"""Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
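# Worked example (illustrative, not part of the original module): bisect_left
# and bisect_right differ only when x is already present.
#
#     >>> a = [1, 2, 2, 4]
#     >>> bisect_left(a, 2)
#     1
#     >>> bisect_right(a, 2)
#     3
#     >>> insort_right(a, 3); a
#     [1, 2, 2, 3, 4]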
# Overwrite above definitions with a fast C implementation
try:
from _bisect import *
except ImportError:
pass
if __name__ == 'PYOBJ.COM':
import _bisect
cosmopolitan/third_party/python/Lib/_pyio.py
"""
Python implementation of the io module.
"""
import os
import abc
import codecs
import errno
import stat
import sys
# Import _thread instead of threading to reduce startup cost
try:
from _thread import allocate_lock as Lock
except ImportError:
from _dummy_thread import allocate_lock as Lock
if sys.platform in {'win32', 'cygwin'}:
from msvcrt import setmode as _setmode
else:
_setmode = None
import io
from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
valid_seek_flags = {0, 1, 2} # Hardwired values
if hasattr(os, 'SEEK_HOLE') :
valid_seek_flags.add(os.SEEK_HOLE)
valid_seek_flags.add(os.SEEK_DATA)
# open() uses st_blksize whenever we can
DEFAULT_BUFFER_SIZE = 8 * 1024 # bytes
# NOTE: Base classes defined here are registered with the "official" ABCs
# defined in io.py. We don't use real inheritance though, because we don't want
# to inherit the C implementations.
# Rebind for compatibility
BlockingIOError = BlockingIOError
def open(file, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
r"""Open file and return a stream. Raise OSError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file is
opened. It defaults to 'r' which means open for reading in text mode. Other
common values are 'w' for writing (truncating the file if it already
exists), 'x' for exclusive creation of a new file, and 'a' for appending
(which on some Unix systems, means that all writes append to the end of the
file regardless of the current seek position). In text mode, if encoding is
not specified the encoding used is platform dependent. (For reading and
writing raw bytes use binary mode and leave encoding unspecified.) The
available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
raises a `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
'U' mode is deprecated and will raise an exception in future versions
of Python. It has no effect in Python 3. Use newline to control
universal newlines mode.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the str name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register for a list of the permitted
encoding error strings.
newline is a string controlling how universal newlines works (it only
applies to text mode). It can be None, '', '\n', '\r', and '\r\n'. It works
as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '', no translation takes place. If newline is any of the
other legal values, any '\n' characters written are translated to
the given string.
closefd is a bool. If closefd is False, the underlying file descriptor will
be kept open when the file is closed. This does not work when a file name is
given and must be True in that case.
The newly created file is non-inheritable.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by calling
*opener* with (*file*, *flags*). *opener* must return an open file
descriptor (passing os.open as *opener* results in functionality similar to
passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
if not isinstance(file, int):
file = os.fspath(file)
if not isinstance(file, (str, bytes, int)):
raise TypeError("invalid file: %r" % file)
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
if not isinstance(buffering, int):
raise TypeError("invalid buffering: %r" % buffering)
if encoding is not None and not isinstance(encoding, str):
raise TypeError("invalid encoding: %r" % encoding)
if errors is not None and not isinstance(errors, str):
raise TypeError("invalid errors: %r" % errors)
modes = set(mode)
if modes - set("axrwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
creating = "x" in modes
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if creating or writing or appending or updating:
raise ValueError("mode U cannot be combined with 'x', 'w', 'a', or '+'")
import warnings
warnings.warn("'U' mode is deprecated",
DeprecationWarning, 2)
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if creating + reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (creating or reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
if binary and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
if binary and errors is not None:
raise ValueError("binary mode doesn't take an errors argument")
if binary and newline is not None:
raise ValueError("binary mode doesn't take a newline argument")
raw = FileIO(file,
(creating and "x" or "") +
(reading and "r" or "") +
(writing and "w" or "") +
(appending and "a" or "") +
(updating and "+" or ""),
closefd, opener=opener)
result = raw
try:
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = DEFAULT_BUFFER_SIZE
try:
bs = os.fstat(raw.fileno()).st_blksize
except (OSError, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0:
raise ValueError("invalid buffering size")
if buffering == 0:
if binary:
return result
raise ValueError("can't have unbuffered text I/O")
if updating:
buffer = BufferedRandom(raw, buffering)
elif creating or writing or appending:
buffer = BufferedWriter(raw, buffering)
elif reading:
buffer = BufferedReader(raw, buffering)
else:
raise ValueError("unknown mode: %r" % mode)
result = buffer
if binary:
return result
text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
result = text
text.mode = mode
return result
except:
result.close()
raise
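# Illustrative uses of the buffering cascade described in the docstring above
# (the file names are placeholders):
#
#     f = open('spam.bin', 'wb', buffering=0)      # raw FileIO, unbuffered
#     g = open('spam.bin', 'rb')                   # BufferedReader over FileIO
#     h = open('spam.txt', 'w', encoding='utf-8')  # TextIOWrapper over BufferedWriter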
class DocDescriptor:
"""Helper for builtins.open.__doc__
"""
def __get__(self, obj, typ):
return (
"open(file, mode='r', buffering=-1, encoding=None, "
"errors=None, newline=None, closefd=True)\n\n" +
open.__doc__)
class OpenWrapper:
"""Wrapper for builtins.open
Trick so that open won't become a bound method when stored
as a class variable (as dbm.dumb does).
See initstdio() in Python/pylifecycle.c.
"""
__doc__ = DocDescriptor()
def __new__(cls, *args, **kwargs):
return open(*args, **kwargs)
# In normal operation, both `UnsupportedOperation`s should be bound to the
# same object.
try:
UnsupportedOperation = io.UnsupportedOperation
except AttributeError:
class UnsupportedOperation(OSError, ValueError):
pass
class IOBase(metaclass=abc.ABCMeta):
"""The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too. In
some cases (such as readinto), a writable object is required. Text I/O
classes work with str data.
Note that calling any method (even inquiries) on a closed stream is
undefined. Implementations may raise OSError in this case.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'w') as fp:
fp.write('Spam and eggs!')
"""
### Internal ###
def _unsupported(self, name):
"""Internal: raise an OSError exception for unsupported operations."""
raise UnsupportedOperation("%s.%s() not supported" %
(self.__class__.__name__, name))
### Positioning ###
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are ints:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Some operating systems / file systems could provide additional values.
Return an int indicating the new absolute position.
"""
self._unsupported("seek")
def tell(self):
"""Return an int indicating the current stream position."""
return self.seek(0, 1)
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell(). Return
the new size.
"""
self._unsupported("truncate")
### Flush and close ###
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
# XXX Should this return the number of bytes written???
__closed = False
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.__closed:
try:
self.flush()
finally:
self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
# The try/except block is in case this is called at program
# exit time, when it's possible that globals have already been
# deleted, and then the close() call might fail. Since
# there's nothing we can do about such failures and they annoy
# the end users, we suppress the traceback.
try:
self.close()
except:
pass
### Inquiries ###
def seekable(self):
"""Return a bool indicating whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
return False
def _checkSeekable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not seekable
"""
if not self.seekable():
raise UnsupportedOperation("File or stream is not seekable."
if msg is None else msg)
def readable(self):
"""Return a bool indicating whether object was opened for reading.
If False, read() will raise OSError.
"""
return False
def _checkReadable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not readable
"""
if not self.readable():
raise UnsupportedOperation("File or stream is not readable."
if msg is None else msg)
def writable(self):
"""Return a bool indicating whether object was opened for writing.
If False, write() and truncate() will raise OSError.
"""
return False
def _checkWritable(self, msg=None):
"""Internal: raise UnsupportedOperation if file is not writable
"""
if not self.writable():
raise UnsupportedOperation("File or stream is not writable."
if msg is None else msg)
@property
def closed(self):
"""closed: bool. True iff the file has been closed.
For backwards compatibility, this is a property, not a predicate.
"""
return self.__closed
def _checkClosed(self, msg=None):
"""Internal: raise a ValueError if file is closed
"""
if self.closed:
raise ValueError("I/O operation on closed file."
if msg is None else msg)
### Context manager ###
def __enter__(self): # That's a forward reference
"""Context management protocol. Returns self (an instance of IOBase)."""
self._checkClosed()
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
### Lower-level APIs ###
# XXX Should these be present even if unimplemented?
def fileno(self):
"""Returns underlying file descriptor (an int) if one exists.
An OSError is raised if the IO object does not use a file descriptor.
"""
self._unsupported("fileno")
def isatty(self):
"""Return a bool indicating whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
self._checkClosed()
return False
### Readline[s] and writelines ###
def readline(self, size=-1):
r"""Read and return a line of bytes from the stream.
If size is specified, at most size bytes will be read.
Size should be an int.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
# For backwards compatibility, a (slowish) readline().
if hasattr(self, "peek"):
def nreadahead():
readahead = self.peek(1)
if not readahead:
return 1
n = (readahead.find(b"\n") + 1) or len(readahead)
if size >= 0:
n = min(n, size)
return n
else:
def nreadahead():
return 1
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError("size must be an integer")
res = bytearray()
while size < 0 or len(res) < size:
b = self.read(nreadahead())
if not b:
break
res += b
if res.endswith(b"\n"):
break
return bytes(res)
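# Note (not in the original source): when the stream has peek(), nreadahead()
# above inspects the buffered bytes and asks read() for exactly enough to
# reach the next b'\n', so readline() never consumes past the line; without
# peek() it falls back to one byte per read(), which is correct for
# unseekable raw streams but slow.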
def __iter__(self):
self._checkClosed()
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readlines(self, hint=None):
"""Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
if hint is None or hint <= 0:
return list(self)
n = 0
lines = []
for line in self:
lines.append(line)
n += len(line)
if n >= hint:
break
return lines
def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
class RawIOBase(IOBase):
"""Base class for raw binary I/O."""
# The read() method is implemented by calling readinto(); derived
# classes that want to support read() only need to implement
# readinto() as a primitive operation. In general, readinto() can be
# more efficient than read().
# (It would be tempting to also provide an implementation of
# readinto() in terms of read(), in case the latter is a more suitable
# primitive operation, but that would lead to nasty recursion in case
# a subclass doesn't implement either.)
def read(self, size=-1):
"""Read and return up to size bytes, where size is an int.
Returns an empty bytes object on EOF, or None if the object is
set not to block and has no data to read.
"""
if size is None:
size = -1
if size < 0:
return self.readall()
b = bytearray(size.__index__())
n = self.readinto(b)
if n is None:
return None
del b[n:]
return bytes(b)
def readall(self):
"""Read until EOF, using multiple read() call."""
res = bytearray()
while True:
data = self.read(DEFAULT_BUFFER_SIZE)
if not data:
break
res += data
if res:
return bytes(res)
else:
# b'' or None
return data
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Returns an int representing the number of bytes read (0 for EOF), or
None if the object is set not to block and has no data to read.
"""
self._unsupported("readinto")
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than the
length of b in bytes.
"""
self._unsupported("write")
io.RawIOBase.register(RawIOBase)
from _io import FileIO
RawIOBase.register(FileIO)
class BufferedIOBase(IOBase):
"""Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def read(self, size=None):
"""Read and return up to size bytes, where size is an int.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (XXX and for pipes?), at most one raw
read will be issued, and a short result does not imply that
EOF is imminent.
Returns an empty bytes array on EOF.
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
self._unsupported("read")
def read1(self, size=None):
"""Read up to size bytes with at most one read() system call,
where size is an int.
"""
self._unsupported("read1")
def readinto(self, b):
"""Read bytes into a pre-allocated bytes-like object b.
Like read(), this may issue multiple reads to the underlying raw
stream, unless the latter is 'interactive'.
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=False)
def readinto1(self, b):
"""Read bytes into buffer *b*, using at most one system call
Returns an int representing the number of bytes read (0 for EOF).
Raises BlockingIOError if the underlying raw stream has no
data at the moment.
"""
return self._readinto(b, read1=True)
def _readinto(self, b, read1):
if not isinstance(b, memoryview):
b = memoryview(b)
b = b.cast('B')
if read1:
data = self.read1(len(b))
else:
data = self.read(len(b))
n = len(data)
b[:n] = data
return n
def write(self, b):
"""Write the given bytes buffer to the IO stream.
Return the number of bytes written, which is always the length of b
in bytes.
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
self._unsupported("write")
def detach(self):
"""
Separate the underlying raw stream from the buffer and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
self._unsupported("detach")
io.BufferedIOBase.register(BufferedIOBase)
class _BufferedIOMixin(BufferedIOBase):
"""A mixin implementation of BufferedIOBase with an underlying raw stream.
This passes most requests on to the underlying raw stream. It
does *not* provide implementations of read(), readinto() or
write().
"""
def __init__(self, raw):
self._raw = raw
### Positioning ###
def seek(self, pos, whence=0):
new_position = self.raw.seek(pos, whence)
if new_position < 0:
raise OSError("seek() returned an invalid position")
return new_position
def tell(self):
pos = self.raw.tell()
if pos < 0:
raise OSError("tell() returned an invalid position")
return pos
def truncate(self, pos=None):
# Flush the stream. We're mixing buffered I/O with lower-level I/O,
# and a flush may be necessary to synch both views of the current
# file state.
self.flush()
if pos is None:
pos = self.tell()
# XXX: Should seek() be used, instead of passing the position
# XXX directly to truncate?
return self.raw.truncate(pos)
### Flush and close ###
def flush(self):
if self.closed:
raise ValueError("flush of closed file")
self.raw.flush()
def close(self):
if self.raw is not None and not self.closed:
try:
# may raise BlockingIOError or BrokenPipeError etc
self.flush()
finally:
self.raw.close()
def detach(self):
if self.raw is None:
raise ValueError("raw stream already detached")
self.flush()
raw = self._raw
self._raw = None
return raw
### Inquiries ###
def seekable(self):
return self.raw.seekable()
@property
def raw(self):
return self._raw
@property
def closed(self):
return self.raw.closed
@property
def name(self):
return self.raw.name
@property
def mode(self):
return self.raw.mode
def __getstate__(self):
raise TypeError("can not serialize a '{0}' object"
.format(self.__class__.__name__))
def __repr__(self):
modname = self.__class__.__module__
clsname = self.__class__.__qualname__
try:
name = self.name
except Exception:
return "<{}.{}>".format(modname, clsname)
else:
return "<{}.{} name={!r}>".format(modname, clsname, name)
### Lower-level APIs ###
def fileno(self):
return self.raw.fileno()
def isatty(self):
return self.raw.isatty()
class BytesIO(BufferedIOBase):
"""Buffered I/O implementation using an in-memory bytes buffer."""
def __init__(self, initial_bytes=None):
buf = bytearray()
if initial_bytes is not None:
buf += initial_bytes
self._buffer = buf
self._pos = 0
def __getstate__(self):
if self.closed:
raise ValueError("__getstate__ on closed file")
return self.__dict__.copy()
def getvalue(self):
"""Return the bytes value (contents) of the buffer
"""
if self.closed:
raise ValueError("getvalue on closed file")
return bytes(self._buffer)
def getbuffer(self):
"""Return a readable and writable view of the buffer.
"""
if self.closed:
raise ValueError("getbuffer on closed file")
return memoryview(self._buffer)
def close(self):
self._buffer.clear()
super().close()
def read(self, size=None):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
if size < 0:
size = len(self._buffer)
if len(self._buffer) <= self._pos:
return b""
newpos = min(len(self._buffer), self._pos + size)
b = self._buffer[self._pos : newpos]
self._pos = newpos
return bytes(b)
def read1(self, size):
"""This is the same as read.
"""
return self.read(size)
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with memoryview(b) as view:
n = view.nbytes # Size of any bytes-like object
if n == 0:
return 0
pos = self._pos
if pos > len(self._buffer):
# Inserts null bytes between the current end of the file
# and the new write position.
padding = b'\x00' * (pos - len(self._buffer))
self._buffer += padding
self._buffer[pos:pos + n] = b
self._pos += n
return n
def seek(self, pos, whence=0):
if self.closed:
raise ValueError("seek on closed file")
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if whence == 0:
if pos < 0:
raise ValueError("negative seek position %r" % (pos,))
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, len(self._buffer) + pos)
else:
raise ValueError("unsupported whence value")
return self._pos
def tell(self):
if self.closed:
raise ValueError("tell on closed file")
return self._pos
def truncate(self, pos=None):
if self.closed:
raise ValueError("truncate on closed file")
if pos is None:
pos = self._pos
else:
try:
pos.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if pos < 0:
raise ValueError("negative truncate position %r" % (pos,))
del self._buffer[pos:]
return pos
def readable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def writable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return True
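# Illustrative usage of BytesIO (a minimal sketch, kept as a comment so it is
# not executed at import time):
#
#   b = BytesIO(b"abc")
#   b.seek(0, 2)              # seek to end -> 3
#   b.write(b"def")           # appends -> returns 3
#   b.getvalue()              # -> b"abcdef"
#   b.seek(1); b.read(2)      # -> b"bc"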
class BufferedReader(_BufferedIOMixin):
"""BufferedReader(raw[, buffer_size])
    A buffer for a readable, sequential RawIOBase object.
The constructor creates a BufferedReader for the given readable raw
stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
is used.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
"""Create a new buffered reader using the given readable raw IO object.
"""
if not raw.readable():
raise OSError('"raw" argument must be readable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._reset_read_buf()
self._read_lock = Lock()
def readable(self):
return self.raw.readable()
def _reset_read_buf(self):
self._read_buf = b""
self._read_pos = 0
def read(self, size=None):
"""Read size bytes.
Returns exactly size bytes of data unless the underlying raw IO
stream reaches EOF or if the call would block in non-blocking
mode. If size is negative, read until EOF or until read() would
block.
"""
if size is not None and size < -1:
raise ValueError("invalid number of bytes to read")
with self._read_lock:
return self._read_unlocked(size)
def _read_unlocked(self, n=None):
nodata_val = b""
empty_values = (b"", None)
buf = self._read_buf
pos = self._read_pos
# Special case for when the number of bytes to read is unspecified.
if n is None or n == -1:
self._reset_read_buf()
if hasattr(self.raw, 'readall'):
chunk = self.raw.readall()
if chunk is None:
return buf[pos:] or None
else:
return buf[pos:] + chunk
chunks = [buf[pos:]] # Strip the consumed bytes.
current_size = 0
while True:
# Read until EOF or until read() would block.
chunk = self.raw.read()
if chunk in empty_values:
nodata_val = chunk
break
current_size += len(chunk)
chunks.append(chunk)
return b"".join(chunks) or nodata_val
# The number of bytes to read is specified, return at most n bytes.
avail = len(buf) - pos # Length of the available buffered data.
if n <= avail:
# Fast path: the data to read is fully buffered.
self._read_pos += n
return buf[pos:pos+n]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
chunks = [buf[pos:]]
wanted = max(self.buffer_size, n)
while avail < n:
chunk = self.raw.read(wanted)
if chunk in empty_values:
nodata_val = chunk
break
avail += len(chunk)
chunks.append(chunk)
# n is more than avail only when an EOF occurred or when
# read() would have blocked.
n = min(n, avail)
out = b"".join(chunks)
self._read_buf = out[n:] # Save the extra data in the buffer.
self._read_pos = 0
return out[:n] if out else nodata_val
def peek(self, size=0):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes; we
do at most one raw read to satisfy it. We never return more
than self.buffer_size.
"""
with self._read_lock:
return self._peek_unlocked(size)
def _peek_unlocked(self, n=0):
want = min(n, self.buffer_size)
have = len(self._read_buf) - self._read_pos
if have < want or have <= 0:
to_read = self.buffer_size - have
current = self.raw.read(to_read)
if current:
self._read_buf = self._read_buf[self._read_pos:] + current
self._read_pos = 0
return self._read_buf[self._read_pos:]
def read1(self, size):
"""Reads up to size bytes, with at most one read() system call."""
# Returns up to size bytes. If at least one byte is buffered, we
# only return buffered bytes. Otherwise, we do one raw read.
if size < 0:
raise ValueError("number of bytes to read must be positive")
if size == 0:
return b""
with self._read_lock:
self._peek_unlocked(1)
return self._read_unlocked(
min(size, len(self._read_buf) - self._read_pos))
# Implementing readinto() and readinto1() is not strictly necessary (we
# could rely on the base class that provides an implementation in terms of
# read() and read1()). We do it anyway to keep the _pyio implementation
# similar to the io implementation (which implements the methods for
# performance reasons).
def _readinto(self, buf, read1):
"""Read data into *buf* with at most one system call."""
# Need to create a memoryview object of type 'b', otherwise
# we may not be able to assign bytes to it, and slicing it
# would create a new object.
if not isinstance(buf, memoryview):
buf = memoryview(buf)
if buf.nbytes == 0:
return 0
buf = buf.cast('B')
written = 0
with self._read_lock:
while written < len(buf):
# First try to read from internal buffer
avail = min(len(self._read_buf) - self._read_pos, len(buf))
if avail:
buf[written:written+avail] = \
self._read_buf[self._read_pos:self._read_pos+avail]
self._read_pos += avail
written += avail
if written == len(buf):
break
                # If remaining space in caller's buffer is larger than
                # internal buffer, read directly into caller's buffer
if len(buf) - written > self.buffer_size:
n = self.raw.readinto(buf[written:])
if not n:
break # eof
written += n
# Otherwise refill internal buffer - unless we're
# in read1 mode and already got some data
elif not (read1 and written):
if not self._peek_unlocked(1):
break # eof
# In readinto1 mode, return as soon as we have some data
if read1 and written:
break
return written
def tell(self):
return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._read_lock:
if whence == 1:
pos -= len(self._read_buf) - self._read_pos
pos = _BufferedIOMixin.seek(self, pos, whence)
self._reset_read_buf()
return pos
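# Illustrative usage of BufferedReader (a sketch): peek() inspects buffered
# data without consuming it, while read1() does at most one raw read.
#
#   r = BufferedReader(BytesIO(b"hello world"), buffer_size=4)
#   r.peek(1)                 # -> b"hell" (one raw read of buffer_size bytes)
#   r.read1(5)                # -> b"hell" (only already-buffered bytes)
#   r.read()                  # -> b"o world"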
class BufferedWriter(_BufferedIOMixin):
"""A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
if not raw.writable():
raise OSError('"raw" argument must be writable.')
_BufferedIOMixin.__init__(self, raw)
if buffer_size <= 0:
raise ValueError("invalid buffer size")
self.buffer_size = buffer_size
self._write_buf = bytearray()
self._write_lock = Lock()
def writable(self):
return self.raw.writable()
def write(self, b):
if self.closed:
raise ValueError("write to closed file")
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
# XXX we can implement some more tricks to try and avoid
# partial writes
if len(self._write_buf) > self.buffer_size:
# We're full, so let's pre-flush the buffer. (This may
# raise BlockingIOError with characters_written == 0.)
self._flush_unlocked()
before = len(self._write_buf)
self._write_buf.extend(b)
written = len(self._write_buf) - before
if len(self._write_buf) > self.buffer_size:
try:
self._flush_unlocked()
except BlockingIOError as e:
if len(self._write_buf) > self.buffer_size:
# We've hit the buffer_size. We have to accept a partial
# write and cut back our buffer.
overage = len(self._write_buf) - self.buffer_size
written -= overage
self._write_buf = self._write_buf[:self.buffer_size]
raise BlockingIOError(e.errno, e.strerror, written)
return written
def truncate(self, pos=None):
with self._write_lock:
self._flush_unlocked()
if pos is None:
pos = self.raw.tell()
return self.raw.truncate(pos)
def flush(self):
with self._write_lock:
self._flush_unlocked()
def _flush_unlocked(self):
if self.closed:
raise ValueError("flush of closed file")
while self._write_buf:
try:
n = self.raw.write(self._write_buf)
except BlockingIOError:
raise RuntimeError("self.raw should implement RawIOBase: it "
"should not raise BlockingIOError")
if n is None:
raise BlockingIOError(
errno.EAGAIN,
"write could not complete without blocking", 0)
if n > len(self._write_buf) or n < 0:
raise OSError("write() returned incorrect number of bytes")
del self._write_buf[:n]
def tell(self):
return _BufferedIOMixin.tell(self) + len(self._write_buf)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
with self._write_lock:
self._flush_unlocked()
return _BufferedIOMixin.seek(self, pos, whence)
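# Illustrative usage of BufferedWriter (a sketch): writes accumulate in the
# internal buffer and only reach the raw stream on overflow or flush().
#
#   raw = BytesIO()
#   w = BufferedWriter(raw, buffer_size=8)
#   w.write(b"abc")           # -> 3, still buffered
#   raw.getvalue()            # -> b""
#   w.flush()
#   raw.getvalue()            # -> b"abc"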
class BufferedRWPair(BufferedIOBase):
"""A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
    reader and writer are RawIOBase objects that are readable and
    writable, respectively. If the buffer_size is omitted it defaults to
    DEFAULT_BUFFER_SIZE.
"""
# XXX The usefulness of this (compared to having two separate IO
# objects) is questionable.
def __init__(self, reader, writer, buffer_size=DEFAULT_BUFFER_SIZE):
"""Constructor.
The arguments are two RawIO instances.
"""
if not reader.readable():
raise OSError('"reader" argument must be readable.')
if not writer.writable():
raise OSError('"writer" argument must be writable.')
self.reader = BufferedReader(reader, buffer_size)
self.writer = BufferedWriter(writer, buffer_size)
def read(self, size=None):
if size is None:
size = -1
return self.reader.read(size)
def readinto(self, b):
return self.reader.readinto(b)
def write(self, b):
return self.writer.write(b)
def peek(self, size=0):
return self.reader.peek(size)
def read1(self, size):
return self.reader.read1(size)
def readinto1(self, b):
return self.reader.readinto1(b)
def readable(self):
return self.reader.readable()
def writable(self):
return self.writer.writable()
def flush(self):
return self.writer.flush()
def close(self):
try:
self.writer.close()
finally:
self.reader.close()
def isatty(self):
return self.reader.isatty() or self.writer.isatty()
@property
def closed(self):
return self.writer.closed
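# Illustrative pairing (a sketch; the two streams would typically be the read
# and write ends of a socket or pipe; rfd and wfd below are hypothetical file
# descriptors, e.g. from os.pipe()):
#
#   rw = BufferedRWPair(FileIO(rfd, 'r', closefd=False),
#                       FileIO(wfd, 'w', closefd=False))
#   rw.write(b"ping"); rw.flush()
#   rw.read(4)                # -> b"ping" once the peer has written it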
class BufferedRandom(BufferedWriter, BufferedReader):
"""A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
raw._checkSeekable()
BufferedReader.__init__(self, raw, buffer_size)
BufferedWriter.__init__(self, raw, buffer_size)
def seek(self, pos, whence=0):
if whence not in valid_seek_flags:
raise ValueError("invalid whence value")
self.flush()
if self._read_buf:
# Undo read ahead.
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
# First do the raw seek, then empty the read buffer, so that
# if the raw seek fails, we don't lose buffered data forever.
pos = self.raw.seek(pos, whence)
with self._read_lock:
self._reset_read_buf()
if pos < 0:
raise OSError("seek() returned invalid position")
return pos
def tell(self):
if self._write_buf:
return BufferedWriter.tell(self)
else:
return BufferedReader.tell(self)
def truncate(self, pos=None):
if pos is None:
pos = self.tell()
# Use seek to flush the read buffer.
return BufferedWriter.truncate(self, pos)
def read(self, size=None):
if size is None:
size = -1
self.flush()
return BufferedReader.read(self, size)
def readinto(self, b):
self.flush()
return BufferedReader.readinto(self, b)
def peek(self, size=0):
self.flush()
return BufferedReader.peek(self, size)
def read1(self, size):
self.flush()
return BufferedReader.read1(self, size)
def readinto1(self, b):
self.flush()
return BufferedReader.readinto1(self, b)
def write(self, b):
if self._read_buf:
# Undo readahead
with self._read_lock:
self.raw.seek(self._read_pos - len(self._read_buf), 1)
self._reset_read_buf()
return BufferedWriter.write(self, b)
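# Illustrative usage of BufferedRandom (a sketch): reads and writes share one
# buffered position over a seekable raw stream.
#
#   raw = BytesIO(b"0123456789")
#   f = BufferedRandom(raw)
#   f.read(4)                 # -> b"0123"
#   f.seek(0)
#   f.write(b"ab")            # overwrite in place
#   f.flush()
#   raw.getvalue()            # -> b"ab23456789"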
class FileIO(RawIOBase):
_fd = -1
_created = False
_readable = False
_writable = False
_appending = False
_seekable = None
_closefd = True
def __init__(self, file, mode='r', closefd=True, opener=None):
"""Open a file. The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
writing, exclusive creation or appending. The file will be created if it
doesn't exist when opened for writing or appending; it will be truncated
when opened for writing. A FileExistsError will be raised if it already
exists when opened for creating. Opening a file for creating implies
writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
to allow simultaneous reading and writing. A custom opener can be used by
passing a callable as *opener*. The underlying file descriptor for the file
object is then obtained by calling opener with (*name*, *flags*).
*opener* must return an open file descriptor (passing os.open as *opener*
results in functionality similar to passing None).
"""
if self._fd >= 0:
# Have to close the existing file first.
try:
if self._closefd:
os.close(self._fd)
finally:
self._fd = -1
if isinstance(file, float):
raise TypeError('integer argument expected, got float')
if isinstance(file, int):
fd = file
if fd < 0:
raise ValueError('negative file descriptor')
else:
fd = -1
if not isinstance(mode, str):
raise TypeError('invalid mode: %s' % (mode,))
if not set(mode) <= set('xrwab+'):
raise ValueError('invalid mode: %s' % (mode,))
if sum(c in 'rwax' for c in mode) != 1 or mode.count('+') > 1:
raise ValueError('Must have exactly one of create/read/write/append '
'mode and at most one plus')
if 'x' in mode:
self._created = True
self._writable = True
flags = os.O_EXCL | os.O_CREAT
elif 'r' in mode:
self._readable = True
flags = 0
elif 'w' in mode:
self._writable = True
flags = os.O_CREAT | os.O_TRUNC
elif 'a' in mode:
self._writable = True
self._appending = True
flags = os.O_APPEND | os.O_CREAT
if '+' in mode:
self._readable = True
self._writable = True
if self._readable and self._writable:
flags |= os.O_RDWR
elif self._readable:
flags |= os.O_RDONLY
else:
flags |= os.O_WRONLY
flags |= getattr(os, 'O_BINARY', 0)
noinherit_flag = (getattr(os, 'O_NOINHERIT', 0) or
getattr(os, 'O_CLOEXEC', 0))
flags |= noinherit_flag
owned_fd = None
try:
if fd < 0:
if not closefd:
raise ValueError('Cannot use closefd=False with file name')
if opener is None:
fd = os.open(file, flags, 0o666)
else:
fd = opener(file, flags)
if not isinstance(fd, int):
raise TypeError('expected integer from opener')
if fd < 0:
raise OSError('Negative file descriptor')
owned_fd = fd
if not noinherit_flag:
os.set_inheritable(fd, False)
self._closefd = closefd
fdfstat = os.fstat(fd)
try:
if stat.S_ISDIR(fdfstat.st_mode):
raise IsADirectoryError(errno.EISDIR,
os.strerror(errno.EISDIR), file)
except AttributeError:
                # Ignore the AttributeError if stat.S_ISDIR or errno.EISDIR
# don't exist.
pass
self._blksize = getattr(fdfstat, 'st_blksize', 0)
if self._blksize <= 1:
self._blksize = DEFAULT_BUFFER_SIZE
if _setmode:
# don't translate newlines (\r\n <=> \n)
_setmode(fd, os.O_BINARY)
self.name = file
if self._appending:
# For consistent behaviour, we explicitly seek to the
# end of file (otherwise, it might be done only on the
# first write()).
os.lseek(fd, 0, SEEK_END)
except:
if owned_fd is not None:
os.close(owned_fd)
raise
self._fd = fd
def __del__(self):
if self._fd >= 0 and self._closefd and not self.closed:
import warnings
warnings.warn('unclosed file %r' % (self,), ResourceWarning,
stacklevel=2, source=self)
self.close()
def __getstate__(self):
raise TypeError("cannot serialize '%s' object", self.__class__.__name__)
def __repr__(self):
class_name = '%s.%s' % (self.__class__.__module__,
self.__class__.__qualname__)
if self.closed:
return '<%s [closed]>' % class_name
try:
name = self.name
except AttributeError:
return ('<%s fd=%d mode=%r closefd=%r>' %
(class_name, self._fd, self.mode, self._closefd))
else:
return ('<%s name=%r mode=%r closefd=%r>' %
(class_name, name, self.mode, self._closefd))
def _checkReadable(self):
if not self._readable:
raise UnsupportedOperation('File not open for reading')
def _checkWritable(self, msg=None):
if not self._writable:
raise UnsupportedOperation('File not open for writing')
def read(self, size=None):
"""Read at most size bytes, returned as bytes.
        Only makes one system call, so less data may be returned than requested.
In non-blocking mode, returns None if no data is available.
Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
if size is None or size < 0:
return self.readall()
try:
return os.read(self._fd, size)
except BlockingIOError:
return None
def readall(self):
"""Read all data from the file, returned as bytes.
In non-blocking mode, returns as much as is immediately available,
or None if no data is available. Return an empty bytes object at EOF.
"""
self._checkClosed()
self._checkReadable()
bufsize = DEFAULT_BUFFER_SIZE
try:
pos = os.lseek(self._fd, 0, SEEK_CUR)
end = os.fstat(self._fd).st_size
if end >= pos:
bufsize = end - pos + 1
except OSError:
pass
result = bytearray()
while True:
if len(result) >= bufsize:
bufsize = len(result)
bufsize += max(bufsize, DEFAULT_BUFFER_SIZE)
n = bufsize - len(result)
try:
chunk = os.read(self._fd, n)
except BlockingIOError:
if result:
break
return None
if not chunk: # reached the end of the file
break
result += chunk
return bytes(result)
def readinto(self, b):
"""Same as RawIOBase.readinto()."""
m = memoryview(b).cast('B')
data = self.read(len(m))
n = len(data)
m[:n] = data
return n
def write(self, b):
"""Write bytes b to file, return number written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned. In non-blocking mode,
returns None if the write would block.
"""
self._checkClosed()
self._checkWritable()
try:
return os.write(self._fd, b)
except BlockingIOError:
return None
def seek(self, pos, whence=SEEK_SET):
"""Move to new file position.
        Argument pos is a byte count. Optional argument whence defaults to
        SEEK_SET or 0 (offset from start of file, pos should be >= 0); other values
are SEEK_CUR or 1 (move relative to current position, positive or negative),
and SEEK_END or 2 (move relative to end of file, usually negative, although
many platforms allow seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
if isinstance(pos, float):
raise TypeError('an integer is required')
self._checkClosed()
return os.lseek(self._fd, pos, whence)
def tell(self):
"""tell() -> int. Current file position.
        Can raise OSError for non-seekable files."""
self._checkClosed()
return os.lseek(self._fd, 0, SEEK_CUR)
def truncate(self, size=None):
"""Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
The current file position is changed to the value of size.
"""
self._checkClosed()
self._checkWritable()
if size is None:
size = self.tell()
os.ftruncate(self._fd, size)
return size
def close(self):
"""Close the file.
A closed file cannot be used for further I/O operations. close() may be
called more than once without error.
"""
if not self.closed:
try:
if self._closefd:
os.close(self._fd)
finally:
super().close()
def seekable(self):
"""True if file supports random-access."""
self._checkClosed()
if self._seekable is None:
try:
self.tell()
except OSError:
self._seekable = False
else:
self._seekable = True
return self._seekable
def readable(self):
"""True if file was opened in a read mode."""
self._checkClosed()
return self._readable
def writable(self):
"""True if file was opened in a write mode."""
self._checkClosed()
return self._writable
def fileno(self):
"""Return the underlying file descriptor (an integer)."""
self._checkClosed()
return self._fd
def isatty(self):
"""True if the file is connected to a TTY device."""
self._checkClosed()
return os.isatty(self._fd)
@property
def closefd(self):
"""True if the file descriptor will be closed by close()."""
return self._closefd
@property
def mode(self):
"""String giving the file mode"""
if self._created:
if self._readable:
return 'xb+'
else:
return 'xb'
elif self._appending:
if self._readable:
return 'ab+'
else:
return 'ab'
elif self._readable:
if self._writable:
return 'rb+'
else:
return 'rb'
else:
return 'wb'
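# Illustrative usage of FileIO (a sketch; the path is hypothetical):
#
#   f = FileIO("/tmp/demo.bin", "w")      # O_WRONLY | O_CREAT | O_TRUNC
#   f.write(b"data"); f.close()
#   g = FileIO("/tmp/demo.bin", "r")
#   g.read()                              # -> b"data"
#   g.mode                                # -> 'rb'
#   g.close()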
class TextIOBase(IOBase):
"""Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def read(self, size=-1):
"""Read at most size characters from stream, where size is an int.
Read from underlying buffer until we have size characters or we hit EOF.
If size is negative or omitted, read until EOF.
Returns a string.
"""
self._unsupported("read")
def write(self, s):
"""Write string s to stream and returning an int."""
self._unsupported("write")
def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate")
def readline(self):
"""Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
self._unsupported("readline")
def detach(self):
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
self._unsupported("detach")
@property
def encoding(self):
"""Subclasses should override."""
return None
@property
def newlines(self):
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
return None
@property
def errors(self):
"""Error setting of the decoder or encoder.
Subclasses should override."""
return None
io.TextIOBase.register(TextIOBase)
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
r"""Codec used when reading a file in universal newlines mode. It wraps
another incremental decoder, translating \r\n and \r into \n. It also
records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece.
"""
def __init__(self, decoder, translate, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors=errors)
self.translate = translate
self.decoder = decoder
self.seennl = 0
self.pendingcr = False
def decode(self, input, final=False):
        # decode input (with the pending \r from a previous pass, if any)
if self.decoder is None:
output = input
else:
output = self.decoder.decode(input, final=final)
if self.pendingcr and (output or final):
output = "\r" + output
self.pendingcr = False
# retain last \r even when not translating data:
# then readline() is sure to get \r\n in one pass
if output.endswith("\r") and not final:
output = output[:-1]
self.pendingcr = True
# Record which newlines are read
crlf = output.count('\r\n')
cr = output.count('\r') - crlf
lf = output.count('\n') - crlf
self.seennl |= (lf and self._LF) | (cr and self._CR) \
| (crlf and self._CRLF)
if self.translate:
if crlf:
output = output.replace("\r\n", "\n")
if cr:
output = output.replace("\r", "\n")
return output
def getstate(self):
if self.decoder is None:
buf = b""
flag = 0
else:
buf, flag = self.decoder.getstate()
flag <<= 1
if self.pendingcr:
flag |= 1
return buf, flag
def setstate(self, state):
buf, flag = state
self.pendingcr = bool(flag & 1)
if self.decoder is not None:
self.decoder.setstate((buf, flag >> 1))
def reset(self):
self.seennl = 0
self.pendingcr = False
if self.decoder is not None:
self.decoder.reset()
_LF = 1
_CR = 2
_CRLF = 4
@property
def newlines(self):
return (None,
"\n",
"\r",
("\r", "\n"),
"\r\n",
("\n", "\r\n"),
("\r", "\r\n"),
("\r", "\n", "\r\n")
)[self.seennl]
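# Illustrative behaviour (a sketch): a \r\n pair split across two decode()
# calls is still translated as a single newline, and the kinds of newline
# seen are recorded.
#
#   d = IncrementalNewlineDecoder(decoder=None, translate=True)
#   d.decode("ab\r")          # -> "ab" (trailing \r held back)
#   d.decode("\ncd")          # -> "\ncd" (the \r\n pair collapsed to \n)
#   d.newlines                # -> "\r\n"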
class TextIOWrapper(TextIOBase):
r"""Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see the
codecs.register) and defaults to "strict".
    newline can be None, '', '\n', '\r', or '\r\n'. It controls the
    handling of line endings. If it is None, universal newlines mode is
    enabled. With this enabled, on input, the line endings '\n', '\r',
    or '\r\n' are translated to '\n' before being returned to the
    caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any of the other
    legal values, input lines are terminated only by the given string,
    which is returned untranslated, and on output '\n' is converted to
    that newline.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
_CHUNK_SIZE = 2048
# The write_through argument has no effect here since this
# implementation always writes through. The argument is present only
# so that the signature can match the signature of the C version.
def __init__(self, buffer, encoding=None, errors=None, newline=None,
line_buffering=False, write_through=False):
if newline is not None and not isinstance(newline, str):
raise TypeError("illegal newline type: %r" % (type(newline),))
if newline not in (None, "", "\n", "\r", "\r\n"):
raise ValueError("illegal newline value: %r" % (newline,))
if encoding is None:
try:
encoding = os.device_encoding(buffer.fileno())
except (AttributeError, UnsupportedOperation):
pass
if encoding is None:
try:
import locale
except ImportError:
# Importing locale may fail if Python is being built
encoding = "ascii"
else:
encoding = locale.getpreferredencoding(False)
if not isinstance(encoding, str):
raise ValueError("invalid encoding: %r" % encoding)
if not codecs.lookup(encoding)._is_text_encoding:
msg = ("%r is not a text encoding; "
"use codecs.open() to handle arbitrary codecs")
raise LookupError(msg % encoding)
if errors is None:
errors = "strict"
else:
if not isinstance(errors, str):
raise ValueError("invalid errors: %r" % errors)
self._buffer = buffer
self._line_buffering = line_buffering
self._encoding = encoding
self._errors = errors
self._readuniversal = not newline
self._readtranslate = newline is None
self._readnl = newline
self._writetranslate = newline != ''
self._writenl = newline or os.linesep
self._encoder = None
self._decoder = None
self._decoded_chars = '' # buffer for text returned from decoder
self._decoded_chars_used = 0 # offset into _decoded_chars for read()
self._snapshot = None # info for reconstructing decoder state
self._seekable = self._telling = self.buffer.seekable()
self._has_read1 = hasattr(self.buffer, 'read1')
self._b2cratio = 0.0
if self._seekable and self.writable():
position = self.buffer.tell()
if position != 0:
try:
self._get_encoder().setstate(0)
except LookupError:
# Sometimes the encoder doesn't exist
pass
# self._snapshot is either None, or a tuple (dec_flags, next_input)
# where dec_flags is the second (integer) item of the decoder state
# and next_input is the chunk of input bytes that comes next after the
# snapshot point. We use this to reconstruct decoder states in tell().
# Naming convention:
# - "bytes_..." for integer variables that count input bytes
# - "chars_..." for integer variables that count decoded characters
def __repr__(self):
result = "<{}.{}".format(self.__class__.__module__,
self.__class__.__qualname__)
try:
name = self.name
except Exception:
pass
else:
result += " name={0!r}".format(name)
try:
mode = self.mode
except Exception:
pass
else:
result += " mode={0!r}".format(mode)
return result + " encoding={0!r}>".format(self.encoding)
@property
def encoding(self):
return self._encoding
@property
def errors(self):
return self._errors
@property
def line_buffering(self):
return self._line_buffering
@property
def buffer(self):
return self._buffer
def seekable(self):
if self.closed:
raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
return self.buffer.readable()
def writable(self):
return self.buffer.writable()
def flush(self):
self.buffer.flush()
self._telling = self._seekable
def close(self):
if self.buffer is not None and not self.closed:
try:
self.flush()
finally:
self.buffer.close()
@property
def closed(self):
return self.buffer.closed
@property
def name(self):
return self.buffer.name
def fileno(self):
return self.buffer.fileno()
def isatty(self):
return self.buffer.isatty()
def write(self, s):
'Write data, where s is a str'
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, str):
raise TypeError("can't write %s to text stream" %
s.__class__.__name__)
length = len(s)
haslf = (self._writetranslate or self._line_buffering) and "\n" in s
if haslf and self._writetranslate and self._writenl != "\n":
s = s.replace("\n", self._writenl)
encoder = self._encoder or self._get_encoder()
# XXX What if we were just reading?
b = encoder.encode(s)
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
return length
def _get_encoder(self):
make_encoder = codecs.getincrementalencoder(self._encoding)
self._encoder = make_encoder(self._errors)
return self._encoder
def _get_decoder(self):
make_decoder = codecs.getincrementaldecoder(self._encoding)
decoder = make_decoder(self._errors)
if self._readuniversal:
decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
self._decoder = decoder
return decoder
# The following three methods implement an ADT for _decoded_chars.
# Text returned from the decoder is buffered here until the client
# requests it by calling our read() or readline() method.
def _set_decoded_chars(self, chars):
"""Set the _decoded_chars buffer."""
self._decoded_chars = chars
self._decoded_chars_used = 0
def _get_decoded_chars(self, n=None):
"""Advance into the _decoded_chars buffer."""
offset = self._decoded_chars_used
if n is None:
chars = self._decoded_chars[offset:]
else:
chars = self._decoded_chars[offset:offset + n]
self._decoded_chars_used += len(chars)
return chars
def _rewind_decoded_chars(self, n):
"""Rewind the _decoded_chars buffer."""
if self._decoded_chars_used < n:
raise AssertionError("rewind decoded_chars out of bounds")
self._decoded_chars_used -= n
def _read_chunk(self):
"""
Read and decode the next chunk of data from the BufferedReader.
"""
# The return value is True unless EOF was reached. The decoded
# string is placed in self._decoded_chars (replacing its previous
# value). The entire input chunk is sent to the decoder, though
# some of it may remain buffered in the decoder, yet to be
# converted.
if self._decoder is None:
raise ValueError("no decoder")
if self._telling:
# To prepare for tell(), we need to snapshot a point in the
# file where the decoder's input buffer is empty.
dec_buffer, dec_flags = self._decoder.getstate()
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
# Read a chunk, decode it, and put the result in self._decoded_chars.
if self._has_read1:
input_chunk = self.buffer.read1(self._CHUNK_SIZE)
else:
input_chunk = self.buffer.read(self._CHUNK_SIZE)
eof = not input_chunk
decoded_chars = self._decoder.decode(input_chunk, eof)
self._set_decoded_chars(decoded_chars)
if decoded_chars:
self._b2cratio = len(input_chunk) / len(self._decoded_chars)
else:
self._b2cratio = 0.0
if self._telling:
# At the snapshot point, len(dec_buffer) bytes before the read,
# the next input to be decoded is dec_buffer + input_chunk.
self._snapshot = (dec_flags, dec_buffer + input_chunk)
return not eof
def _pack_cookie(self, position, dec_flags=0,
bytes_to_feed=0, need_eof=0, chars_to_skip=0):
# The meaning of a tell() cookie is: seek to position, set the
# decoder flags to dec_flags, read bytes_to_feed bytes, feed them
# into the decoder with need_eof as the EOF flag, then skip
# chars_to_skip characters of the decoded result. For most simple
# decoders, tell() will often just give a byte offset in the file.
return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
(chars_to_skip<<192) | bool(need_eof)<<256)
def _unpack_cookie(self, bigint):
rest, position = divmod(bigint, 1<<64)
rest, dec_flags = divmod(rest, 1<<64)
rest, bytes_to_feed = divmod(rest, 1<<64)
need_eof, chars_to_skip = divmod(rest, 1<<64)
return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
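    # Worked example of the cookie arithmetic above (each field occupies its
    # own 64-bit slot, so the fields never collide):
    #
    #   _pack_cookie(10, dec_flags=1, bytes_to_feed=2, chars_to_skip=3)
    #     == 10 + (1 << 64) + (2 << 128) + (3 << 192)
    #
    # and _unpack_cookie() recovers (10, 1, 2, 0, 3) by repeated
    # divmod(cookie, 1 << 64).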
def tell(self):
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if not self._telling:
raise OSError("telling position disabled by next() call")
self.flush()
position = self.buffer.tell()
decoder = self._decoder
if decoder is None or self._snapshot is None:
if self._decoded_chars:
# This should never happen.
raise AssertionError("pending decoded text")
return position
# Skip backward to the snapshot point (see _read_chunk).
dec_flags, next_input = self._snapshot
position -= len(next_input)
# How many decoded characters have been used up since the snapshot?
chars_to_skip = self._decoded_chars_used
if chars_to_skip == 0:
# We haven't moved from the snapshot point.
return self._pack_cookie(position, dec_flags)
# Starting from the snapshot position, we will walk the decoder
# forward until it gives us enough decoded characters.
saved_state = decoder.getstate()
try:
# Fast search for an acceptable start point, close to our
# current pos.
# Rationale: calling decoder.decode() has a large overhead
# regardless of chunk size; we want the number of such calls to
# be O(1) in most situations (common decoders, non-crazy input).
# Actually, it will be exactly 1 for fixed-size codecs (all
# 8-bit codecs, also UTF-16 and UTF-32).
skip_bytes = int(self._b2cratio * chars_to_skip)
skip_back = 1
assert skip_bytes <= len(next_input)
while skip_bytes > 0:
decoder.setstate((b'', dec_flags))
                # Decode up to the tentative start point
n = len(decoder.decode(next_input[:skip_bytes]))
if n <= chars_to_skip:
b, d = decoder.getstate()
if not b:
# Before pos and no bytes buffered in decoder => OK
dec_flags = d
chars_to_skip -= n
break
# Skip back by buffered amount and reset heuristic
skip_bytes -= len(b)
skip_back = 1
else:
# We're too far ahead, skip back a bit
skip_bytes -= skip_back
skip_back = skip_back * 2
else:
skip_bytes = 0
decoder.setstate((b'', dec_flags))
# Note our initial start point.
start_pos = position + skip_bytes
start_flags = dec_flags
if chars_to_skip == 0:
# We haven't moved from the start point.
return self._pack_cookie(start_pos, start_flags)
# Feed the decoder one byte at a time. As we go, note the
# nearest "safe start point" before the current location
# (a point where the decoder has nothing buffered, so seek()
# can safely start from there and advance to this location).
bytes_fed = 0
need_eof = 0
# Chars decoded since `start_pos`
chars_decoded = 0
for i in range(skip_bytes, len(next_input)):
bytes_fed += 1
chars_decoded += len(decoder.decode(next_input[i:i+1]))
dec_buffer, dec_flags = decoder.getstate()
if not dec_buffer and chars_decoded <= chars_to_skip:
# Decoder buffer is empty, so this is a safe start point.
start_pos += bytes_fed
chars_to_skip -= chars_decoded
start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
if chars_decoded >= chars_to_skip:
break
else:
# We didn't get enough decoded data; signal EOF to get more.
chars_decoded += len(decoder.decode(b'', final=True))
need_eof = 1
if chars_decoded < chars_to_skip:
raise OSError("can't reconstruct logical file position")
# The returned cookie corresponds to the last safe start point.
return self._pack_cookie(
start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
finally:
decoder.setstate(saved_state)
def truncate(self, pos=None):
self.flush()
if pos is None:
pos = self.tell()
return self.buffer.truncate(pos)
def detach(self):
if self.buffer is None:
raise ValueError("buffer is already detached")
self.flush()
buffer = self._buffer
self._buffer = None
return buffer
def seek(self, cookie, whence=0):
def _reset_encoder(position):
"""Reset the encoder (merely useful for proper BOM handling)"""
try:
encoder = self._encoder or self._get_encoder()
except LookupError:
# Sometimes the encoder doesn't exist
pass
else:
if position != 0:
encoder.setstate(0)
else:
encoder.reset()
if self.closed:
raise ValueError("tell on closed file")
if not self._seekable:
raise UnsupportedOperation("underlying stream is not seekable")
if whence == 1: # seek relative to current position
if cookie != 0:
raise UnsupportedOperation("can't do nonzero cur-relative seeks")
# Seeking to the current position should attempt to
# sync the underlying buffer with the current position.
whence = 0
cookie = self.tell()
if whence == 2: # seek relative to end of file
if cookie != 0:
raise UnsupportedOperation("can't do nonzero end-relative seeks")
self.flush()
position = self.buffer.seek(0, 2)
self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
_reset_encoder(position)
return position
if whence != 0:
raise ValueError("unsupported whence (%r)" % (whence,))
if cookie < 0:
raise ValueError("negative seek position %r" % (cookie,))
self.flush()
# The strategy of seek() is to go back to the safe start point
# and replay the effect of read(chars_to_skip) from there.
start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
self._unpack_cookie(cookie)
# Seek back to the safe start point.
self.buffer.seek(start_pos)
self._set_decoded_chars('')
self._snapshot = None
# Restore the decoder to its state from the safe start point.
if cookie == 0 and self._decoder:
self._decoder.reset()
elif self._decoder or dec_flags or chars_to_skip:
self._decoder = self._decoder or self._get_decoder()
self._decoder.setstate((b'', dec_flags))
self._snapshot = (dec_flags, b'')
if chars_to_skip:
# Just like _read_chunk, feed the decoder and save a snapshot.
input_chunk = self.buffer.read(bytes_to_feed)
self._set_decoded_chars(
self._decoder.decode(input_chunk, need_eof))
self._snapshot = (dec_flags, input_chunk)
# Skip chars_to_skip of the decoded characters.
if len(self._decoded_chars) < chars_to_skip:
raise OSError("can't restore logical file position")
self._decoded_chars_used = chars_to_skip
_reset_encoder(cookie)
return cookie
def read(self, size=None):
self._checkReadable()
if size is None:
size = -1
decoder = self._decoder or self._get_decoder()
try:
size.__index__
except AttributeError as err:
raise TypeError("an integer is required") from err
if size < 0:
# Read everything.
result = (self._get_decoded_chars() +
decoder.decode(self.buffer.read(), final=True))
self._set_decoded_chars('')
self._snapshot = None
return result
else:
# Keep reading chunks until we have size characters to return.
eof = False
result = self._get_decoded_chars(size)
while len(result) < size and not eof:
eof = not self._read_chunk()
result += self._get_decoded_chars(size - len(result))
return result
def __next__(self):
self._telling = False
line = self.readline()
if not line:
self._snapshot = None
self._telling = self._seekable
raise StopIteration
return line
def readline(self, size=None):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError("size must be an integer")
# Grab all the decoded text (we will rewind any extra bits later).
line = self._get_decoded_chars()
start = 0
# Make the decoder if it doesn't already exist.
if not self._decoder:
self._get_decoder()
pos = endpos = None
while True:
if self._readtranslate:
# Newlines are already translated, only search for \n
pos = line.find('\n', start)
if pos >= 0:
endpos = pos + 1
break
else:
start = len(line)
elif self._readuniversal:
# Universal newline search. Find any of \r, \r\n, \n
# The decoder ensures that \r\n are not split in two pieces
# In C we'd look for these in parallel of course.
nlpos = line.find("\n", start)
crpos = line.find("\r", start)
if crpos == -1:
if nlpos == -1:
# Nothing found
start = len(line)
else:
# Found \n
endpos = nlpos + 1
break
elif nlpos == -1:
# Found lone \r
endpos = crpos + 1
break
elif nlpos < crpos:
# Found \n
endpos = nlpos + 1
break
elif nlpos == crpos + 1:
# Found \r\n
endpos = crpos + 2
break
else:
# Found \r
endpos = crpos + 1
break
else:
# non-universal
pos = line.find(self._readnl)
if pos >= 0:
endpos = pos + len(self._readnl)
break
if size >= 0 and len(line) >= size:
endpos = size # reached length size
break
            # No line ending seen yet - get more data
while self._read_chunk():
if self._decoded_chars:
break
if self._decoded_chars:
line += self._get_decoded_chars()
else:
# end of file
self._set_decoded_chars('')
self._snapshot = None
return line
if size >= 0 and endpos > size:
endpos = size # don't exceed size
# Rewind _decoded_chars to just after the line ending we found.
self._rewind_decoded_chars(len(line) - endpos)
return line[:endpos]
@property
def newlines(self):
return self._decoder.newlines if self._decoder else None
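# Illustrative usage of TextIOWrapper (a sketch):
#
#   t = TextIOWrapper(BytesIO(b"caf\xc3\xa9\n"), encoding="utf-8")
#   t.readline()              # -> "café\n"
#   t.seek(0); t.read()       # -> "café\n"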
class StringIO(TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    The initial_value argument sets the initial contents of the buffer.
    The newline argument works like that of TextIOWrapper's constructor.
"""
def __init__(self, initial_value="", newline="\n"):
super(StringIO, self).__init__(BytesIO(),
encoding="utf-8",
errors="surrogatepass",
newline=newline)
# Issue #5645: make universal newlines semantics the same as in the
# C version, even under Windows.
if newline is None:
self._writetranslate = False
if initial_value is not None:
if not isinstance(initial_value, str):
raise TypeError("initial_value must be str or None, not {0}"
.format(type(initial_value).__name__))
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
decoder = self._decoder or self._get_decoder()
old_state = decoder.getstate()
decoder.reset()
try:
return decoder.decode(self.buffer.getvalue(), final=True)
finally:
decoder.setstate(old_state)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
# that's an implementation detail.
return object.__repr__(self)
@property
def errors(self):
return None
@property
def encoding(self):
return None
def detach(self):
# This doesn't make sense on StringIO.
self._unsupported("detach")
| 88,097 | 2,533 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/codecs.py | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
from _codecs import (_forget_codec, ascii_decode, ascii_encode,
                     charmap_build, charmap_decode, charmap_encode,
                     decode, encode, escape_decode, escape_encode,
                     latin_1_decode, latin_1_encode, lookup, lookup_error,
                     raw_unicode_escape_decode, raw_unicode_escape_encode,
                     readbuffer_encode, register, register_error,
                     unicode_escape_decode, unicode_escape_encode,
                     unicode_internal_decode, unicode_internal_encode,
                     utf_16_be_decode, utf_16_be_encode, utf_16_decode,
                     utf_16_encode, utf_16_ex_decode, utf_16_le_decode,
                     utf_16_le_encode, utf_32_be_decode, utf_32_be_encode,
                     utf_32_decode, utf_32_encode, utf_32_ex_decode,
                     utf_32_le_decode, utf_32_le_encode, utf_7_decode,
                     utf_7_encode, utf_8_decode, utf_8_encode)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder",
"StreamReader", "StreamWriter",
"StreamReaderWriter", "StreamRecoder",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"encode", "decode", "iterencode", "iterdecode",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"backslashreplace_errors", "namereplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
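# A minimal BOM-sniffing sketch using the constants above (sniff_bom is a
# hypothetical helper, not part of this module). The UTF-32 BOMs must be
# tested before the UTF-16 ones because BOM_UTF32_LE starts with BOM_UTF16_LE:
#
#   def sniff_bom(data):
#       for bom, enc in ((BOM_UTF32_LE, 'utf-32-le'),
#                        (BOM_UTF32_BE, 'utf-32-be'),
#                        (BOM_UTF8, 'utf-8'),
#                        (BOM_UTF16_LE, 'utf-16-le'),
#                        (BOM_UTF16_BE, 'utf-16-be')):
#           if data.startswith(bom):
#               return enc
#       return None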
### Codec base classes (defining the API)
class CodecInfo(tuple):
"""Codec details when looking up the codec registry"""
# Private API to allow Python 3.4 to blacklist the known non-Unicode
# codecs in the standard library. A more general mechanism to
# reliably distinguish test encodings from other codecs will hopefully
# be defined for Python 3.5
#
# See http://bugs.python.org/issue19619
_is_text_encoding = True # Assume codecs are text encodings by default
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None,
*, _is_text_encoding=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
if _is_text_encoding is not None:
self._is_text_encoding = _is_text_encoding
return self
def __repr__(self):
return "<%s.%s object for encoding %s at %#x>" % \
(self.__class__.__module__, self.__class__.__qualname__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private code points U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences.
'namereplace' - Replace with \\N{...} escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamWriter for codecs which have to keep state in order to
make encoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamReader for codecs which have to keep state in order to
make decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Create an IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
        Decode input and return the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
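# Illustrative behaviour (a sketch): a concrete incremental decoder keeps
# incomplete byte sequences between calls, e.g. for UTF-8:
#
#   dec = lookup('utf-8').incrementaldecoder()
#   dec.decode(b'\xe2\x82')   # -> '' (incomplete sequence is buffered)
#   dec.decode(b'\xac')       # -> '\u20ac' (EURO SIGN)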
#
# The StreamWriter and StreamReader classes provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
             'replace' - replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences.
'namereplace' - Replace with \\N{...} escape sequences.
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
            Calling this method should ensure that the data on the
            output is put into a clean state that allows appending
            fresh data without having to rescan the whole
            stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
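# Illustrative usage of a concrete StreamWriter (a sketch):
#
#   import io
#   buf = io.BytesIO()
#   w = lookup('utf-16-le').streamwriter(buf)
#   w.write('hi')
#   buf.getvalue()            # -> b'h\x00i\x00'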
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
             'replace' - replace with a suitable replacement character
             'backslashreplace' - Replace with backslashed escape sequences.
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of decoded code points or bytes to
return. read() will never return more data than requested,
but it might return less, if there is not enough available.
size indicates the approximate maximum number of decoded
bytes or code points to read for decoding. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input, only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy, meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
if chars < 0:
# For compatibility with other read() methods that take a
# single argument
chars = size
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
if not data:
break
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as a list.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way of finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
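# Example use of a StreamReader, under the same UTF-8 assumption (an
# illustrative sketch):
#
#   >>> import codecs, io
#   >>> reader = codecs.getreader('utf-8')(io.BytesIO(b'h\xc3\xa9llo\nworld\n'))
#   >>> reader.readline()
#   'héllo\n'
#   >>> reader.read()
#   'world\n'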
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances translate data from one encoding to another.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the StreamRecoder is first decoded into an
intermediate format (depending on the "decode" codec) and then
written to the underlying stream using an instance of the provided
Writer class.
In the other direction, data is read from the underlying stream using
a Reader instance and then encoded and returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
data visible to .read() and .write()) while Reader and Writer
work on the backend (the data in stream).
You can use these objects to do transparent
transcodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode and decode must adhere to the Codec interface; Reader and
Writer must be factory functions or classes providing the
StreamReader and StreamWriter interfaces resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='r', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Underlying encoded files are always opened in binary mode.
The default file mode is 'r', meaning to open the file in read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
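# Example use of open(), assuming a writable file named 'example.txt'
# (hypothetical path; a sketch only):
#
#   >>> import codecs
#   >>> with codecs.open('example.txt', 'w', encoding='utf-8') as f:
#   ...     f.write('héllo')
#   >>> with codecs.open('example.txt', encoding='utf-8') as f:
#   ...     f.read()
#   'héllo'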
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Data written to the wrapped file is decoded according
to the given data_encoding and then encoded to the underlying
file using file_encoding. The intermediate data type
will usually be Unicode but depends on the specified codecs.
Bytes read from the file are decoded using file_encoding and then
passed back to the caller encoded using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
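# Example of the two-way translation EncodedFile() provides: bytes written in
# data_encoding come out of the underlying stream in file_encoding (a sketch,
# assuming both codecs are registered):
#
#   >>> import io
#   >>> raw = io.BytesIO()
#   >>> ef = EncodedFile(raw, 'utf-8', 'latin-1')
#   >>> ef.write(b'\xc3\xa9')   # UTF-8 for 'é'
#   >>> raw.getvalue()          # stored as Latin-1
#   b'\xe9'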
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codec doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codec doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
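# The lookup helpers above each return one entry of the CodecInfo tuple; for
# instance (a sketch, assuming the stdlib UTF-8 codec):
#
#   >>> encode = getencoder('utf-8')
#   >>> encode('é')             # returns (encoded bytes, input length consumed)
#   (b'\xc3\xa9', 1)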
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
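# iterdecode() keeps decoder state across chunks, so multi-byte characters may
# safely straddle chunk boundaries (a sketch, assuming the UTF-8 codec):
#
#   >>> list(iterdecode([b'h\xc3', b'\xa9'], 'utf-8'))
#   ['h', 'é']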
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
multiple characters to \\u001a.
"""
m = {}
for k,v in decoding_map.items():
if v not in m:
m[v] = k
else:
m[v] = None
return m
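# For example, a decoding map in which two byte values decode to the same
# character yields an undefined (None) reverse mapping for that character:
#
#   >>> make_encoding_map({0x00: 'a', 0x01: 'b', 0x02: 'b'})
#   {'a': 0, 'b': None}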
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
namereplace_errors = lookup_error("namereplace")
except LookupError:
# In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
namereplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
cosmopolitan/third_party/python/Lib/warnings.py
"""Python part of the warnings subsystem."""
import sys
__all__ = ["warn", "warn_explicit", "showwarning",
"formatwarning", "filterwarnings", "simplefilter",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
msg = WarningMessage(message, category, filename, lineno, file, line)
_showwarnmsg_impl(msg)
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
msg = WarningMessage(message, category, filename, lineno, None, line)
return _formatwarnmsg_impl(msg)
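# For example, assuming a file named 'example.py' does not exist on disk (so
# no source line is appended), the standard format looks like this (sketch):
#
#   >>> formatwarning('careful', UserWarning, 'example.py', 1)
#   'example.py:1: UserWarning: careful\n'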
def _showwarnmsg_impl(msg):
file = msg.file
if file is None:
file = sys.stderr
if file is None:
# sys.stderr is None when run with pythonw.exe:
# warnings get lost
return
text = _formatwarnmsg(msg)
try:
file.write(text)
except OSError:
# the file (probably stderr) is invalid - this warning gets lost.
pass
def _formatwarnmsg_impl(msg):
s = ("%s:%s: %s: %s\n"
% (msg.filename, msg.lineno, msg.category.__name__,
msg.message))
if msg.line is None:
try:
import linecache
line = linecache.getline(msg.filename, msg.lineno)
except Exception:
# When a warning is logged during Python shutdown, linecache
# and the import machinery don't work anymore
line = None
linecache = None
else:
line = msg.line
if line:
line = line.strip()
s += " %s\n" % line
if msg.source is not None:
try:
import tracemalloc
tb = tracemalloc.get_object_traceback(msg.source)
except Exception:
# When a warning is logged during Python shutdown, tracemalloc
# and the import machinery don't work anymore
tb = None
if tb is not None:
s += 'Object allocated at (most recent call first):\n'
for frame in tb:
s += (' File "%s", lineno %s\n'
% (frame.filename, frame.lineno))
try:
if linecache is not None:
line = linecache.getline(frame.filename, frame.lineno)
else:
line = None
except Exception:
line = None
if line:
line = line.strip()
s += ' %s\n' % line
return s
# Keep a reference to check if the function was replaced
_showwarning_orig = showwarning
def _showwarnmsg(msg):
"""Hook to write a warning to a file; replace if you like."""
try:
sw = showwarning
except NameError:
pass
else:
if sw is not _showwarning_orig:
# warnings.showwarning() was replaced
if not callable(sw):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
sw(msg.message, msg.category, msg.filename, msg.lineno,
msg.file, msg.line)
return
_showwarnmsg_impl(msg)
# Keep a reference to check if the function was replaced
_formatwarning_orig = formatwarning
def _formatwarnmsg(msg):
"""Function to format a warning the standard way."""
try:
fw = formatwarning
except NameError:
pass
else:
if fw is not _formatwarning_orig:
# warnings.formatwarning() was replaced
return fw(msg.message, msg.category,
msg.filename, msg.lineno, line=msg.line)
return _formatwarnmsg_impl(msg)
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
_add_filter(action, re.compile(message, re.I), category,
re.compile(module), lineno, append=append)
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
_add_filter(action, None, category, None, lineno, append=append)
def _add_filter(*item, append):
# Remove possible duplicate filters, so new one will be placed
# in correct place. If append=True and duplicate exists, do nothing.
if not append:
try:
filters.remove(item)
except ValueError:
pass
filters.insert(0, item)
else:
if item not in filters:
filters.append(item)
_filters_mutated()
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
_filters_mutated()
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
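# A -W option has the colon-separated form action:message:category:module:lineno,
# with trailing empty fields optional. For example, "-W error::DeprecationWarning"
# turns every DeprecationWarning into an exception, and "-W ignore:::mymodule"
# (hypothetical module name) silences all warnings from a module matching
# "mymodule".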
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
def _is_internal_frame(frame):
"""Signal whether the frame is an internal CPython implementation detail."""
filename = frame.f_code.co_filename
return 'importlib' in filename and '_bootstrap' in filename
def _next_external_frame(frame):
"""Find the next frame that doesn't involve CPython internals."""
frame = frame.f_back
while frame is not None and _is_internal_frame(frame):
frame = frame.f_back
return frame
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1, source=None):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
if not (isinstance(category, type) and issubclass(category, Warning)):
raise TypeError("category must be a Warning subclass, "
"not '{:s}'".format(type(category).__name__))
# Get context information
try:
if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
# If frame is too small to care or if the warning originated in
# internal code, then do not try to hide any frames.
frame = sys._getframe(stacklevel)
else:
frame = sys._getframe(1)
# Look for one frame less since the above line starts us off.
for x in range(stacklevel-1):
frame = _next_external_frame(frame)
if frame is None:
raise ValueError
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = frame.f_globals
lineno = frame.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith(".pyc"):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals, source)
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None,
source=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if registry.get('version', 0) != _filters_version:
registry.clear()
registry['version'] = _filters_version
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
import linecache
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
# Print message and context
msg = WarningMessage(message, category, filename, lineno, source)
_showwarnmsg(msg)
class WarningMessage(object):
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line", "source")
def __init__(self, message, category, filename, lineno, file=None,
line=None, source=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._module._filters_mutated()
self._showwarning = self._module.showwarning
self._showwarnmsg_impl = self._module._showwarnmsg_impl
if self._record:
log = []
self._module._showwarnmsg_impl = log.append
# Reset showwarning() to the default implementation to make sure
# that _showwarnmsg() calls _showwarnmsg_impl()
self._module.showwarning = self._module._showwarning_orig
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module._filters_mutated()
self._module.showwarning = self._showwarning
self._module._showwarnmsg_impl = self._showwarnmsg_impl
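# Example use of catch_warnings to capture warnings instead of printing them
# (a sketch; record=True substitutes the list-appending implementation above):
#
#   >>> import warnings
#   >>> with warnings.catch_warnings(record=True) as caught:
#   ...     warnings.simplefilter('always')
#   ...     warnings.warn('something happened')
#   >>> str(caught[0].message)
#   'something happened'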
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, it matches anything.
_warnings_defaults = False
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit, _filters_mutated)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
_filters_version = 1
def _filters_mutated():
global _filters_version
_filters_version += 1
if __name__ == 'PYOBJ.COM':
import _warnings
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
# resource usage warnings are enabled by default in pydebug mode
if hasattr(sys, 'gettotalrefcount'):
resource_action = "always"
else:
resource_action = "ignore"
simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
cosmopolitan/third_party/python/Lib/difflib.py
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
Function context_diff(a, b):
For two lists of strings, return a delta in context diff format.
Function ndiff(a, b):
Return a delta: the difference between `a` and `b` (lists of strings).
Function restore(delta, which):
Return one of the two sequences that generated an ndiff delta.
Function unified_diff(a, b):
For two lists of strings, return a delta in unified diff format.
Class SequenceMatcher:
A flexible class for comparing pairs of sequences of any type.
Class Differ:
For producing human-readable deltas from sequences of lines of text.
Class HtmlDiff:
For producing HTML side by side comparison with change highlights.
"""
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match']
from heapq import nlargest as _nlargest
from collections import namedtuple as _namedtuple
Match = _namedtuple('Match', 'a b size')
def _calculate_ratio(matches, length):
if length:
return 2.0 * matches / length
return 1.0
class SequenceMatcher:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches people's eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to synching up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
Methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
set_seqs(a, b)
Set the two sequences to be compared.
set_seq1(a)
Set the first sequence to be compared.
set_seq2(b)
Set the second sequence to be compared.
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
get_matching_blocks()
Return list of triples describing matching subsequences.
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
real_quick_ratio()
Return an upper bound on ratio() very quickly.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
not isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
not isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue') of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = list(map(Match._make, non_adjacent))
return self.matching_blocks
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = list(map(str, range(1,40)))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1, j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.__contains__, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
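# The three ratios form a chain of upper bounds, which is what makes the
# cheap ones useful as early-out filters (an illustrative sketch):
#
#   >>> s = SequenceMatcher(None, "abcd", "bcde")
#   >>> s.real_quick_ratio() >= s.quick_ratio() >= s.ratio()
#   True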
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("Apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = _nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _count_leading(line, ch):
"""
Return number of `ch` characters at the start of `line`.
Example:
>>> _count_leading(' abc', ' ')
3
"""
i, n = 0, len(line)
while i < n and line[i] == ch:
i += 1
return i
class Differ:
r"""
Differ is a class for comparing sequences of lines of text, and
producing human-readable differences or deltas. Differ uses
SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
Each line of a Differ delta begins with a two-letter code:
'- ' line unique to sequence 1
'+ ' line unique to sequence 2
' ' line common to both sequences
'? ' line not present in either input sequence
Lines beginning with '? ' attempt to guide the eye to intraline
differences, and were not present in either input sequence. These lines
can be confusing if the sequences contain tab characters.
Note that Differ makes no claim to produce a *minimal* diff. To the
contrary, minimal diffs are often counter-intuitive, because they synch
    up anywhere possible, sometimes on accidental matches 100 pages apart.
Restricting synch points to contiguous matches preserves some notion of
locality, at the occasional cost of producing a longer diff.
Example: Comparing two texts.
First we set up the texts, sequences of individual single-line strings
ending with newlines (such sequences can also be obtained from the
`readlines()` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
... 3. Simple is better than complex.
... 4. Complex is better than complicated.
... '''.splitlines(keepends=True)
>>> len(text1)
4
>>> text1[0][-1]
'\n'
>>> text2 = ''' 1. Beautiful is better than ugly.
... 3. Simple is better than complex.
... 4. Complicated is better than complex.
... 5. Flat is better than nested.
... '''.splitlines(keepends=True)
Next we instantiate a Differ object:
>>> d = Differ()
Note that when instantiating a Differ object we may pass functions to
filter out line and character 'junk'. See Differ.__init__ for details.
Finally, we compare the two:
>>> result = list(d.compare(text1, text2))
'result' is a list of strings, so let's pretty-print it:
>>> from pprint import pprint as _pprint
>>> _pprint(result)
[' 1. Beautiful is better than ugly.\n',
'- 2. Explicit is better than implicit.\n',
'- 3. Simple is better than complex.\n',
'+ 3. Simple is better than complex.\n',
'? ++\n',
'- 4. Complex is better than complicated.\n',
'? ^ ---- ^\n',
'+ 4. Complicated is better than complex.\n',
'? ++++ ^ ^\n',
'+ 5. Flat is better than nested.\n']
As a single multi-line string it looks like this:
>>> print(''.join(result), end="")
1. Beautiful is better than ugly.
- 2. Explicit is better than implicit.
- 3. Simple is better than complex.
+ 3. Simple is better than complex.
? ++
- 4. Complex is better than complicated.
? ^ ---- ^
+ 4. Complicated is better than complex.
? ++++ ^ ^
+ 5. Flat is better than nested.
Methods:
__init__(linejunk=None, charjunk=None)
Construct a text differencer, with optional filters.
compare(a, b)
Compare two sequences of lines; generate the resulting delta.
"""
def __init__(self, linejunk=None, charjunk=None):
"""
Construct a text differencer, with optional filters.
The two optional keyword parameters are for filter functions:
- `linejunk`: A function that should accept a single string argument,
and return true iff the string is junk. The module-level function
`IS_LINE_JUNK` may be used to filter out lines without visible
characters, except for at most one splat ('#'). It is recommended
to leave linejunk None; the underlying SequenceMatcher class has
an adaptive notion of "noise" lines that's better than any static
definition the author has ever been able to craft.
- `charjunk`: A function that should accept a string of length 1. The
module-level function `IS_CHARACTER_JUNK` may be used to filter out
whitespace characters (a blank or tab; **note**: bad idea to include
newline in this!). Use of IS_CHARACTER_JUNK is recommended.
"""
self.linejunk = linejunk
self.charjunk = charjunk
def compare(self, a, b):
r"""
Compare two sequences of lines; generate the resulting delta.
Each sequence must contain individual single-line strings ending with
newlines. Such sequences can be obtained from the `readlines()` method
of file-like objects. The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writelines()
method of a file-like object.
Example:
>>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
... 'ore\ntree\nemu\n'.splitlines(True))),
... end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
cruncher = SequenceMatcher(self.linejunk, a, b)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if tag == 'replace':
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
elif tag == 'delete':
g = self._dump('-', a, alo, ahi)
elif tag == 'insert':
g = self._dump('+', b, blo, bhi)
elif tag == 'equal':
g = self._dump(' ', a, alo, ahi)
else:
raise ValueError('unknown tag %r' % (tag,))
yield from g
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in range(lo, hi):
yield '%s %s' % (tag, x[i])
def _plain_replace(self, a, alo, ahi, b, blo, bhi):
assert alo < ahi and blo < bhi
# dump the shorter block first -- reduces the burden on short-term
# memory if the blocks are of very different sizes
if bhi - blo < ahi - alo:
first = self._dump('+', b, blo, bhi)
second = self._dump('-', a, alo, ahi)
else:
first = self._dump('-', a, alo, ahi)
second = self._dump('+', b, blo, bhi)
for g in first, second:
yield from g
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
r"""
When replacing one block of lines with another, search the blocks
for *similar* lines; the best-matching pair (if any) is used as a
synch point, and intraline difference marking is done on the
similar pair. Lots of work, but often worth it.
Example:
>>> d = Differ()
>>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
... ['abcdefGhijkl\n'], 0, 1)
>>> print(''.join(results), end="")
- abcDefghiJkl
? ^ ^ ^
+ abcdefGhijkl
? ^ ^ ^
"""
# don't synch up unless the lines have a similarity score of at
# least cutoff; best_ratio tracks the best score seen so far
best_ratio, cutoff = 0.74, 0.75
cruncher = SequenceMatcher(self.charjunk)
eqi, eqj = None, None # 1st indices of equal lines (if any)
# search for the pair that matches best without being identical
# (identical lines must be junk lines, & we don't want to synch up
# on junk -- unless we have to)
for j in range(blo, bhi):
bj = b[j]
cruncher.set_seq2(bj)
for i in range(alo, ahi):
ai = a[i]
if ai == bj:
if eqi is None:
eqi, eqj = i, j
continue
cruncher.set_seq1(ai)
# computing similarity is expensive, so use the quick
# upper bounds first -- have seen this speed up messy
# compares by a factor of 3.
# note that ratio() is only expensive to compute the first
# time it's called on a sequence pair; the expensive part
# of the computation is cached by cruncher
if cruncher.real_quick_ratio() > best_ratio and \
cruncher.quick_ratio() > best_ratio and \
cruncher.ratio() > best_ratio:
best_ratio, best_i, best_j = cruncher.ratio(), i, j
if best_ratio < cutoff:
# no non-identical "pretty close" pair
if eqi is None:
# no identical pair either -- treat it as a straight replace
yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
return
# no close pair, but an identical pair -- synch up on that
best_i, best_j, best_ratio = eqi, eqj, 1.0
else:
# there's a close pair, so forget the identical pair (if any)
eqi = None
# a[best_i] very similar to b[best_j]; eqi is None iff they're not
# identical
# pump out diffs from before the synch point
yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)
# do intraline marking on the synch pair
aelt, belt = a[best_i], b[best_j]
if eqi is None:
# pump out a '-', '?', '+', '?' quad for the synched lines
atags = btags = ""
cruncher.set_seqs(aelt, belt)
for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
la, lb = ai2 - ai1, bj2 - bj1
if tag == 'replace':
atags += '^' * la
btags += '^' * lb
elif tag == 'delete':
atags += '-' * la
elif tag == 'insert':
btags += '+' * lb
elif tag == 'equal':
atags += ' ' * la
btags += ' ' * lb
else:
raise ValueError('unknown tag %r' % (tag,))
yield from self._qformat(aelt, belt, atags, btags)
else:
# the synch pair is identical
yield ' ' + aelt
# pump out diffs from after the synch point
yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)
def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
g = []
if alo < ahi:
if blo < bhi:
g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
else:
g = self._dump('-', a, alo, ahi)
elif blo < bhi:
g = self._dump('+', b, blo, bhi)
yield from g
def _qformat(self, aline, bline, atags, btags):
r"""
Format "?" output and deal with leading tabs.
Example:
>>> d = Differ()
>>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
... ' ^ ^ ^ ', ' ^ ^ ^ ')
>>> for line in results: print(repr(line))
...
'- \tabcDefghiJkl\n'
'? \t ^ ^ ^\n'
'+ \tabcdefGhijkl\n'
'? \t ^ ^ ^\n'
"""
# Can hurt, but will probably help most of the time.
common = min(_count_leading(aline, "\t"),
_count_leading(bline, "\t"))
common = min(common, _count_leading(atags[:common], " "))
common = min(common, _count_leading(btags[:common], " "))
atags = atags[common:].rstrip()
btags = btags[common:].rstrip()
yield "- " + aline
if atags:
yield "? %s%s\n" % ("\t" * common, atags)
yield "+ " + bline
if btags:
yield "? %s%s\n" % ("\t" * common, btags)
# With respect to junk, an earlier version of ndiff simply refused to
# *start* a match with a junk element. The result was cases like this:
# before: private Thread currentThread;
# after: private volatile Thread currentThread;
# If you consider whitespace to be junk, the longest contiguous match
# not starting with junk is "e Thread currentThread". So ndiff reported
# that "e volatil" was inserted between the 't' and the 'e' in "private".
# While an accurate view, to people that's absurd. The current version
# looks for matching blocks that are entirely junk-free, then extends the
# longest one of those as far as possible but only with matching junk.
# So now "currentThread" is matched, then extended to suck up the
# preceding blank; then "private" is matched, and extended to suck up the
# following blank; then "Thread" is matched; and finally ndiff reports
# that "volatile " was inserted before "Thread". The only quibble
# remaining is that perhaps it was really the case that " volatile"
# was inserted after "private". I can live with that <wink>.
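# A sketch of the behavior described above, using ndiff() (defined below)
# with its default junk filters; the insertion is reported cleanly:
#
#   before = ['private Thread currentThread;\n']
#   after = ['private volatile Thread currentThread;\n']
#   print(''.join(ndiff(before, after)), end='')
#
# which marks "volatile " as inserted rather than splitting "private".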
import re
def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match):
r"""
Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
Examples:
>>> IS_LINE_JUNK('\n')
True
>>> IS_LINE_JUNK(' # \n')
True
>>> IS_LINE_JUNK('hello\n')
False
"""
return pat(line) is not None
def IS_CHARACTER_JUNK(ch, ws=" \t"):
r"""
Return 1 for ignorable character: iff `ch` is a space or tab.
Examples:
>>> IS_CHARACTER_JUNK(' ')
True
>>> IS_CHARACTER_JUNK('\t')
True
>>> IS_CHARACTER_JUNK('\n')
False
>>> IS_CHARACTER_JUNK('x')
False
"""
return ch in ws
########################################################################
### Unified Diff
########################################################################
def _format_range_unified(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{},{}'.format(beginning, length)
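# Worked examples of the unified "start,count" notation (illustrative):
#
#   _format_range_unified(3, 6)   # '4,3' -- lines 4-6, three lines
#   _format_range_unified(3, 4)   # '4'   -- a single line omits the count
#   _format_range_unified(3, 3)   # '3,0' -- empty range begins one line back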
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a unified diff.
Unified diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with ---, +++, or @@) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The unidiff format normally has a header for filenames and modification
times. Any or all of these may be specified using strings for
'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
Example:
>>> for line in unified_diff('one two three four'.split(),
... 'zero one tree four'.split(), 'Original', 'Current',
... '2005-01-26 23:30:50', '2010-04-02 10:20:52',
... lineterm=''):
... print(line) # doctest: +NORMALIZE_WHITESPACE
--- Original 2005-01-26 23:30:50
+++ Current 2010-04-02 10:20:52
@@ -1,4 +1,4 @@
+zero
one
-two
-three
+tree
four
"""
_check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
yield '+++ {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
file1_range = _format_range_unified(first[1], last[2])
file2_range = _format_range_unified(first[3], last[4])
yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm)
for tag, i1, i2, j1, j2 in group:
if tag == 'equal':
for line in a[i1:i2]:
yield ' ' + line
continue
if tag in {'replace', 'delete'}:
for line in a[i1:i2]:
yield '-' + line
if tag in {'replace', 'insert'}:
for line in b[j1:j2]:
yield '+' + line
########################################################################
### Context Diff
########################################################################
def _format_range_context(start, stop):
'Convert range to the "ed" format'
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if not length:
beginning -= 1 # empty ranges begin at line just before the range
if length <= 1:
return '{}'.format(beginning)
return '{},{}'.format(beginning, beginning + length - 1)
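# Unlike the unified "start,count" form above, the context format uses
# "start,end"; the same illustrative inputs give:
#
#   _format_range_context(3, 6)   # '4,6'
#   _format_range_context(3, 4)   # '4'
#   _format_range_context(3, 3)   # '3'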
# See http://www.unix.org/single_unix_specification/
def context_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
r"""
Compare two sequences of lines; generate the delta as a context diff.
Context diffs are a compact way of showing line changes and a few
lines of context. The number of context lines is set by 'n' which
defaults to three.
By default, the diff control lines (those with *** or ---) are
created with a trailing newline. This is helpful so that inputs
created from file.readlines() result in diffs that are suitable for
file.writelines() since both the inputs and outputs have trailing
newlines.
For inputs that do not have trailing newlines, set the lineterm
argument to "" so that the output will be uniformly newline free.
The context diff format normally has a header for filenames and
modification times. Any or all of these may be specified using
strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
The modification times are normally expressed in the ISO 8601 format.
If not specified, the strings default to blanks.
Example:
>>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
... end="")
*** Original
--- Current
***************
*** 1,4 ****
one
! two
! three
four
--- 1,4 ----
+ zero
one
! tree
four
"""
_check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
prefix = dict(insert='+ ', delete='- ', replace='! ', equal=' ')
started = False
for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
if not started:
started = True
fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
todate = '\t{}'.format(tofiledate) if tofiledate else ''
yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
yield '--- {}{}{}'.format(tofile, todate, lineterm)
first, last = group[0], group[-1]
yield '***************' + lineterm
file1_range = _format_range_context(first[1], last[2])
yield '*** {} ****{}'.format(file1_range, lineterm)
if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
for tag, i1, i2, _, _ in group:
if tag != 'insert':
for line in a[i1:i2]:
yield prefix[tag] + line
file2_range = _format_range_context(first[3], last[4])
yield '--- {} ----{}'.format(file2_range, lineterm)
if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
for tag, _, _, j1, j2 in group:
if tag != 'delete':
for line in b[j1:j2]:
yield prefix[tag] + line
def _check_types(a, b, *args):
# Checking types is weird, but the alternative is garbled output when
# someone passes mixed bytes and str to {unified,context}_diff(). E.g.
# without this check, passing filenames as bytes results in output like
# --- b'oldfile.txt'
# +++ b'newfile.txt'
# because of how str.format() incorporates bytes objects.
if a and not isinstance(a[0], str):
raise TypeError('lines to compare must be str, not %s (%r)' %
(type(a[0]).__name__, a[0]))
if b and not isinstance(b[0], str):
raise TypeError('lines to compare must be str, not %s (%r)' %
(type(b[0]).__name__, b[0]))
for arg in args:
if not isinstance(arg, str):
raise TypeError('all arguments must be str, not: %r' % (arg,))
def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'',
fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'):
r"""
Compare `a` and `b`, two sequences of lines represented as bytes rather
than str. This is a wrapper for `dfunc`, which is typically either
unified_diff() or context_diff(). Inputs are losslessly converted to
strings so that `dfunc` only has to worry about strings, and encoded
back to bytes on return. This is necessary to compare files with
unknown or inconsistent encoding. All other inputs (except `n`) must be
bytes rather than str.
"""
def decode(s):
try:
return s.decode('ascii', 'surrogateescape')
except AttributeError as err:
msg = ('all arguments must be bytes, not %s (%r)' %
(type(s).__name__, s))
raise TypeError(msg) from err
a = list(map(decode, a))
b = list(map(decode, b))
fromfile = decode(fromfile)
tofile = decode(tofile)
fromfiledate = decode(fromfiledate)
tofiledate = decode(tofiledate)
lineterm = decode(lineterm)
lines = dfunc(a, b, fromfile, tofile, fromfiledate, tofiledate, n, lineterm)
for line in lines:
yield line.encode('ascii', 'surrogateescape')
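# Minimal usage sketch (made-up data; assumes "import sys"): diff byte
# lines of unknown encoding by wrapping unified_diff:
#
#   old = [b'one\n', b'tw\xb5\n']   # \xb5 is not valid ASCII
#   new = [b'one\n', b'two\n']
#   for line in diff_bytes(unified_diff, old, new, b'a.txt', b'b.txt'):
#       sys.stdout.buffer.write(line)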
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
r"""
Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
Optional keyword parameters `linejunk` and `charjunk` are for filter
functions, or can be None:
- linejunk: A function that should accept a single string argument and
return true iff the string is junk. The default is None, and is
recommended; the underlying SequenceMatcher class has an adaptive
notion of "noise" lines.
- charjunk: A function that accepts a character (string of length
1), and returns true iff the character is junk. The default is
the module-level function IS_CHARACTER_JUNK, which filters out
whitespace characters (a blank or tab; note: it's a bad idea to
include newline in this!).
Tools/scripts/ndiff.py is a command-line front-end to this function.
Example:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> print(''.join(diff), end="")
- one
? ^
+ ore
? ^
- two
- three
? -
+ tree
+ emu
"""
return Differ(linejunk, charjunk).compare(a, b)
def _mdiff(fromlines, tolines, context=None, linejunk=None,
charjunk=IS_CHARACTER_JUNK):
r"""Returns generator yielding marked up from/to side by side differences.
Arguments:
    fromlines -- list of text lines to be compared to tolines
tolines -- list of text lines to be compared to fromlines
context -- number of context lines to display on each side of difference,
if None, all from/to text lines will be generated.
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
line num -- integer or None (to indicate a context separation)
line text -- original line text with following markers inserted:
'\0+' -- marks start of added text
'\0-' -- marks start of deleted text
'\0^' -- marks start of changed text
'\1' -- marks end of added/deleted/changed text
boolean flag -- None indicates context separation, True indicates
either "from" or "to" line contains a change, otherwise False.
This function/iterator was originally developed to generate side by side
file difference for making HTML pages (see HtmlDiff class for example
usage).
Note, this function utilizes the ndiff function to generate the side by
side difference markup. Optional ndiff arguments may be passed to this
function and they in turn will be passed to ndiff.
"""
import re
# regular expression for finding intraline change indices
change_re = re.compile(r'(\++|\-+|\^+)')
# create the difference iterator to generate the differences
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
def _make_line(lines, format_key, side, num_lines=[0,0]):
"""Returns line of text with user's change markup and line formatting.
lines -- list of lines from the ndiff generator to produce a line of
text from. When producing the line of text to return, the
lines used are removed from this list.
format_key -- '+' return first line in list with "add" markup around
the entire line.
'-' return first line in list with "delete" markup around
the entire line.
'?' return first line in list with add/delete/change
intraline markup (indices obtained from second line)
None return first line in list with no markup
        side -- index into the num_lines list (0=from,1=to)
num_lines -- from/to current line number. This is NOT intended to be a
passed parameter. It is present as a keyword argument to
maintain memory of the current line numbers between calls
of this function.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
num_lines[side] += 1
# Handle case where no user markup is to be added, just return line of
# text with user's line format to allow for usage of the line number.
if format_key is None:
return (num_lines[side],lines.pop(0)[2:])
# Handle case of intraline changes
if format_key == '?':
text, markers = lines.pop(0), lines.pop(0)
# find intraline changes (store change type and indices in tuples)
sub_info = []
def record_sub_info(match_object,sub_info=sub_info):
sub_info.append([match_object.group(1)[0],match_object.span()])
return match_object.group(1)
change_re.sub(record_sub_info,markers)
# process each tuple inserting our special marks that won't be
# noticed by an xml/html escaper.
for key,(begin,end) in reversed(sub_info):
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
text = text[2:]
# Handle case of add/delete entire line
else:
text = lines.pop(0)[2:]
# if line of text is just a newline, insert a space so there is
# something for the user to highlight and see.
if not text:
text = ' '
# insert marks that won't be noticed by an xml/html escaper.
text = '\0' + format_key + text + '\1'
# Return line of text, first allow user's line formatter to do its
# thing (such as adding the line number) then replace the special
        # marks with the user's change markup.
return (num_lines[side],text)
def _line_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from a
differencing iterator, processes them and yields them. When it can
it yields both a "from" and a "to" line, otherwise it will yield one
or the other. In addition to yielding the lines of from/to text, a
boolean flag is yielded to indicate if the text line(s) have
differences in them.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
lines = []
num_blanks_pending, num_blanks_to_yield = 0, 0
while True:
# Load up next 4 lines so we can look ahead, create strings which
# are a concatenation of the first character of each of the 4 lines
# so we can do some very readable comparisons.
while len(lines) < 4:
lines.append(next(diff_lines_iterator, 'X'))
s = ''.join([line[0] for line in lines])
if s.startswith('X'):
# When no more lines, pump out any remaining blank lines so the
# corresponding add/delete lines get a matching blank line so
# all line pairs get yielded at the next level.
num_blanks_to_yield = num_blanks_pending
elif s.startswith('-?+?'):
# simple intraline change
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
continue
elif s.startswith('--++'):
# in delete block, add block coming: we do NOT want to get
# caught up on blank lines yet, just process the delete line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith(('--?+', '--+', '- ')):
# in delete block and see an intraline change or unchanged line
# coming: yield the delete line and then blanks
from_line,to_line = _make_line(lines,'-',0), None
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
elif s.startswith('-+?'):
# intraline change
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
continue
elif s.startswith('-?+'):
# intraline change
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
continue
elif s.startswith('-'):
# delete FROM line
num_blanks_pending -= 1
yield _make_line(lines,'-',0), None, True
continue
elif s.startswith('+--'):
# in add block, delete block coming: we do NOT want to get
# caught up on blank lines yet, just process the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(('+ ', '+-')):
# will be leaving an add block: yield blanks then add line
from_line, to_line = None, _make_line(lines,'+',1)
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
elif s.startswith('+'):
# inside an add block, yield the add line
num_blanks_pending += 1
yield None, _make_line(lines,'+',1), True
continue
elif s.startswith(' '):
# unchanged text, yield it to both sides
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
continue
# Catch up on the blank lines so when we yield the next from/to
# pair, they are lined up.
while(num_blanks_to_yield < 0):
num_blanks_to_yield += 1
yield None,('','\n'),True
while(num_blanks_to_yield > 0):
num_blanks_to_yield -= 1
yield ('','\n'),None,True
if s.startswith('X'):
return
else:
yield from_line,to_line,True
def _line_pair_iterator():
"""Yields from/to lines of text with a change indication.
This function is an iterator. It itself pulls lines from the line
iterator. Its difference from that iterator is that this function
always yields a pair of from/to text lines (with the change
indication). If necessary it will collect single from/to lines
        until it has a matching from/to pair to yield.
Note, this function is purposefully not defined at the module scope so
that data it needs from its parent function (within whose context it
is defined) does not need to be of module scope.
"""
line_iterator = _line_iterator()
fromlines,tolines=[],[]
while True:
# Collecting lines of text until we have a from/to pair
while (len(fromlines)==0 or len(tolines)==0):
try:
from_line, to_line, found_diff = next(line_iterator)
except StopIteration:
return
if from_line is not None:
fromlines.append((from_line,found_diff))
if to_line is not None:
tolines.append((to_line,found_diff))
# Once we have a pair, remove them from the collection and yield it
from_line, fromDiff = fromlines.pop(0)
to_line, to_diff = tolines.pop(0)
yield (from_line,to_line,fromDiff or to_diff)
# Handle case where user does not want context differencing, just yield
# them up without doing anything else with them.
line_pair_iterator = _line_pair_iterator()
if context is None:
yield from line_pair_iterator
# Handle case where user wants context differencing. We must do some
# storage of lines until we know for sure that they are to be yielded.
else:
context += 1
lines_to_write = 0
while True:
# Store lines up until we find a difference, note use of a
# circular queue because we only need to keep around what
# we need for context.
index, contextLines = 0, [None]*(context)
found_diff = False
while(found_diff is False):
try:
from_line, to_line, found_diff = next(line_pair_iterator)
except StopIteration:
return
i = index % context
contextLines[i] = (from_line, to_line, found_diff)
index += 1
# Yield lines that we have collected so far, but first yield
# the user's separator.
if index > context:
yield None, None, None
lines_to_write = context
else:
lines_to_write = index
index = 0
while(lines_to_write):
i = index % context
index += 1
yield contextLines[i]
lines_to_write -= 1
# Now yield the context lines after the change
lines_to_write = context-1
try:
while(lines_to_write):
from_line, to_line, found_diff = next(line_pair_iterator)
# If another change within the context, extend the context
if found_diff:
lines_to_write = context-1
else:
lines_to_write -= 1
yield from_line, to_line, found_diff
except StopIteration:
# Catch exception from next() and return normally
return
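# Rough illustration of the marker format _mdiff yields (a sketch of a
# typical run, not a specification):
#
#   list(_mdiff(['one\n', 'two\n'], ['one\n', 'too\n']))
#   # [((1, 'one\n'), (1, 'one\n'), False),
#   #  ((2, 't\x00^w\x01o\n'), (2, 't\x00^o\x01o\n'), True)]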
_file_template = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type"
content="text/html; charset=%(charset)s" />
<title></title>
<style type="text/css">%(styles)s
</style>
</head>
<body>
%(table)s%(legend)s
</body>
</html>"""
_styles = """
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
_table_template = """
<table class="diff" id="difflib_chg_%(prefix)s_top"
cellspacing="0" cellpadding="0" rules="groups" >
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
%(header_row)s
<tbody>
%(data_rows)s </tbody>
</table>"""
_legend = """
<table class="diff" summary="Legends">
<tr> <th colspan="2"> Legends </th> </tr>
<tr> <td> <table border="" summary="Colors">
<tr><th> Colors </th> </tr>
<tr><td class="diff_add"> Added </td></tr>
<tr><td class="diff_chg">Changed</td> </tr>
<tr><td class="diff_sub">Deleted</td> </tr>
</table></td>
<td> <table border="" summary="Links">
<tr><th colspan="2"> Links </th> </tr>
<tr><td>(f)irst change</td> </tr>
<tr><td>(n)ext change</td> </tr>
<tr><td>(t)op</td> </tr>
</table></td> </tr>
</table>"""
class HtmlDiff(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See tools/scripts/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self, fromlines, tolines, fromdesc='', todesc='',
context=False, numlines=5, *, charset='utf-8'):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
            the "next" link anchors before the next change (so a click of
            the "next" link jumps to just before the change).
charset -- charset of the HTML document
"""
return (self._file_template % dict(
styles=self._styles,
legend=self._legend,
table=self.make_table(fromlines, tolines, fromdesc, todesc,
context=context, numlines=numlines),
charset=charset
)).encode(charset, 'xmlcharrefreplace').decode(charset)
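    # Usage sketch (hypothetical file names):
    #
    #   hd = HtmlDiff(wrapcolumn=70)
    #   html = hd.make_file('one\ntwo\n'.splitlines(True),
    #                       'one\ntoo\n'.splitlines(True),
    #                       'before.txt', 'after.txt', context=True)
    #   with open('diff.html', 'w', encoding='utf-8') as f:
    #       f.write(html)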
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
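    # Illustration of the tab trick above (default tabsize of 8):
    #
    #   expand_tabs('a\tb')  ->  'a\t\t\t\t\t\t\tb'
    #
    # i.e. the tab expands to seven one-column tab characters, so
    # tab-vs-space changes still line up for the differ, while real
    # spaces survive via the '\0' round trip.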
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
        text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
        # make spaces non-breakable so they don't get compressed or line wrapped
        text = text.replace(' ','&nbsp;').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
                fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
tolist = fromlist
else:
                fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
            the "next" link anchors before the next change (so a click of
            the "next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
                # mdiff yields None on separator lines; skip the bogus ones
                # generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
                     replace('\t','&nbsp;')
del re
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which)
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
def _test():
import sys
try:
import doctest, difflib
except ImportError:
sys.exit(1)
print(doctest.testmod(difflib))
if __name__ == "__main__":
_test()
cosmopolitan/third_party/python/Lib/symtable.py
"""Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import (USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM,
DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE,
LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL)
import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
top = _symtable.symtable(code, filename, compile_type)
return _newSymbolTable(top, filename)
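# Usage sketch (illustrative):
#
#   st = symtable("def spam(x): return x * 2", "<example>", "exec")
#   st.get_type()                               # 'module'
#   [t.get_name() for t in st.get_children()]   # ['spam']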
class SymbolTableFactory:
def __init__(self):
self.__memo = weakref.WeakValueDictionary()
def new(self, table, filename):
if table.type == _symtable.TYPE_FUNCTION:
return Function(table, filename)
if table.type == _symtable.TYPE_CLASS:
return Class(table, filename)
return SymbolTable(table, filename)
def __call__(self, table, filename):
key = table, filename
obj = self.__memo.get(key, None)
if obj is None:
obj = self.__memo[key] = self.new(table, filename)
return obj
_newSymbolTable = SymbolTableFactory()
class SymbolTable(object):
def __init__(self, raw_table, filename):
self._table = raw_table
self._filename = filename
self._symbols = {}
def __repr__(self):
if self.__class__ == SymbolTable:
kind = ""
else:
kind = "%s " % self.__class__.__name__
if self._table.name == "global":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
self._table.name,
self._filename)
def get_type(self):
if self._table.type == _symtable.TYPE_MODULE:
return "module"
if self._table.type == _symtable.TYPE_FUNCTION:
return "function"
if self._table.type == _symtable.TYPE_CLASS:
return "class"
assert self._table.type in (1, 2, 3), \
"unexpected type: {0}".format(self._table.type)
def get_id(self):
return self._table.id
def get_name(self):
return self._table.name
def get_lineno(self):
return self._table.lineno
def is_optimized(self):
return bool(self._table.type == _symtable.TYPE_FUNCTION)
def is_nested(self):
return bool(self._table.nested)
def has_children(self):
return bool(self._table.children)
def has_exec(self):
"""Return true if the scope uses exec. Deprecated method."""
return False
def get_identifiers(self):
return self._table.symbols.keys()
def lookup(self, name):
sym = self._symbols.get(name)
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
sym = self._symbols[name] = Symbol(name, flags, namespaces)
return sym
def get_symbols(self):
return [self.lookup(ident) for ident in self.get_identifiers()]
def __check_children(self, name):
return [_newSymbolTable(st, self._filename)
for st in self._table.children
if st.name == name]
def get_children(self):
return [_newSymbolTable(st, self._filename)
for st in self._table.children]
class Function(SymbolTable):
# Default values for instance variables
__params = None
__locals = None
__frees = None
__globals = None
def __idents_matching(self, test_func):
return tuple([ident for ident in self.get_identifiers()
if test_func(self._table.symbols[ident])])
def get_parameters(self):
if self.__params is None:
self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
return self.__params
def get_locals(self):
if self.__locals is None:
locs = (LOCAL, CELL)
test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
self.__locals = self.__idents_matching(test)
return self.__locals
def get_globals(self):
if self.__globals is None:
glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
self.__globals = self.__idents_matching(test)
return self.__globals
def get_frees(self):
if self.__frees is None:
is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
self.__frees = self.__idents_matching(is_free)
return self.__frees
class Class(SymbolTable):
__methods = None
def get_methods(self):
if self.__methods is None:
d = {}
for st in self._table.children:
d[st.name] = 1
self.__methods = tuple(d)
return self.__methods
class Symbol(object):
def __init__(self, name, flags, namespaces=None):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
def get_name(self):
return self.__name
def is_referenced(self):
return bool(self.__flags & _symtable.USE)
def is_parameter(self):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
def is_declared_global(self):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
return bool(self.__flags & DEF_BOUND)
def is_annotated(self):
return bool(self.__flags & DEF_ANNOT)
def is_free(self):
return bool(self.__scope == FREE)
def is_imported(self):
return bool(self.__flags & DEF_IMPORT)
def is_assigned(self):
return bool(self.__flags & DEF_LOCAL)
def is_namespace(self):
"""Returns true if name binding introduces new namespace.
If the name is used as the target of a function or class
statement, this will be true.
Note that a single name can be bound to multiple objects. If
is_namespace() is true, the name may also be bound to other
        objects, like an int or list, that do not introduce a new
namespace.
"""
return bool(self.__namespaces)
def get_namespaces(self):
"""Return a list of namespaces bound to this name"""
return self.__namespaces
def get_namespace(self):
"""Returns the single namespace bound to this name.
Raises ValueError if the name is bound to multiple namespaces.
"""
if len(self.__namespaces) != 1:
raise ValueError("name is bound to multiple namespaces")
return self.__namespaces[0]
if __name__ == "__main__":
import os, sys
with open(sys.argv[0]) as f:
src = f.read()
mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
for ident in mod.get_identifiers():
info = mod.lookup(ident)
print(info, info.is_local(), info.is_namespace())
cosmopolitan/third_party/python/Lib/hmac.py
"""HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
import hashlib as _hashlib
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object. *OR*
A hash name suitable for hashlib.new().
Defaults to hashlib.md5.
Implicit default to hashlib.md5 is deprecated and will be
removed in Python 3.6.
        Note: key and msg must be bytes or bytearray objects.
"""
if not isinstance(key, (bytes, bytearray)):
raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
if digestmod is None:
_warnings.warn("HMAC() without an explicit digestmod argument "
"is deprecated.", PendingDeprecationWarning, 2)
digestmod = _hashlib.md5
if callable(digestmod):
self.digest_cons = digestmod
elif isinstance(digestmod, str):
self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
else:
self.digest_cons = lambda d=b'': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
# self.blocksize is the default blocksize. self.block_size is
# effective block size as well as the public API attribute.
self.block_size = blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key.ljust(blocksize, b'\0')
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
@property
def name(self):
return "hmac-" + self.inner.name
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
# Call __new__ directly to avoid the expensive __init__.
other = self.__class__.__new__(self.__class__)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
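# Usage sketch (key and message are made-up values):
#
#   mac = new(b'secret-key', b'message', digestmod='sha256')
#   tag = mac.hexdigest()
#   # verify with a constant-time comparison to resist timing attacks:
#   compare_digest(tag, new(b'secret-key', b'message', 'sha256').hexdigest())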
cosmopolitan/third_party/python/Lib/_collections_abc.py
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Awaitable", "Coroutine",
"AsyncIterable", "AsyncIterator", "AsyncGenerator",
"Hashable", "Iterable", "Iterator", "Generator", "Reversible",
"Sized", "Container", "Callable", "Collection",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# This module has been renamed from collections.abc to _collections_abc to
# speed up interpreter startup. Some of the types such as MutableMapping are
# required early but collections module imports a lot of other modules.
# See issue #19218
__name__ = "collections.abc"
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types might not be distinct
# and they may have their own implementation specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
longrange_iterator = type(iter(range(1 << 1000)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
generator = type((lambda: (yield))())
## coroutine ##
async def _coro(): pass
_coro = _coro()
coroutine = type(_coro)
_coro.close() # Prevent ResourceWarning
del _coro
## asynchronous generator ##
async def _ag(): yield
_ag = _ag()
async_generator = type(_ag)
del _ag
### ONE-TRICK PONIES ###
def _check_methods(C, *methods):
mro = C.__mro__
for method in methods:
for B in mro:
if method in B.__dict__:
if B.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
class Hashable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
return _check_methods(C, "__hash__")
return NotImplemented
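# The __subclasshook__ above enables structural isinstance/issubclass
# checks; an illustrative sketch:
#
#   class Point:
#       def __hash__(self):
#           return 0
#
#   issubclass(Point, Hashable)   # True, without explicit registration
#   isinstance([], Hashable)      # False: list sets __hash__ to None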
class Awaitable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __await__(self):
yield
@classmethod
def __subclasshook__(cls, C):
if cls is Awaitable:
return _check_methods(C, "__await__")
return NotImplemented
class Coroutine(Awaitable):
__slots__ = ()
@abstractmethod
def send(self, value):
"""Send a value into the coroutine.
Return next yielded value or raise StopIteration.
"""
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
"""Raise an exception in the coroutine.
Return next yielded value or raise StopIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
"""Raise GeneratorExit inside coroutine.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError("coroutine ignored GeneratorExit")
@classmethod
def __subclasshook__(cls, C):
if cls is Coroutine:
return _check_methods(C, '__await__', 'send', 'throw', 'close')
return NotImplemented
Coroutine.register(coroutine)
class AsyncIterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __aiter__(self):
return AsyncIterator()
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncIterable:
return _check_methods(C, "__aiter__")
return NotImplemented
class AsyncIterator(AsyncIterable):
__slots__ = ()
@abstractmethod
async def __anext__(self):
"""Return the next item or raise StopAsyncIteration when exhausted."""
raise StopAsyncIteration
def __aiter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncIterator:
return _check_methods(C, "__anext__", "__aiter__")
return NotImplemented
class AsyncGenerator(AsyncIterator):
__slots__ = ()
async def __anext__(self):
"""Return the next item from the asynchronous generator.
When exhausted, raise StopAsyncIteration.
"""
return await self.asend(None)
@abstractmethod
async def asend(self, value):
"""Send a value into the asynchronous generator.
Return next yielded value or raise StopAsyncIteration.
"""
raise StopAsyncIteration
@abstractmethod
async def athrow(self, typ, val=None, tb=None):
"""Raise an exception in the asynchronous generator.
Return next yielded value or raise StopAsyncIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
async def aclose(self):
"""Raise GeneratorExit inside coroutine.
"""
try:
await self.athrow(GeneratorExit)
except (GeneratorExit, StopAsyncIteration):
pass
else:
raise RuntimeError("asynchronous generator ignored GeneratorExit")
@classmethod
def __subclasshook__(cls, C):
if cls is AsyncGenerator:
return _check_methods(C, '__aiter__', '__anext__',
'asend', 'athrow', 'aclose')
return NotImplemented
AsyncGenerator.register(async_generator)
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
return _check_methods(C, "__iter__")
return NotImplemented
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
'Return the next item from the iterator. When exhausted, raise StopIteration'
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
return _check_methods(C, '__iter__', '__next__')
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(longrange_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
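# Illustrative note (not in the original source): registration is only
# needed for the C-level iterator types above; any class providing both
# __iter__ and __next__ is recognized structurally. A hypothetical class:
#
#   class CountDown:
#       def __init__(self, n):
#           self.n = n
#       def __iter__(self):
#           return self
#       def __next__(self):
#           if self.n <= 0:
#               raise StopIteration
#           self.n -= 1
#           return self.n + 1
#
#   isinstance(CountDown(3), Iterator)   # True, via __subclasshook__
#   list(CountDown(3))                   # [3, 2, 1]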
class Reversible(Iterable):
__slots__ = ()
@abstractmethod
def __reversed__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Reversible:
return _check_methods(C, "__reversed__", "__iter__")
return NotImplemented
class Generator(Iterator):
__slots__ = ()
def __next__(self):
"""Return the next item from the generator.
When exhausted, raise StopIteration.
"""
return self.send(None)
@abstractmethod
def send(self, value):
"""Send a value into the generator.
Return next yielded value or raise StopIteration.
"""
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
"""Raise an exception in the generator.
Return next yielded value or raise StopIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
"""Raise GeneratorExit inside generator.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError("generator ignored GeneratorExit")
@classmethod
def __subclasshook__(cls, C):
if cls is Generator:
return _check_methods(C, '__iter__', '__next__',
'send', 'throw', 'close')
return NotImplemented
Generator.register(generator)
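# Illustrative note (not in the original source): ordinary generator
# objects now satisfy the whole chain:
#
#   def g():
#       yield 1
#   gen = g()
#   isinstance(gen, Generator)   # True (registered above)
#   isinstance(gen, Iterator)    # True (Generator subclasses Iterator)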
class Sized(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
return _check_methods(C, "__len__")
return NotImplemented
class Container(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
return _check_methods(C, "__contains__")
return NotImplemented
class Collection(Sized, Iterable, Container):
__slots__ = ()
@classmethod
def __subclasshook__(cls, C):
if cls is Collection:
return _check_methods(C, "__len__", "__iter__", "__contains__")
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
return _check_methods(C, "__call__")
return NotImplemented
### SETS ###
class Set(Collection):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), redefine __le__ and __ge__,
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) > len(other) and self.__ge__(other)
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) < len(other):
return False
for elem in other:
if elem not in self:
return False
return True
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
__rand__ = __and__
def isdisjoint(self, other):
'Return True if two sets have a null intersection.'
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
__ror__ = __or__
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __rsub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in other
if value not in self)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
__rxor__ = __xor__
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
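# Illustrative note (adapted from the collections.abc documentation, not
# part of this module): a concrete Set only needs __contains__, __iter__
# and __len__; the comparisons and set operations above then come for free:
#
#   class ListBasedSet(Set):
#       '''Alternate set implementation favoring space over speed.'''
#       def __init__(self, iterable):
#           self.elements = lst = []
#           for value in iterable:
#               if value not in lst:
#                   lst.append(value)
#       def __iter__(self):
#           return iter(self.elements)
#       def __contains__(self, value):
#           return value in self.elements
#       def __len__(self):
#           return len(self.elements)
#
#   s1 = ListBasedSet('abcdef')
#   s2 = ListBasedSet('defghi')
#   sorted(s1 & s2)   # ['d', 'e', 'f'], via __and__ and _from_iterable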
class MutableSet(Set):
"""A mutable set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__, __len__,
add(), and discard().
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
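# Illustrative note (not in the original source): the in-place operators
# above are written purely in terms of add() and discard(), so they work
# for any concrete MutableSet. For example, via the unbound method:
#
#   s = {1, 2, 3}
#   MutableSet.__ixor__(s, {2, 4})   # symmetric-difference update
#   s                                # {1, 3, 4}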
### MAPPINGS ###
class Mapping(Collection):
__slots__ = ()
"""A Mapping is a generic container for associating key/value
pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __iter__, and __len__.
"""
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return KeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return ItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
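    # Setting __reversed__ to None makes _check_methods() report
    # NotImplemented, so mappings are not considered Reversible.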
__reversed__ = None
Mapping.register(mappingproxy)
class MappingView(Sized):
__slots__ = '_mapping',
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
__slots__ = ()
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
yield from self._mapping
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
__slots__ = ()
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v is value or v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
__slots__ = ()
def __contains__(self, value):
for key in self._mapping:
v = self._mapping[key]
if v is value or v == value:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
"""A MutableMapping is a generic container for associating
key/value pairs.
This class provides concrete generic implementations of all
methods except for __getitem__, __setitem__, __delitem__,
__iter__, and __len__.
"""
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
'''D.popitem() -> (k, v), remove and return some (key, value) pair
as a 2-tuple; but raise KeyError if D is empty.
'''
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
'D.clear() -> None. Remove all items from D.'
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
If E present and has a .keys() method, does: for k in E: D[k] = E[k]
If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
In either case, this is followed by: for k, v in F.items(): D[k] = v
'''
if not args:
raise TypeError("descriptor 'update' of 'MutableMapping' object "
"needs an argument")
self, *args = args
if len(args) > 1:
raise TypeError('update expected at most 1 arguments, got %d' %
len(args))
if args:
other = args[0]
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
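# Illustrative note (not in the original source): a concrete MutableMapping
# supplies the five abstract methods and inherits get, pop, popitem, clear,
# update and setdefault. A minimal dict-backed sketch (hypothetical name):
#
#   class DictBackedMap(MutableMapping):
#       def __init__(self, *args, **kwds):
#           self._data = {}
#           self.update(*args, **kwds)
#       def __getitem__(self, key):
#           return self._data[key]
#       def __setitem__(self, key, value):
#           self._data[key] = value
#       def __delitem__(self, key):
#           del self._data[key]
#       def __iter__(self):
#           return iter(self._data)
#       def __len__(self):
#           return len(self._data)
#
#   m = DictBackedMap(a=1)
#   m.setdefault('b', 2)   # 2
#   dict(m)                # {'a': 1, 'b': 2}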
### SEQUENCES ###
class Sequence(Reversible, Collection):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v is value or v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value, start=0, stop=None):
'''S.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present.
Supporting start and stop arguments is optional, but
recommended.
'''
if start is not None and start < 0:
start = max(len(self) + start, 0)
if stop is not None and stop < 0:
stop += len(self)
i = start
while stop is None or i < stop:
try:
v = self[i]
if v is value or v == value:
return i
except IndexError:
break
i += 1
raise ValueError
def count(self, value):
'S.count(value) -> integer -- return number of occurrences of value'
return sum(1 for v in self if v is value or v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
Sequence.register(memoryview)
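# Illustrative note (not in the original source): with just __getitem__ and
# __len__, the mixins above provide iteration, membership, reversal,
# index() and count(). A hypothetical Squares sequence:
#
#   class Squares(Sequence):
#       def __init__(self, n):
#           self.n = n
#       def __len__(self):
#           return self.n
#       def __getitem__(self, i):
#           if not 0 <= i < self.n:
#               raise IndexError(i)
#           return i * i
#
#   sq = Squares(5)          # 0, 1, 4, 9, 16
#   9 in sq                  # True
#   sq.index(16)             # 4
#   list(reversed(sq))       # [16, 9, 4, 1, 0]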
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
"""All the operations on a read-write sequence.
Concrete subclasses must provide __new__ or __init__,
__getitem__, __setitem__, __delitem__, __len__, and insert().
"""
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
'S.insert(index, value) -- insert value before index'
raise IndexError
def append(self, value):
'S.append(value) -- append value to the end of the sequence'
self.insert(len(self), value)
def clear(self):
'S.clear() -> None -- remove all items from S'
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
'S.extend(iterable) -- extend sequence by appending elements from the iterable'
for v in values:
self.append(v)
def pop(self, index=-1):
'''S.pop([index]) -> item -- remove and return item at index (default last).
Raise IndexError if list is empty or index is out of range.
'''
v = self[index]
del self[index]
return v
def remove(self, value):
'''S.remove(value) -- remove first occurrence of value.
Raise ValueError if the value is not present.
'''
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
| 26,392 | 1,012 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/macpath.py | """Pathname and path-related operations for the Macintosh."""
# strings representing various path-related bits and pieces
# These are primarily for export; internally, they are hardcoded.
# Should be set before imports for resolving cyclic dependency.
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
import os
from stat import *
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"expanduser","expandvars","normpath","abspath",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
def _get_colon(path):
if isinstance(path, bytes):
return b':'
else:
return ':'
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
if not isinstance(path, (bytes, str)):
raise TypeError("normcase() argument must be str or bytes, "
"not '{}'".format(path.__class__.__name__))
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
colon = _get_colon(s)
return colon in s and s[:1] != colon
def join(s, *p):
try:
colon = _get_colon(s)
path = s
if not p:
path[:0] + colon #23780: Ensure compatible data type even if p is null.
for t in p:
if (not path) or isabs(t):
path = t
continue
if t[:1] == colon:
t = t[1:]
if colon not in path:
path = colon + path
if path[-1:] != colon:
path = path + colon
path = path + t
return path
except (TypeError, AttributeError, BytesWarning):
genericpath._check_arg_types('join', s, *p)
raise
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
colon = _get_colon(s)
if colon not in s: return s[:0], s
col = 0
for i in range(len(s)):
if s[i:i+1] == colon: col = i + 1
path, file = s[:col-1], s[col:]
if path and not colon in path:
path = path + colon
return path, file
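# Illustrative note (not part of the original module): split() and join()
# round-trip on Mac-style paths, e.g.
#
#   split('Volume:Folder:file')    # ('Volume:Folder', 'file')
#   join('Volume:Folder', 'file')  # 'Volume:Folder:file'
#   join('a', 'b')                 # ':a:b' (relative paths start with ':')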
def splitext(p):
if isinstance(p, bytes):
return genericpath._splitext(p, b':', altsep, b'.')
else:
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return p[:0], p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and not components[1]
def islink(s):
"""Return true if the pathname refers to a symbolic link."""
try:
import Carbon.File
return Carbon.File.ResolveAliasFile(s, 0)[2]
except:
return False
# Is `stat`/`lstat` a meaningful difference on the Mac? This is safe in any
# case.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
st = os.lstat(path)
except OSError:
return False
return True
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
class norm_error(Exception):
"""Path cannot be normalized"""
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
colon = _get_colon(s)
if colon not in s:
return colon + s
comps = s.split(colon)
i = 1
while i < len(comps)-1:
if not comps[i] and comps[i-1]:
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error('Cannot use :: immediately after volume name')
else:
i = i + 1
s = colon.join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1:] == colon and len(comps) > 2 and s != colon*len(s):
s = s[:-1]
return s
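# Illustrative note (not part of the original module):
#
#   normpath('a:b::c')   # 'a:c'  ('::' backs up one directory level)
#   normpath('b')        # ':b'   (colon-less names become relative)
#   normpath('a:')       # 'a:'   (a bare volume name keeps its colon)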
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
if isinstance(path, bytes):
cwd = os.getcwdb()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# realpath is a no-op on systems without islink support
def realpath(path):
path = abspath(path)
try:
import Carbon.File
except ImportError:
return path
if not path:
return path
colon = _get_colon(path)
components = path.split(colon)
path = components[0] + colon
for c in components[1:]:
path = join(path, c)
try:
path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
except Carbon.File.Error:
pass
return path
supports_unicode_filenames = True
| 5,971 | 213 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/turtle.py | #
# turtle.py: a Tkinter based turtle graphics module for Python
# Version 1.1b - 4. 5. 2009
#
# Copyright (C) 2006 - 2010 Gregor Lingl
# email: [email protected]
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
"""
Turtle graphics is a popular way for introducing programming to
kids. It was part of the original Logo programming language developed
by Wally Feurzeig and Seymour Papert in 1966.
Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it
the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
the direction it is facing, drawing a line as it moves. Give it the
command turtle.right(25), and it rotates in-place 25 degrees clockwise.
By combining together these and similar commands, intricate shapes and
pictures can easily be drawn.
----- turtle.py
This module is an extended reimplementation of turtle.py from the
Python standard distribution up to Python 2.5. (See: http://www.python.org)
It tries to keep the merits of turtle.py and to be (nearly) 100%
compatible with it. This means in the first place to enable the
learning programmer to use all the commands, classes and methods
interactively when using the module from within IDLE run with
the -n switch.
Roughly it has the following features added:
- Better animation of the turtle movements, especially of turning the
turtle. So the turtles can more easily be used as a visual feedback
instrument by the (beginning) programmer.
- Different turtle shapes, gif-images as turtle shapes, user defined
and user controllable turtle shapes, among them compound
(multicolored) shapes. Turtle shapes can be stretched and tilted, which
makes turtles very versatile geometrical objects.
- Fine control over turtle movement and screen updates via delay(),
and enhanced tracer() and speed() methods.
- Aliases for the most commonly used commands, like fd for forward etc.,
following the early Logo traditions. This reduces the boring work of
typing long sequences of commands, which often occur in a natural way
when kids try to program fancy pictures on their first encounter with
turtle graphics.
- Turtles now have an undo()-method with configurable undo-buffer.
- Some simple commands/methods for creating event driven programs
(mouse-, key-, timer-events). Especially useful for programming games.
- A scrollable Canvas class. The default scrollable Canvas can be
extended interactively as needed while playing around with the turtle(s).
- A TurtleScreen class with methods controlling background color or
background image, window and canvas size and other properties of the
TurtleScreen.
- There is a method, setworldcoordinates(), to install a user defined
coordinate-system for the TurtleScreen.
- The implementation uses a 2-vector class named Vec2D, derived from tuple.
This class is public, so it can be imported by the application programmer,
which makes certain types of computations very natural and compact.
- Appearance of the TurtleScreen and the Turtles at startup/import can be
configured by means of a turtle.cfg configuration file.
The default configuration mimics the appearance of the old turtle module.
- If configured appropriately the module reads in docstrings from a docstring
dictionary in some different language, supplied separately and replaces
the English ones by those read in. There is a utility function
write_docstringdict() to write a dictionary with the original (English)
docstrings to disc, so it can serve as a template for translations.
Behind the scenes there are some features included with possible
extensions in mind. These will be commented and documented elsewhere.
"""
_ver = "turtle 1.1b- - for Python 3.1 - 4. 5. 2009"
# print(_ver)
import tkinter as TK
import types
import math
import time
import inspect
import sys
from os.path import isfile, split, join
from copy import deepcopy
from tkinter import simpledialog
_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen',
'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D']
_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye',
'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas',
'getshapes', 'listen', 'mainloop', 'mode', 'numinput',
'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer',
'register_shape', 'resetscreen', 'screensize', 'setup',
'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update',
'window_height', 'window_width']
_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk',
'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color',
'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd',
'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly',
'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown',
'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd',
'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position',
'pu', 'radians', 'right', 'reset', 'resizemode', 'rt',
'seth', 'setheading', 'setpos', 'setposition', 'settiltangle',
'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle',
'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards',
'turtlesize', 'undo', 'undobufferentries', 'up', 'width',
'write', 'xcor', 'ycor']
_tg_utilities = ['write_docstringdict', 'done']
__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions +
_tg_utilities + ['Terminator']) # + _math_functions)
_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos',
'pu', 'rt', 'seth', 'setpos', 'setposition', 'st',
'turtlesize', 'up', 'width']
_CFG = {"width" : 0.5, # Screen
"height" : 0.75,
"canvwidth" : 400,
"canvheight": 300,
"leftright": None,
"topbottom": None,
"mode": "standard", # TurtleScreen
"colormode": 1.0,
"delay": 10,
"undobuffersize": 1000, # RawTurtle
"shape": "classic",
"pencolor" : "black",
"fillcolor" : "black",
"resizemode" : "noresize",
"visible" : True,
"language": "english", # docstrings
"exampleturtle": "turtle",
"examplescreen": "screen",
"title": "Python Turtle Graphics",
"using_IDLE": False
}
def config_dict(filename):
"""Convert content of config-file into dictionary."""
with open(filename, "r") as f:
cfglines = f.readlines()
cfgdict = {}
for line in cfglines:
line = line.strip()
if not line or line.startswith("#"):
continue
try:
key, value = line.split("=")
except ValueError:
print("Bad line in config-file %s:\n%s" % (filename,line))
continue
key = key.strip()
value = value.strip()
if value in ["True", "False", "None", "''", '""']:
value = eval(value)
else:
try:
if "." in value:
value = float(value)
else:
value = int(value)
except ValueError:
pass # value need not be converted
cfgdict[key] = value
return cfgdict
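# Illustrative note (not in the original source): config_dict() parses
# plain "key = value" lines. A hypothetical turtle.cfg such as
#
#   width = 0.6
#   canvwidth = 600
#   shape = turtle
#   visible = False
#
# yields {'width': 0.6, 'canvwidth': 600, 'shape': 'turtle',
# 'visible': False} -- floats and ints are detected by value, and the
# literals True/False/None/''/"" are eval'ed.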
def readconfig(cfgdict):
"""Read config-files, change configuration-dict accordingly.
If there is a turtle.cfg file in the current working directory,
read it from there. If this contains an importconfig-value,
    say 'myway', construct filename turtle_myway.cfg else use
turtle.cfg and read it from the import-directory, where
turtle.py is located.
Update configuration dictionary first according to config-file,
in the import directory, then according to config-file in the
current working directory.
If no config-file is found, the default configuration is used.
"""
default_cfg = "turtle.cfg"
cfgdict1 = {}
cfgdict2 = {}
if isfile(default_cfg):
cfgdict1 = config_dict(default_cfg)
if "importconfig" in cfgdict1:
default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"]
try:
head, tail = split(__file__)
cfg_file2 = join(head, default_cfg)
except Exception:
cfg_file2 = ""
if isfile(cfg_file2):
cfgdict2 = config_dict(cfg_file2)
_CFG.update(cfgdict2)
_CFG.update(cfgdict1)
try:
readconfig(_CFG)
except Exception:
    print("No configfile read, reason unknown")
class Vec2D(tuple):
"""A 2 dimensional vector class, used as a helper class
for implementing turtle graphics.
May be useful for turtle graphics programs also.
Derived from tuple, so a vector is a tuple!
Provides (for a, b vectors, k number):
a+b vector addition
a-b vector subtraction
a*b inner product
k*a and a*k multiplication with scalar
|a| absolute value of a
a.rotate(angle) rotation
"""
def __new__(cls, x, y):
return tuple.__new__(cls, (x, y))
def __add__(self, other):
return Vec2D(self[0]+other[0], self[1]+other[1])
def __mul__(self, other):
if isinstance(other, Vec2D):
return self[0]*other[0]+self[1]*other[1]
return Vec2D(self[0]*other, self[1]*other)
def __rmul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Vec2D(self[0]*other, self[1]*other)
def __sub__(self, other):
return Vec2D(self[0]-other[0], self[1]-other[1])
def __neg__(self):
return Vec2D(-self[0], -self[1])
def __abs__(self):
return (self[0]**2 + self[1]**2)**0.5
def rotate(self, angle):
"""rotate self counterclockwise by angle
"""
perp = Vec2D(-self[1], self[0])
angle = angle * math.pi / 180.0
c, s = math.cos(angle), math.sin(angle)
return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)
def __getnewargs__(self):
return (self[0], self[1])
def __repr__(self):
return "(%.2f,%.2f)" % self
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
## helper functions for Scrolled Canvas, to forward Canvas-methods
## to ScrolledCanvas class
def __methodDict(cls, _dict):
"""helper function for Scrolled Canvas"""
baseList = list(cls.__bases__)
baseList.reverse()
for _super in baseList:
__methodDict(_super, _dict)
for key, value in cls.__dict__.items():
if type(value) == types.FunctionType:
_dict[key] = value
def __methods(cls):
"""helper function for Scrolled Canvas"""
_dict = {}
__methodDict(cls, _dict)
return _dict.keys()
__stringBody = (
'def %(method)s(self, *args, **kw): return ' +
'self.%(attribute)s.%(method)s(*args, **kw)')
def __forwardmethods(fromClass, toClass, toPart, exclude = ()):
### MANY CHANGES ###
_dict_1 = {}
__methodDict(toClass, _dict_1)
_dict = {}
mfc = __methods(fromClass)
for ex in _dict_1.keys():
if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in mfc:
pass
else:
_dict[ex] = _dict_1[ex]
for method, func in _dict.items():
d = {'method': method, 'func': func}
if isinstance(toPart, str):
execString = \
__stringBody % {'method' : method, 'attribute' : toPart}
exec(execString, d)
setattr(fromClass, method, d[method]) ### NEWU!
class ScrolledCanvas(TK.Frame):
"""Modeled after the scrolled canvas class from Grayons's Tkinter book.
Used as the default canvas, which pops up automatically when
using turtle graphics functions or the Turtle class.
"""
def __init__(self, master, width=500, height=350,
canvwidth=600, canvheight=500):
TK.Frame.__init__(self, master, width=width, height=height)
self._rootwindow = self.winfo_toplevel()
self.width, self.height = width, height
self.canvwidth, self.canvheight = canvwidth, canvheight
self.bg = "white"
self._canvas = TK.Canvas(master, width=width, height=height,
bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
orient=TK.HORIZONTAL)
self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
self._canvas.configure(xscrollcommand=self.hscroll.set,
yscrollcommand=self.vscroll.set)
self.rowconfigure(0, weight=1, minsize=0)
self.columnconfigure(0, weight=1, minsize=0)
self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
column=0, rowspan=1, columnspan=1, sticky='news')
self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
column=1, rowspan=1, columnspan=1, sticky='news')
self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
column=0, rowspan=1, columnspan=1, sticky='news')
self.reset()
self._rootwindow.bind('<Configure>', self.onResize)
def reset(self, canvwidth=None, canvheight=None, bg = None):
"""Adjust canvas and scrollbars according to given canvas size."""
if canvwidth:
self.canvwidth = canvwidth
if canvheight:
self.canvheight = canvheight
if bg:
self.bg = bg
self._canvas.config(bg=bg,
scrollregion=(-self.canvwidth//2, -self.canvheight//2,
self.canvwidth//2, self.canvheight//2))
self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
self.canvwidth)
self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
self.canvheight)
self.adjustScrolls()
def adjustScrolls(self):
""" Adjust scrollbars according to window- and canvas-size.
"""
cwidth = self._canvas.winfo_width()
cheight = self._canvas.winfo_height()
self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
if cwidth < self.canvwidth or cheight < self.canvheight:
self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
column=0, rowspan=1, columnspan=1, sticky='news')
self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
column=1, rowspan=1, columnspan=1, sticky='news')
else:
self.hscroll.grid_forget()
self.vscroll.grid_forget()
def onResize(self, event):
"""self-explanatory"""
self.adjustScrolls()
def bbox(self, *args):
""" 'forward' method, which canvas itself has inherited...
"""
return self._canvas.bbox(*args)
def cget(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
return self._canvas.cget(*args, **kwargs)
def config(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.config(*args, **kwargs)
def bind(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.bind(*args, **kwargs)
def unbind(self, *args, **kwargs):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.unbind(*args, **kwargs)
def focus_force(self):
""" 'forward' method, which canvas itself has inherited...
"""
self._canvas.focus_force()
__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas')
class _Root(TK.Tk):
"""Root class for Screen based on Tkinter."""
def __init__(self):
TK.Tk.__init__(self)
def setupcanvas(self, width, height, cwidth, cheight):
self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight)
self._canvas.pack(expand=1, fill="both")
def _getcanvas(self):
return self._canvas
def set_geometry(self, width, height, startx, starty):
self.geometry("%dx%d%+d%+d"%(width, height, startx, starty))
def ondestroy(self, destroy):
self.wm_protocol("WM_DELETE_WINDOW", destroy)
def win_width(self):
return self.winfo_screenwidth()
def win_height(self):
return self.winfo_screenheight()
Canvas = TK.Canvas
class TurtleScreenBase(object):
"""Provide the basic graphics functionality.
Interface between Tkinter and turtle.py.
To port turtle.py to some different graphics toolkit
a corresponding TurtleScreenBase class has to be implemented.
"""
@staticmethod
def _blankimage():
"""return a blank image object
"""
img = TK.PhotoImage(width=1, height=1)
img.blank()
return img
@staticmethod
def _image(filename):
"""return an image object containing the
imagedata from a gif-file named filename.
"""
return TK.PhotoImage(file=filename)
def __init__(self, cv):
self.cv = cv
if isinstance(cv, ScrolledCanvas):
w = self.cv.canvwidth
h = self.cv.canvheight
else: # expected: ordinary TK.Canvas
w = int(self.cv.cget("width"))
h = int(self.cv.cget("height"))
self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 ))
self.canvwidth = w
self.canvheight = h
self.xscale = self.yscale = 1.0
def _createpoly(self):
"""Create an invisible polygon item on canvas self.cv)
"""
return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")
def _drawpoly(self, polyitem, coordlist, fill=None,
outline=None, width=None, top=False):
"""Configure polygonitem polyitem according to provided
arguments:
coordlist is sequence of coordinates
fill is filling color
outline is outline color
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
cl = []
for x, y in coordlist:
cl.append(x * self.xscale)
cl.append(-y * self.yscale)
self.cv.coords(polyitem, *cl)
if fill is not None:
self.cv.itemconfigure(polyitem, fill=fill)
if outline is not None:
self.cv.itemconfigure(polyitem, outline=outline)
if width is not None:
self.cv.itemconfigure(polyitem, width=width)
if top:
self.cv.tag_raise(polyitem)
def _createline(self):
"""Create an invisible line item on canvas self.cv)
"""
return self.cv.create_line(0, 0, 0, 0, fill="", width=2,
capstyle = TK.ROUND)
def _drawline(self, lineitem, coordlist=None,
fill=None, width=None, top=False):
"""Configure lineitem according to provided arguments:
coordlist is sequence of coordinates
fill is drawing color
width is width of drawn line.
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
if coordlist is not None:
cl = []
for x, y in coordlist:
cl.append(x * self.xscale)
cl.append(-y * self.yscale)
self.cv.coords(lineitem, *cl)
if fill is not None:
self.cv.itemconfigure(lineitem, fill=fill)
if width is not None:
self.cv.itemconfigure(lineitem, width=width)
if top:
self.cv.tag_raise(lineitem)
def _delete(self, item):
"""Delete graphics item from canvas.
If item is"all" delete all graphics items.
"""
self.cv.delete(item)
def _update(self):
"""Redraw graphics items on canvas
"""
self.cv.update()
def _delay(self, delay):
"""Delay subsequent canvas actions for delay ms."""
self.cv.after(delay)
def _iscolorstring(self, color):
"""Check if the string color is a legal Tkinter color string.
"""
try:
rgb = self.cv.winfo_rgb(color)
ok = True
except TK.TclError:
ok = False
return ok
def _bgcolor(self, color=None):
"""Set canvas' backgroundcolor if color is not None,
else return backgroundcolor."""
if color is not None:
self.cv.config(bg = color)
self._update()
else:
return self.cv.cget("bg")
def _write(self, pos, txt, align, font, pencolor):
"""Write txt at pos in canvas with specified font
and color.
Return text item and x-coord of right bottom corner
of text's bounding box."""
x, y = pos
x = x * self.xscale
y = y * self.yscale
anchor = {"left":"sw", "center":"s", "right":"se" }
item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
fill = pencolor, font = font)
x0, y0, x1, y1 = self.cv.bbox(item)
self.cv.update()
return item, x1-1
## def _dot(self, pos, size, color):
## """may be implemented for some other graphics toolkit"""
def _onclick(self, item, fun, num=1, add=None):
"""Bind fun to mouse-click event on turtle.
fun must be a function with two arguments, the coordinates
of the clicked point on the canvas.
num, the number of the mouse-button defaults to 1
"""
if fun is None:
self.cv.tag_unbind(item, "<Button-%s>" % num)
else:
def eventfun(event):
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
self.cv.tag_bind(item, "<Button-%s>" % num, eventfun, add)
def _onrelease(self, item, fun, num=1, add=None):
"""Bind fun to mouse-button-release event on turtle.
fun must be a function with two arguments, the coordinates
of the point on the canvas where mouse button is released.
num, the number of the mouse-button defaults to 1
If a turtle is clicked, first _onclick-event will be performed,
        then _onscreenclick-event.
"""
if fun is None:
self.cv.tag_unbind(item, "<Button%s-ButtonRelease>" % num)
else:
def eventfun(event):
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
self.cv.tag_bind(item, "<Button%s-ButtonRelease>" % num,
eventfun, add)
def _ondrag(self, item, fun, num=1, add=None):
"""Bind fun to mouse-move-event (with pressed mouse button) on turtle.
fun must be a function with two arguments, the coordinates of the
actual mouse position on the canvas.
num, the number of the mouse-button defaults to 1
Every sequence of mouse-move-events on a turtle is preceded by a
mouse-click event on that turtle.
"""
if fun is None:
self.cv.tag_unbind(item, "<Button%s-Motion>" % num)
else:
def eventfun(event):
try:
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
except Exception:
pass
self.cv.tag_bind(item, "<Button%s-Motion>" % num, eventfun, add)
def _onscreenclick(self, fun, num=1, add=None):
"""Bind fun to mouse-click event on canvas.
fun must be a function with two arguments, the coordinates
of the clicked point on the canvas.
num, the number of the mouse-button defaults to 1
If a turtle is clicked, first _onclick-event will be performed,
        then _onscreenclick-event.
"""
if fun is None:
self.cv.unbind("<Button-%s>" % num)
else:
def eventfun(event):
x, y = (self.cv.canvasx(event.x)/self.xscale,
-self.cv.canvasy(event.y)/self.yscale)
fun(x, y)
self.cv.bind("<Button-%s>" % num, eventfun, add)
def _onkeyrelease(self, fun, key):
"""Bind fun to key-release event of key.
Canvas must have focus. See method listen
"""
if fun is None:
self.cv.unbind("<KeyRelease-%s>" % key, None)
else:
def eventfun(event):
fun()
self.cv.bind("<KeyRelease-%s>" % key, eventfun)
def _onkeypress(self, fun, key=None):
"""If key is given, bind fun to key-press event of key.
Otherwise bind fun to any key-press.
Canvas must have focus. See method listen.
"""
if fun is None:
if key is None:
self.cv.unbind("<KeyPress>", None)
else:
self.cv.unbind("<KeyPress-%s>" % key, None)
else:
def eventfun(event):
fun()
if key is None:
self.cv.bind("<KeyPress>", eventfun)
else:
self.cv.bind("<KeyPress-%s>" % key, eventfun)
def _listen(self):
"""Set focus on canvas (in order to collect key-events)
"""
self.cv.focus_force()
def _ontimer(self, fun, t):
"""Install a timer, which calls fun after t milliseconds.
"""
if t == 0:
self.cv.after_idle(fun)
else:
self.cv.after(t, fun)
def _createimage(self, image):
"""Create and return image item on canvas.
"""
return self.cv.create_image(0, 0, image=image)
def _drawimage(self, item, pos, image):
"""Configure image item as to draw image object
at position (x,y) on canvas)
"""
x, y = pos
self.cv.coords(item, (x * self.xscale, -y * self.yscale))
self.cv.itemconfig(item, image=image)
def _setbgpic(self, item, image):
"""Configure image item as to draw image object
at center of canvas. Set item to the first item
in the displaylist, so it will be drawn below
any other item ."""
self.cv.itemconfig(item, image=image)
self.cv.tag_lower(item)
def _type(self, item):
"""Return 'line' or 'polygon' or 'image' depending on
type of item.
"""
return self.cv.type(item)
def _pointlist(self, item):
"""returns list of coordinate-pairs of points of item
Example (for insiders):
>>> from turtle import *
>>> getscreen()._pointlist(getturtle().turtle._item)
[(0.0, 9.9999999999999982), (0.0, -9.9999999999999982),
(9.9999999999999982, 0.0)]
>>> """
cl = self.cv.coords(item)
pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)]
return pl
def _setscrollregion(self, srx1, sry1, srx2, sry2):
self.cv.config(scrollregion=(srx1, sry1, srx2, sry2))
def _rescale(self, xscalefactor, yscalefactor):
items = self.cv.find_all()
for item in items:
coordinates = list(self.cv.coords(item))
newcoordlist = []
while coordinates:
x, y = coordinates[:2]
newcoordlist.append(x * xscalefactor)
newcoordlist.append(y * yscalefactor)
coordinates = coordinates[2:]
self.cv.coords(item, *newcoordlist)
def _resize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on. Does
not alter the drawing window.
"""
# needs amendment
if not isinstance(self.cv, ScrolledCanvas):
return self.canvwidth, self.canvheight
if canvwidth is canvheight is bg is None:
return self.cv.canvwidth, self.cv.canvheight
if canvwidth is not None:
self.canvwidth = canvwidth
if canvheight is not None:
self.canvheight = canvheight
self.cv.reset(canvwidth, canvheight, bg)
def _window_size(self):
""" Return the width and height of the turtle window.
"""
width = self.cv.winfo_width()
if width <= 1: # the window isn't managed by a geometry manager
width = self.cv['width']
height = self.cv.winfo_height()
if height <= 1: # the window isn't managed by a geometry manager
height = self.cv['height']
return width, height
def mainloop(self):
"""Starts event loop - calling Tkinter's mainloop function.
No argument.
Must be last statement in a turtle graphics program.
Must NOT be used if a script is run from within IDLE in -n mode
(No subprocess) - for interactive use of turtle graphics.
Example (for a TurtleScreen instance named screen):
>>> screen.mainloop()
"""
TK.mainloop()
def textinput(self, title, prompt):
"""Pop up a dialog window for input of a string.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what information to input.
Return the string input
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.textinput("NIM", "Name of first player:")
"""
return simpledialog.askstring(title, prompt)
def numinput(self, title, prompt, default=None, minval=None, maxval=None):
"""Pop up a dialog window for input of a number.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what numerical information to input.
default: default value
        minval: minimum value for input
maxval: maximum value for input
The number input must be in the range minval .. maxval if these are
given. If not, a hint is issued and the dialog remains open for
correction. Return the number input.
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000)
"""
return simpledialog.askfloat(title, prompt, initialvalue=default,
minvalue=minval, maxvalue=maxval)
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator (Exception):
"""Will be raised in TurtleScreen.update, if _RUNNING becomes False.
This stops execution of a turtle graphics script.
Main purpose: use in the Demo-Viewer turtle.Demo.py.
"""
pass
class TurtleGraphicsError(Exception):
"""Some TurtleGraphics Error
"""
class Shape(object):
"""Data structure modeling shapes.
attribute _type is one of "polygon", "image", "compound"
    attribute _data is - depending on _type - a polygon-tuple,
an image or a list constructed using the addcomponent method.
"""
def __init__(self, type_, data=None):
self._type = type_
if type_ == "polygon":
if isinstance(data, list):
data = tuple(data)
elif type_ == "image":
if isinstance(data, str):
if data.lower().endswith(".gif") and isfile(data):
data = TurtleScreen._image(data)
# else data assumed to be Photoimage
elif type_ == "compound":
data = []
else:
raise TurtleGraphicsError("There is no shape type %s" % type_)
self._data = data
def addcomponent(self, poly, fill, outline=None):
"""Add component to a shape of type compound.
Arguments: poly is a polygon, i. e. a tuple of number pairs.
fill is the fillcolor of the component,
outline is the outline color of the component.
        call (for a Shape object named s):
-- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue")
Example:
>>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
>>> s = Shape("compound")
>>> s.addcomponent(poly, "red", "blue")
>>> # .. add more components and then use register_shape()
"""
if self._type != "compound":
raise TurtleGraphicsError("Cannot add component to %s Shape"
% self._type)
if outline is None:
outline = fill
self._data.append([poly, fill, outline])
class Tbuffer(object):
"""Ring buffer used as undobuffer for RawTurtle objects."""
def __init__(self, bufsize=10):
self.bufsize = bufsize
self.buffer = [[None]] * bufsize
self.ptr = -1
self.cumulate = False
def reset(self, bufsize=None):
if bufsize is None:
for i in range(self.bufsize):
self.buffer[i] = [None]
else:
self.bufsize = bufsize
self.buffer = [[None]] * bufsize
self.ptr = -1
def push(self, item):
if self.bufsize > 0:
if not self.cumulate:
self.ptr = (self.ptr + 1) % self.bufsize
self.buffer[self.ptr] = item
else:
self.buffer[self.ptr].append(item)
def pop(self):
if self.bufsize > 0:
item = self.buffer[self.ptr]
if item is None:
return None
else:
self.buffer[self.ptr] = [None]
self.ptr = (self.ptr - 1) % self.bufsize
return (item)
def nr_of_items(self):
return self.bufsize - self.buffer.count([None])
def __repr__(self):
return str(self.buffer) + " " + str(self.ptr)
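# Illustrative note (not in the original source): Tbuffer is a fixed-size
# ring; once full, push() overwrites the oldest slot and pop() walks
# backwards from the most recent entry:
#
#   buf = Tbuffer(2)
#   buf.push('a'); buf.push('b'); buf.push('c')   # 'a' is overwritten
#   buf.pop()   # 'c'
#   buf.pop()   # 'b'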
class TurtleScreen(TurtleScreenBase):
"""Provides screen oriented methods like setbg etc.
Only relies upon the methods of TurtleScreenBase and NOT
upon components of the underlying graphics toolkit -
which is Tkinter in this case.
"""
_RUNNING = True
def __init__(self, cv, mode=_CFG["mode"],
colormode=_CFG["colormode"], delay=_CFG["delay"]):
self._shapes = {
"arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
"turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
(-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
(-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
(5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
(2,14))),
"circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
(5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
(-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
(-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
(-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
(5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
"square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
(-10,-10))),
"triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
(-10,-5.77))),
"classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
"blank" : Shape("image", self._blankimage())
}
self._bgpics = {"nopic" : ""}
TurtleScreenBase.__init__(self, cv)
self._mode = mode
self._delayvalue = delay
self._colormode = _CFG["colormode"]
self._keys = []
self.clear()
if sys.platform == 'darwin':
# Force Turtle window to the front on OS X. This is needed because
# the Turtle window will show behind the Terminal window when you
# start the demo from the command line.
rootwindow = cv.winfo_toplevel()
rootwindow.call('wm', 'attributes', '.', '-topmost', '1')
rootwindow.call('wm', 'attributes', '.', '-topmost', '0')
def clear(self):
"""Delete all drawings and all turtles from the TurtleScreen.
No argument.
Reset empty TurtleScreen to its initial state: white background,
no backgroundimage, no eventbindings and tracing on.
Example (for a TurtleScreen instance named screen):
>>> screen.clear()
Note: this method is not available as function.
"""
self._delayvalue = _CFG["delay"]
self._colormode = _CFG["colormode"]
self._delete("all")
self._bgpic = self._createimage("")
self._bgpicname = "nopic"
self._tracing = 1
self._updatecounter = 0
self._turtles = []
self.bgcolor("white")
for btn in 1, 2, 3:
self.onclick(None, btn)
self.onkeypress(None)
for key in self._keys[:]:
self.onkey(None, key)
self.onkeypress(None, key)
Turtle._pen = None
def mode(self, mode=None):
"""Set turtle-mode ('standard', 'logo' or 'world') and perform reset.
Optional argument:
mode -- one of the strings 'standard', 'logo' or 'world'
Mode 'standard' is compatible with turtle.py.
Mode 'logo' is compatible with most Logo-Turtle-Graphics.
Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in
this mode angles appear distorted if x/y unit-ratio doesn't equal 1.
If mode is not given, return the current mode.
        Mode        Initial turtle heading      positive angles
        ------------|-------------------------|-------------------
        'standard'  to the right (east)        counterclockwise
        'logo'      upward (north)             clockwise
Examples:
>>> mode('logo') # resets turtle heading to north
>>> mode()
'logo'
"""
if mode is None:
return self._mode
mode = mode.lower()
if mode not in ["standard", "logo", "world"]:
raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
self._mode = mode
if mode in ["standard", "logo"]:
self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
self.canvwidth//2, self.canvheight//2)
self.xscale = self.yscale = 1.0
self.reset()
def setworldcoordinates(self, llx, lly, urx, ury):
"""Set up a user defined coordinate-system.
Arguments:
llx -- a number, x-coordinate of lower left corner of canvas
lly -- a number, y-coordinate of lower left corner of canvas
urx -- a number, x-coordinate of upper right corner of canvas
ury -- a number, y-coordinate of upper right corner of canvas
        Set up the user coordinate-system and switch to mode 'world' if necessary.
This performs a screen.reset. If mode 'world' is already active,
all drawings are redrawn according to the new coordinates.
        But ATTENTION: in user-defined coordinate systems angles may appear
distorted. (see Screen.mode())
Example (for a TurtleScreen instance named screen):
>>> screen.setworldcoordinates(-10,-0.5,50,1.5)
>>> for _ in range(36):
... left(10)
... forward(0.5)
"""
if self.mode() != "world":
self.mode("world")
xspan = float(urx - llx)
yspan = float(ury - lly)
wx, wy = self._window_size()
self.screensize(wx-20, wy-20)
oldxscale, oldyscale = self.xscale, self.yscale
self.xscale = self.canvwidth / xspan
self.yscale = self.canvheight / yspan
srx1 = llx * self.xscale
sry1 = -ury * self.yscale
srx2 = self.canvwidth + srx1
sry2 = self.canvheight + sry1
self._setscrollregion(srx1, sry1, srx2, sry2)
self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)
self.update()
def register_shape(self, name, shape=None):
"""Adds a turtle shape to TurtleScreen's shapelist.
Arguments:
(1) name is the name of a gif-file and shape is None.
Installs the corresponding image shape.
!! Image-shapes DO NOT rotate when turning the turtle,
!! so they do not display the heading of the turtle!
(2) name is an arbitrary string and shape is a tuple
of pairs of coordinates. Installs the corresponding
polygon shape
(3) name is an arbitrary string and shape is a
(compound) Shape object. Installs the corresponding
compound shape.
To use a shape, you have to issue the command shape(shapename).
call: register_shape("turtle.gif")
--or: register_shape("tri", ((0,0), (10,10), (-10,10)))
Example (for a TurtleScreen instance named screen):
>>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
"""
if shape is None:
# image
if name.lower().endswith(".gif"):
shape = Shape("image", self._image(name))
else:
raise TurtleGraphicsError("Bad arguments for register_shape.\n"
+ "Use help(register_shape)" )
elif isinstance(shape, tuple):
shape = Shape("polygon", shape)
## else shape assumed to be Shape-instance
self._shapes[name] = shape
def _colorstr(self, color):
"""Return color string corresponding to args.
Argument may be a string or a tuple of three
numbers corresponding to actual colormode,
i.e. in the range 0<=n<=colormode.
If the argument doesn't represent a color,
an error is raised.
"""
if len(color) == 1:
color = color[0]
if isinstance(color, str):
if self._iscolorstring(color) or color == "":
return color
else:
raise TurtleGraphicsError("bad color string: %s" % str(color))
try:
r, g, b = color
except (TypeError, ValueError):
raise TurtleGraphicsError("bad color arguments: %s" % str(color))
if self._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(color))
return "#%02x%02x%02x" % (r, g, b)
def _color(self, cstr):
if not cstr.startswith("#"):
return cstr
if len(cstr) == 7:
cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
elif len(cstr) == 4:
cl = [16*int(cstr[h], 16) for h in cstr[1:]]
else:
raise TurtleGraphicsError("bad colorstring: %s" % cstr)
return tuple([c * self._colormode/255 for c in cl])
def colormode(self, cmode=None):
"""Return the colormode or set it to 1.0 or 255.
Optional argument:
cmode -- one of the values 1.0 or 255
r, g, b values of colortriples have to be in range 0..cmode.
Example (for a TurtleScreen instance named screen):
>>> screen.colormode()
1.0
>>> screen.colormode(255)
>>> pencolor(240,160,80)
"""
if cmode is None:
return self._colormode
if cmode == 1.0:
self._colormode = float(cmode)
elif cmode == 255:
self._colormode = int(cmode)
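# A minimal round-trip sketch using the two internal helpers above
# (assumes a TurtleScreen instance named screen; values illustrative):
#   screen.colormode(255)
#   screen._colorstr((30, 60, 90))   # -> '#1e3c5a' (hex of 30, 60, 90)
#   screen._color('#1e3c5a')         # -> (30.0, 60.0, 90.0)
# With colormode 1.0 the triple is scaled by 255 before conversion and
# divided by 255 on the way back.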
def reset(self):
"""Reset all Turtles on the Screen to their initial state.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.reset()
"""
for turtle in self._turtles:
turtle._setmode(self._mode)
turtle.reset()
def turtles(self):
"""Return the list of turtles on the screen.
Example (for a TurtleScreen instance named screen):
>>> screen.turtles()
[<turtle.Turtle object at 0x00E11FB0>]
"""
return self._turtles
def bgcolor(self, *args):
"""Set or return backgroundcolor of the TurtleScreen.
Arguments (if given): a color string or three numbers
in the range 0..colormode or a 3-tuple of such numbers.
Example (for a TurtleScreen instance named screen):
>>> screen.bgcolor("orange")
>>> screen.bgcolor()
'orange'
>>> screen.bgcolor(0.5,0,0.5)
>>> screen.bgcolor()
'#800080'
"""
if args:
color = self._colorstr(args)
else:
color = None
color = self._bgcolor(color)
if color is not None:
color = self._color(color)
return color
def tracer(self, n=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second argument sets the delay value (see RawTurtle.delay())
Example (for a TurtleScreen instance named screen):
>>> screen.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... fd(dist)
... rt(90)
... dist += 2
"""
if n is None:
return self._tracing
self._tracing = int(n)
self._updatecounter = 0
if delay is not None:
self._delayvalue = int(delay)
if self._tracing:
self.update()
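# A batching sketch (assumes a TurtleScreen instance named screen and a
# turtle drawing on it): tracer(0) suppresses all intermediate frames, so
# a complex figure appears in a single repaint when update() is called.
#   screen.tracer(0)
#   for _ in range(360):
#       turtle.fd(2); turtle.rt(1)
#   screen.update()     # one repaint instead of hundreds
#   screen.tracer(1)    # restore normal animation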
def delay(self, delay=None):
""" Return or set the drawing delay in milliseconds.
Optional argument:
delay -- positive integer
Example (for a TurtleScreen instance named screen):
>>> screen.delay(15)
>>> screen.delay()
15
"""
if delay is None:
return self._delayvalue
self._delayvalue = int(delay)
def _incrementudc(self):
"""Increment update counter."""
if not TurtleScreen._RUNNING:
TurtleScreen._RUNNING = True
raise Terminator
if self._tracing > 0:
self._updatecounter += 1
self._updatecounter %= self._tracing
def update(self):
"""Perform a TurtleScreen update.
"""
tracing = self._tracing
self._tracing = True
for t in self.turtles():
t._update_data()
t._drawturtle()
self._tracing = tracing
self._update()
def window_width(self):
""" Return the width of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_width()
640
"""
return self._window_size()[0]
def window_height(self):
""" Return the height of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_height()
480
"""
return self._window_size()[1]
def getcanvas(self):
"""Return the Canvas of this TurtleScreen.
No argument.
Example (for a Screen instance named screen):
>>> cv = screen.getcanvas()
>>> cv
<turtle.ScrolledCanvas instance at 0x010742D8>
"""
return self.cv
def getshapes(self):
"""Return a list of names of all currently available turtle shapes.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.getshapes()
['arrow', 'blank', 'circle', ... , 'turtle']
"""
return sorted(self._shapes.keys())
def onclick(self, fun, btn=1, add=None):
"""Bind fun to mouse-click event on canvas.
Arguments:
fun -- a function with two arguments, the coordinates of the
clicked point on the canvas.
btn -- the number of the mouse-button, defaults to 1
Example (for a TurtleScreen instance named screen)
>>> screen.onclick(goto)
>>> # Subsequently clicking into the TurtleScreen will
>>> # make the turtle move to the clicked point.
>>> screen.onclick(None)
"""
self._onscreenclick(fun, btn, add)
def onkey(self, fun, key):
"""Bind fun to key-release event of key.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkey(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
the up-arrow key, consequently drawing a hexagon.
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key not in self._keys:
self._keys.append(key)
self._onkeyrelease(fun, key)
def onkeypress(self, fun, key=None):
"""Bind fun to key-press event of key if key is given,
or to any key-press-event if no key is given.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkeypress(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
the up-arrow key, or by keeping the up-arrow key pressed,
consequently drawing a hexagon.
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key is not None and key not in self._keys:
self._keys.append(key)
self._onkeypress(fun, key)
def listen(self, xdummy=None, ydummy=None):
"""Set focus on TurtleScreen (in order to collect key-events)
No arguments.
Dummy arguments are provided in order
to be able to pass listen to the onclick method.
Example (for a TurtleScreen instance named screen):
>>> screen.listen()
"""
self._listen()
def ontimer(self, fun, t=0):
"""Install a timer, which calls fun after t milliseconds.
Arguments:
fun -- a function with no arguments.
t -- a number >= 0
Example (for a TurtleScreen instance named screen):
>>> running = True
>>> def f():
... if running:
... fd(50)
... lt(60)
... screen.ontimer(f, 250)
...
>>> f() # makes the turtle marching around
>>> running = False
"""
self._ontimer(fun, t)
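# A self-rescheduling animation sketch (assumes a Turtle instance named
# turtle): ontimer fires fun only once, so a loop is built by having fun
# re-arm itself, as in the docstring example above.
#   def spin():
#       turtle.rt(5)
#       turtle.getscreen().ontimer(spin, 50)   # re-arm every 50 ms
#   spin()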
def bgpic(self, picname=None):
"""Set background image or return name of current backgroundimage.
Optional argument:
picname -- a string, name of a gif-file or "nopic".
If picname is a filename, set the corresponding image as background.
If picname is "nopic", delete backgroundimage, if present.
If picname is None, return the filename of the current backgroundimage.
Example (for a TurtleScreen instance named screen):
>>> screen.bgpic()
'nopic'
>>> screen.bgpic("landscape.gif")
>>> screen.bgpic()
'landscape.gif'
"""
if picname is None:
return self._bgpicname
if picname not in self._bgpics:
self._bgpics[picname] = self._image(picname)
self._setbgpic(self._bgpic, self._bgpics[picname])
self._bgpicname = picname
def screensize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on.
Optional arguments:
canvwidth -- positive integer, new width of canvas in pixels
canvheight -- positive integer, new height of canvas in pixels
bg -- colorstring or color-tuple, new backgroundcolor
If no arguments are given, return current (canvaswidth, canvasheight)
Do not alter the drawing window. To observe hidden parts of
the canvas use the scrollbars. (Can make visible those parts
of a drawing, which were outside the canvas before!)
Example (for a Turtle instance named turtle):
>>> turtle.screensize(2000,1500)
>>> # e.g. to search for an erroneously escaped turtle ;-)
"""
return self._resize(canvwidth, canvheight, bg)
onscreenclick = onclick
resetscreen = reset
clearscreen = clear
addshape = register_shape
onkeyrelease = onkey
class TNavigator(object):
"""Navigation part of the RawTurtle.
Implements methods for turtle movement.
"""
START_ORIENTATION = {
"standard": Vec2D(1.0, 0.0),
"world" : Vec2D(1.0, 0.0),
"logo" : Vec2D(0.0, 1.0) }
DEFAULT_MODE = "standard"
DEFAULT_ANGLEOFFSET = 0
DEFAULT_ANGLEORIENT = 1
def __init__(self, mode=DEFAULT_MODE):
self._angleOffset = self.DEFAULT_ANGLEOFFSET
self._angleOrient = self.DEFAULT_ANGLEORIENT
self._mode = mode
self.undobuffer = None
self.degrees()
self._mode = None
self._setmode(mode)
TNavigator.reset(self)
def reset(self):
"""reset turtle to its initial values
Will be overwritten by parent class
"""
self._position = Vec2D(0.0, 0.0)
self._orient = TNavigator.START_ORIENTATION[self._mode]
def _setmode(self, mode=None):
"""Set turtle-mode to 'standard', 'world' or 'logo'.
"""
if mode is None:
return self._mode
if mode not in ["standard", "logo", "world"]:
return
self._mode = mode
if mode in ["standard", "world"]:
self._angleOffset = 0
self._angleOrient = 1
else: # mode == "logo":
self._angleOffset = self._fullcircle/4.
self._angleOrient = -1
def _setDegreesPerAU(self, fullcircle):
"""Helper function for degrees() and radians()"""
self._fullcircle = fullcircle
self._degreesPerAU = 360/fullcircle
if self._mode == "standard":
self._angleOffset = 0
else:
self._angleOffset = fullcircle/4.
def degrees(self, fullcircle=360.0):
""" Set angle measurement units to degrees.
Optional argument:
fullcircle - a number
Set angle measurement units, i. e. set number
of 'degrees' for a full circle. Default value is
360 degrees.
Example (for a Turtle instance named turtle):
>>> turtle.left(90)
>>> turtle.heading()
90
Change angle measurement unit to grad (also known as gon,
grade, or gradian and equals 1/100-th of the right angle.)
>>> turtle.degrees(400.0)
>>> turtle.heading()
100
"""
self._setDegreesPerAU(fullcircle)
def radians(self):
""" Set the angle measurement units to radians.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.heading()
90
>>> turtle.radians()
>>> turtle.heading()
1.5707963267948966
"""
self._setDegreesPerAU(2*math.pi)
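# How the angle-unit bookkeeping plays out (illustrative numbers):
# _setDegreesPerAU stores _degreesPerAU = 360/fullcircle, and _rotate
# multiplies every angle by it before turning. After radians():
#   fullcircle    = 2*pi         ~ 6.2832
#   _degreesPerAU = 360/(2*pi)   ~ 57.2958   (degrees per radian)
# so left(math.pi/2) rotates the orientation vector by exactly 90 degrees.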
def _go(self, distance):
"""move turtle forward by specified distance"""
ende = self._position + self._orient * distance
self._goto(ende)
def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle)
def _goto(self, end):
"""move turtle to position end."""
self._position = end
def forward(self, distance):
"""Move the turtle forward by the specified distance.
Aliases: forward | fd
Argument:
distance -- a number (integer or float)
Move the turtle forward by the specified distance, in the direction
the turtle is headed.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.forward(25)
>>> turtle.position()
(25.00,0.00)
>>> turtle.forward(-75)
>>> turtle.position()
(-50.00,0.00)
"""
self._go(distance)
def back(self, distance):
"""Move the turtle backward by distance.
Aliases: back | backward | bk
Argument:
distance -- a number
Move the turtle backward by distance, opposite to the direction the
turtle is headed. Do not change the turtle's heading.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.backward(30)
>>> turtle.position()
(-30.00, 0.00)
"""
self._go(-distance)
def right(self, angle):
"""Turn turtle right by angle units.
Aliases: right | rt
Argument:
angle -- a number (integer or float)
Turn turtle right by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.right(45)
>>> turtle.heading()
337.0
"""
self._rotate(-angle)
def left(self, angle):
"""Turn turtle left by angle units.
Aliases: left | lt
Argument:
angle -- a number (integer or float)
Turn turtle left by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.left(45)
>>> turtle.heading()
67.0
"""
self._rotate(angle)
def pos(self):
"""Return the turtle's current location (x,y), as a Vec2D-vector.
Aliases: pos | position
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 240.00)
"""
return self._position
def xcor(self):
""" Return the turtle's x coordinate.
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print(turtle.xcor())
50.0
"""
return self._position[0]
def ycor(self):
""" Return the turtle's y coordinate
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print(turtle.ycor())
86.6025403784
"""
return self._position[1]
def goto(self, x, y=None):
"""Move turtle to an absolute position.
Aliases: setpos | setposition | goto
Arguments:
x -- a number or a pair/vector of numbers
y -- a number or None
call: goto(x, y) # two coordinates
--or: goto((x, y)) # a pair (tuple) of coordinates
--or: goto(vec) # e.g. as returned by pos()
Move turtle to an absolute position. If the pen is down,
a line will be drawn. The turtle's orientation does not change.
Example (for a Turtle instance named turtle):
>>> tp = turtle.pos()
>>> tp
(0.00, 0.00)
>>> turtle.setpos(60,30)
>>> turtle.pos()
(60.00,30.00)
>>> turtle.setpos((20,80))
>>> turtle.pos()
(20.00,80.00)
>>> turtle.setpos(tp)
>>> turtle.pos()
(0.00,0.00)
"""
if y is None:
self._goto(Vec2D(*x))
else:
self._goto(Vec2D(x, y))
def home(self):
"""Move turtle to the origin - coordinates (0,0).
No arguments.
Move turtle to the origin - coordinates (0,0) and set its
heading to its start-orientation (which depends on mode).
Example (for a Turtle instance named turtle):
>>> turtle.home()
"""
self.goto(0, 0)
self.setheading(0)
def setx(self, x):
"""Set the turtle's first coordinate to x
Argument:
x -- a number (integer or float)
Set the turtle's first coordinate to x, leave second coordinate
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 240.00)
>>> turtle.setx(10)
>>> turtle.position()
(10.00, 240.00)
"""
self._goto(Vec2D(x, self._position[1]))
def sety(self, y):
"""Set the turtle's second coordinate to y
Argument:
y -- a number (integer or float)
Set the turtle's second coordinate to y, first coordinate remains
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 40.00)
>>> turtle.sety(-10)
>>> turtle.position()
(0.00, -10.00)
"""
self._goto(Vec2D(self._position[0], y))
def distance(self, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: distance(x, y) # two coordinates
--or: distance((x, y)) # a pair (tuple) of coordinates
--or: distance(vec) # e.g. as returned by pos()
--or: distance(mypen) # where mypen is another turtle
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 0.00)
>>> turtle.distance(30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> turtle.distance(pen)
77.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
return abs(pos - self._position)
def towards(self, x, y=None):
"""Return the angle of the line from the turtle's position to (x, y).
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: towards(x, y) # two coordinates
--or: towards((x, y)) # a pair (tuple) of coordinates
--or: towards(vec) # e.g. as returned by pos()
--or: towards(mypen) # where mypen is another turtle
Return the angle between the line from the turtle's position to the
position specified by x, y and the turtle's start orientation. (Depends
on mode - "standard" or "logo".)
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(10.00, 10.00)
>>> turtle.towards(0,0)
225.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
x, y = pos - self._position
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
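# A worked instance of the computation above: with the turtle at (10, 10),
# towards(0, 0) forms the difference vector (-10, -10), and
#   atan2(-10, -10) * 180/pi = -135.0   ->   -135.0 % 360.0 = 225.0
# In standard mode (_angleOffset = 0, _angleOrient = 1, _degreesPerAU = 1)
# that 225.0 is returned unchanged, matching the docstring example.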
def heading(self):
""" Return the turtle's current heading.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.left(67)
>>> turtle.heading()
67.0
"""
x, y = self._orient
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def setheading(self, to_angle):
"""Set the orientation of the turtle to to_angle.
Aliases: setheading | seth
Argument:
to_angle -- a number (integer or float)
Set the orientation of the turtle to to_angle.
Here are some common directions in degrees:
standard - mode: logo-mode:
-------------------|--------------------
0 - east 0 - north
90 - north 90 - east
180 - west 180 - south
270 - south 270 - west
Example (for a Turtle instance named turtle):
>>> turtle.setheading(90)
>>> turtle.heading()
90
"""
angle = (to_angle - self.heading())*self._angleOrient
full = self._fullcircle
angle = (angle+full/2.)%full - full/2.
self._rotate(angle)
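# The normalization above picks the shortest turn: the raw difference is
# folded into the half-open interval [-fullcircle/2, fullcircle/2).
# E.g. in standard mode with heading 350 and setheading(10):
#   angle = 10 - 350 = -340
#   (-340 + 180) % 360 - 180 = 20
# so the turtle turns 20 degrees counterclockwise rather than 340 clockwise.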
def circle(self, radius, extent = None, steps = None):
""" Draw a circle with given radius.
Arguments:
radius -- a number
extent (optional) -- a number
steps (optional) -- an integer
Draw a circle with given radius. The center is radius units left
of the turtle; extent - an angle - determines which part of the
circle is drawn. If extent is not given, draw the entire circle.
If extent is not a full circle, one endpoint of the arc is the
current pen position. Draw the arc in counterclockwise direction
if radius is positive, otherwise in clockwise direction. Finally
the direction of the turtle is changed by the amount of extent.
As the circle is approximated by an inscribed regular polygon,
steps determines the number of steps to use. If not given,
it will be calculated automatically. May be used to draw regular
polygons.
call: circle(radius) # full circle
--or: circle(radius, extent) # arc
--or: circle(radius, extent, steps)
--or: circle(radius, steps=6) # 6-sided polygon
Example (for a Turtle instance named turtle):
>>> turtle.circle(50)
>>> turtle.circle(120, 180) # semicircle
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
speed = self.speed()
if extent is None:
extent = self._fullcircle
if steps is None:
frac = abs(extent)/self._fullcircle
steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
w = 1.0 * extent / steps
w2 = 0.5 * w
l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
if radius < 0:
l, w, w2 = -l, -w, -w2
tr = self._tracer()
dl = self._delay()
if speed == 0:
self._tracer(0, 0)
else:
self.speed(0)
self._rotate(w2)
for i in range(steps):
self.speed(speed)
self._go(l)
self.speed(0)
self._rotate(w)
self._rotate(-w2)
if speed == 0:
self._tracer(tr, dl)
self.speed(speed)
if self.undobuffer:
self.undobuffer.cumulate = False
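# Geometry behind the loop above: the arc is approximated by `steps`
# chords of an inscribed regular polygon. Each chord subtends an angle of
# w = extent/steps, and its length is the standard chord formula
#   l = 2 * radius * sin(w/2)     (w/2 converted to radians first)
# The initial and final half-rotations by w2 keep the turtle tangent to
# the circle at both endpoints of the arc.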
## three dummy methods to be implemented by child class:
def speed(self, s=0):
"""dummy method - to be overwritten by child class"""
def _tracer(self, a=None, b=None):
"""dummy method - to be overwritten by child class"""
def _delay(self, n=None):
"""dummy method - to be overwritten by child class"""
fd = forward
bk = back
backward = back
rt = right
lt = left
position = pos
setpos = goto
setposition = goto
seth = setheading
class TPen(object):
"""Drawing part of the RawTurtle.
Implements drawing properties.
"""
def __init__(self, resizemode=_CFG["resizemode"]):
self._resizemode = resizemode # or "user" or "noresize"
self.undobuffer = None
TPen._reset(self)
def _reset(self, pencolor=_CFG["pencolor"],
fillcolor=_CFG["fillcolor"]):
self._pensize = 1
self._shown = True
self._pencolor = pencolor
self._fillcolor = fillcolor
self._drawing = True
self._speed = 3
self._stretchfactor = (1., 1.)
self._shearfactor = 0.
self._tilt = 0.
self._shapetrafo = (1., 0., 0., 1.)
self._outlinewidth = 1
def resizemode(self, rmode=None):
"""Set resizemode to one of the values: "auto", "user", "noresize".
(Optional) Argument:
rmode -- one of the strings "auto", "user", "noresize"
Different resizemodes have the following effects:
- "auto" adapts the appearance of the turtle
corresponding to the value of pensize.
- "user" adapts the appearance of the turtle according to the
values of stretchfactor and outlinewidth (outline),
which are set by shapesize()
- "noresize": no adaptation of the turtle's appearance takes place.
If no argument is given, return current resizemode.
resizemode("user") is called by a call of shapesize with arguments.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("noresize")
>>> turtle.resizemode()
'noresize'
"""
if rmode is None:
return self._resizemode
rmode = rmode.lower()
if rmode in ["auto", "user", "noresize"]:
self.pen(resizemode=rmode)
def pensize(self, width=None):
"""Set or return the line thickness.
Aliases: pensize | width
Argument:
width -- positive number
Set the line thickness to width or return it. If resizemode is set
to "auto" and turtleshape is a polygon, that polygon is drawn with
the same line thickness. If no argument is given, current pensize
is returned.
Example (for a Turtle instance named turtle):
>>> turtle.pensize()
1
>>> turtle.pensize(10) # from here on lines of width 10 are drawn
"""
if width is None:
return self._pensize
self.pen(pensize=width)
def penup(self):
"""Pull the pen up -- no drawing when moving.
Aliases: penup | pu | up
No argument
Example (for a Turtle instance named turtle):
>>> turtle.penup()
"""
if not self._drawing:
return
self.pen(pendown=False)
def pendown(self):
"""Pull the pen down -- drawing when moving.
Aliases: pendown | pd | down
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.pendown()
"""
if self._drawing:
return
self.pen(pendown=True)
def isdown(self):
"""Return True if pen is down, False if it's up.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.penup()
>>> turtle.isdown()
False
>>> turtle.pendown()
>>> turtle.isdown()
True
"""
return self._drawing
def speed(self, speed=None):
""" Return or set the turtle's speed.
Optional argument:
speed -- an integer in the range 0..10 or a speedstring (see below)
Set the turtle's speed to an integer value in the range 0 .. 10.
If no argument is given: return current speed.
If input is a number greater than 10 or smaller than 0.5,
speed is set to 0.
Speedstrings are mapped to speedvalues in the following way:
'fastest' : 0
'fast' : 10
'normal' : 6
'slow' : 3
'slowest' : 1
speeds from 1 to 10 enforce increasingly faster animation of
line drawing and turtle turning.
Attention:
speed = 0 : *no* animation takes place. forward/back makes turtle jump
and likewise left/right make the turtle turn instantly.
Example (for a Turtle instance named turtle):
>>> turtle.speed(3)
"""
speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
if speed is None:
return self._speed
if speed in speeds:
speed = speeds[speed]
elif 0.5 < speed < 10.5:
speed = int(round(speed))
else:
speed = 0
self.pen(speed=speed)
def color(self, *args):
"""Return or set the pencolor and fillcolor.
Arguments:
Several input formats are allowed.
They use 0, 1, 2, or 3 arguments as follows:
color()
Return the current pencolor and the current fillcolor
as a pair of color specification strings as are returned
by pencolor and fillcolor.
color(colorstring), color((r,g,b)), color(r,g,b)
inputs as in pencolor; set both fillcolor and pencolor
to the given value.
color(colorstring1, colorstring2),
color((r1,g1,b1), (r2,g2,b2))
equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
and analogously, if the other input format is used.
If turtleshape is a polygon, outline and interior of that polygon
is drawn with the newly set colors.
For more info see: pencolor, fillcolor
Example (for a Turtle instance named turtle):
>>> turtle.color('red', 'green')
>>> turtle.color()
('red', 'green')
>>> colormode(255)
>>> color((40, 80, 120), (160, 200, 240))
>>> color()
('#285078', '#a0c8f0')
"""
if args:
l = len(args)
if l == 1:
pcolor = fcolor = args[0]
elif l == 2:
pcolor, fcolor = args
elif l == 3:
pcolor = fcolor = args
pcolor = self._colorstr(pcolor)
fcolor = self._colorstr(fcolor)
self.pen(pencolor=pcolor, fillcolor=fcolor)
else:
return self._color(self._pencolor), self._color(self._fillcolor)
def pencolor(self, *args):
""" Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
colorstring is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represents an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
"""
if args:
color = self._colorstr(args)
if color == self._pencolor:
return
self.pen(pencolor=color)
else:
return self._color(self._pencolor)
def fillcolor(self, *args):
""" Return or set the fillcolor.
Arguments:
Four input formats are allowed:
- fillcolor()
Return the current fillcolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- fillcolor(colorstring)
colorstring is a Tk color specification string, such as "red" or "yellow"
- fillcolor((r, g, b))
*a tuple* of r, g, and b, which represents an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- fillcolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the interior of that polygon is drawn
with the newly set fillcolor.
Example (for a Turtle instance named turtle):
>>> turtle.fillcolor('violet')
>>> col = turtle.pencolor()
>>> turtle.fillcolor(col)
>>> turtle.fillcolor(0, .5, 0)
"""
if args:
color = self._colorstr(args)
if color == self._fillcolor:
return
self.pen(fillcolor=color)
else:
return self._color(self._fillcolor)
def showturtle(self):
"""Makes the turtle visible.
Aliases: showturtle | st
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> turtle.showturtle()
"""
self.pen(shown=True)
def hideturtle(self):
"""Makes the turtle invisible.
Aliases: hideturtle | ht
No argument.
It's a good idea to do this while you're in the
middle of a complicated drawing, because hiding
the turtle speeds up the drawing observably.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
"""
self.pen(shown=False)
def isvisible(self):
"""Return True if the Turtle is shown, False if it's hidden.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> print(turtle.isvisible())
False
"""
return self._shown
def pen(self, pen=None, **pendict):
"""Return or set the pen's attributes.
Arguments:
pen -- a dictionary with some or all of the below listed keys.
**pendict -- one or more keyword-arguments with the below
listed keys as keywords.
Return or set the pen's attributes in a 'pen-dictionary'
with the following key/value pairs:
"shown" : True/False
"pendown" : True/False
"pencolor" : color-string or color-tuple
"fillcolor" : color-string or color-tuple
"pensize" : positive number
"speed" : number in range 0..10
"resizemode" : "auto" or "user" or "noresize"
"stretchfactor": (positive number, positive number)
"shearfactor": number
"outline" : positive number
"tilt" : number
This dictionary can be used as argument for a subsequent
pen()-call to restore the former pen-state. Moreover one
or more of these attributes can be provided as keyword-arguments.
This can be used to set several pen attributes in one statement.
Examples (for a Turtle instance named turtle):
>>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> penstate=turtle.pen()
>>> turtle.color("yellow","")
>>> turtle.penup()
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> turtle.pen(penstate, fillcolor="green")
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
"""
_pd = {"shown" : self._shown,
"pendown" : self._drawing,
"pencolor" : self._pencolor,
"fillcolor" : self._fillcolor,
"pensize" : self._pensize,
"speed" : self._speed,
"resizemode" : self._resizemode,
"stretchfactor" : self._stretchfactor,
"shearfactor" : self._shearfactor,
"outline" : self._outlinewidth,
"tilt" : self._tilt
}
if not (pen or pendict):
return _pd
if isinstance(pen, dict):
p = pen
else:
p = {}
p.update(pendict)
_p_buf = {}
for key in p:
_p_buf[key] = _pd[key]
if self.undobuffer:
self.undobuffer.push(("pen", _p_buf))
newLine = False
if "pendown" in p:
if self._drawing != p["pendown"]:
newLine = True
if "pencolor" in p:
if isinstance(p["pencolor"], tuple):
p["pencolor"] = self._colorstr((p["pencolor"],))
if self._pencolor != p["pencolor"]:
newLine = True
if "pensize" in p:
if self._pensize != p["pensize"]:
newLine = True
if newLine:
self._newLine()
if "pendown" in p:
self._drawing = p["pendown"]
if "pencolor" in p:
self._pencolor = p["pencolor"]
if "pensize" in p:
self._pensize = p["pensize"]
if "fillcolor" in p:
if isinstance(p["fillcolor"], tuple):
p["fillcolor"] = self._colorstr((p["fillcolor"],))
self._fillcolor = p["fillcolor"]
if "speed" in p:
self._speed = p["speed"]
if "resizemode" in p:
self._resizemode = p["resizemode"]
if "stretchfactor" in p:
sf = p["stretchfactor"]
if isinstance(sf, (int, float)):
sf = (sf, sf)
self._stretchfactor = sf
if "shearfactor" in p:
self._shearfactor = p["shearfactor"]
if "outline" in p:
self._outlinewidth = p["outline"]
if "shown" in p:
self._shown = p["shown"]
if "tilt" in p:
self._tilt = p["tilt"]
if "stretchfactor" in p or "tilt" in p or "shearfactor" in p:
scx, scy = self._stretchfactor
shf = self._shearfactor
sa, ca = math.sin(self._tilt), math.cos(self._tilt)
self._shapetrafo = ( scx*ca, scy*(shf*ca + sa),
-scx*sa, scy*(ca - shf*sa))
self._update()
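# The tuple assembled above is the flattened 2x2 shape-transform matrix
# combining stretch, shear and tilt; it factors (up to the sign convention
# chosen for the tilt rotation) as rotation(tilt) . shear(shf) . scale.
# Sanity check: with scx = scy = 1, shf = 0 and tilt = 0 the expression
# reduces to (1., 0., 0., 1.), the identity set in _reset().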
## three dummy methods to be implemented by child class:
def _newLine(self, usePos = True):
"""dummy method - to be overwritten by child class"""
def _update(self, count=True, forced=False):
"""dummy method - to be overwritten by child class"""
def _color(self, args):
"""dummy method - to be overwritten by child class"""
def _colorstr(self, args):
"""dummy method - to be overwritten by child class"""
width = pensize
up = penup
pu = penup
pd = pendown
down = pendown
st = showturtle
ht = hideturtle
class _TurtleImage(object):
"""Helper class: Datatype to store Turtle attributes
"""
def __init__(self, screen, shapeIndex):
self.screen = screen
self._type = None
self._setshape(shapeIndex)
def _setshape(self, shapeIndex):
screen = self.screen
self.shapeIndex = shapeIndex
if self._type == "polygon" == screen._shapes[shapeIndex]._type:
return
if self._type == "image" == screen._shapes[shapeIndex]._type:
return
if self._type in ["image", "polygon"]:
screen._delete(self._item)
elif self._type == "compound":
for item in self._item:
screen._delete(item)
self._type = screen._shapes[shapeIndex]._type
if self._type == "polygon":
self._item = screen._createpoly()
elif self._type == "image":
self._item = screen._createimage(screen._shapes["blank"]._data)
elif self._type == "compound":
self._item = [screen._createpoly() for item in
screen._shapes[shapeIndex]._data]
class RawTurtle(TPen, TNavigator):
"""Animation part of the RawTurtle.
Puts RawTurtle upon a TurtleScreen and provides tools for
its animation.
"""
screens = []
def __init__(self, canvas=None,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if isinstance(canvas, _Screen):
self.screen = canvas
elif isinstance(canvas, TurtleScreen):
if canvas not in RawTurtle.screens:
RawTurtle.screens.append(canvas)
self.screen = canvas
elif isinstance(canvas, (ScrolledCanvas, Canvas)):
for screen in RawTurtle.screens:
if screen.cv == canvas:
self.screen = screen
break
else:
self.screen = TurtleScreen(canvas)
RawTurtle.screens.append(self.screen)
else:
raise TurtleGraphicsError("bad canvas argument %s" % canvas)
screen = self.screen
TNavigator.__init__(self, screen.mode())
TPen.__init__(self)
screen._turtles.append(self)
self.drawingLineItem = screen._createline()
self.turtle = _TurtleImage(screen, shape)
self._poly = None
self._creatingPoly = False
self._fillitem = self._fillpath = None
self._shown = visible
self._hidden_from_screen = False
self.currentLineItem = screen._createline()
self.currentLine = [self._position]
self.items = [self.currentLineItem]
self.stampItems = []
self._undobuffersize = undobuffersize
self.undobuffer = Tbuffer(undobuffersize)
self._update()
def reset(self):
"""Delete the turtle's drawings and restore its default values.
No argument.
Delete the turtle's drawings from the screen, re-center the turtle
and set variables to the default values.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00,-22.00)
>>> turtle.heading()
100.0
>>> turtle.reset()
>>> turtle.position()
(0.00,0.00)
>>> turtle.heading()
0.0
"""
TNavigator.reset(self)
TPen._reset(self)
self._clear()
self._drawturtle()
self._update()
def setundobuffer(self, size):
"""Set or disable undobuffer.
Argument:
size -- an integer or None
If size is an integer an empty undobuffer of given size is installed.
Size gives the maximum number of turtle-actions that can be undone
by the undo() function.
If size is None, no undobuffer is present.
Example (for a Turtle instance named turtle):
>>> turtle.setundobuffer(42)
"""
if size is None or size <= 0:
self.undobuffer = None
else:
self.undobuffer = Tbuffer(size)
def undobufferentries(self):
"""Return count of entries in the undobuffer.
No argument.
Example (for a Turtle instance named turtle):
>>> while undobufferentries():
... undo()
"""
if self.undobuffer is None:
return 0
return self.undobuffer.nr_of_items()
def _clear(self):
"""Delete all of pen's drawings"""
self._fillitem = self._fillpath = None
for item in self.items:
self.screen._delete(item)
self.currentLineItem = self.screen._createline()
self.currentLine = []
if self._drawing:
self.currentLine.append(self._position)
self.items = [self.currentLineItem]
self.clearstamps()
self.setundobuffer(self._undobuffersize)
def clear(self):
"""Delete the turtle's drawings from the screen. Do not move turtle.
No arguments.
Delete the turtle's drawings from the screen. Do not move turtle.
State and position of the turtle as well as drawings of other
turtles are not affected.
Examples (for a Turtle instance named turtle):
>>> turtle.clear()
"""
self._clear()
self._update()
def _update_data(self):
self.screen._incrementudc()
if self.screen._updatecounter != 0:
return
if len(self.currentLine)>1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
def _update(self):
"""Perform a Turtle-data update.
"""
screen = self.screen
if screen._tracing == 0:
return
elif screen._tracing == 1:
self._update_data()
self._drawturtle()
screen._update() # TurtleScreenBase
screen._delay(screen._delayvalue) # TurtleScreenBase
else:
self._update_data()
if screen._updatecounter == 0:
for t in screen.turtles():
t._drawturtle()
screen._update()
def _tracer(self, flag=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
flag -- nonnegative integer
delay -- nonnegative integer
If flag is given, only each flag-th regular screen update is really
performed. (Can be used to accelerate the drawing of complex graphics.)
Second argument sets the delay value (see RawTurtle.delay())
Example (for a Turtle instance named turtle):
>>> turtle.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... turtle.fd(dist)
... turtle.rt(90)
... dist += 2
"""
return self.screen.tracer(flag, delay)
def _color(self, args):
return self.screen._color(args)
def _colorstr(self, args):
return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except (TypeError, ValueError):
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
def clone(self):
"""Create and return a clone of the turtle.
No argument.
Create and return a clone of the turtle with same position, heading
and turtle properties.
Example (for a Turtle instance named mick):
mick = Turtle()
joe = mick.clone()
"""
screen = self.screen
self._newLine(self._drawing)
turtle = self.turtle
self.screen = None
self.turtle = None # to make self deepcopy-able
q = deepcopy(self)
self.screen = screen
self.turtle = turtle
q.screen = screen
q.turtle = _TurtleImage(screen, self.turtle.shapeIndex)
screen._turtles.append(q)
ttype = screen._shapes[self.turtle.shapeIndex]._type
if ttype == "polygon":
q.turtle._item = screen._createpoly()
elif ttype == "image":
q.turtle._item = screen._createimage(screen._shapes["blank"]._data)
elif ttype == "compound":
q.turtle._item = [screen._createpoly() for item in
screen._shapes[self.turtle.shapeIndex]._data]
q.currentLineItem = screen._createline()
q._update()
return q
def shape(self, name=None):
"""Set turtle shape to shape with given name / return current shapename.
Optional argument:
name -- a string, which is a valid shapename
Set turtle shape to shape with given name or, if name is not given,
return name of current shape.
Shape with name must exist in the TurtleScreen's shape dictionary.
Initially there are the following polygon shapes:
'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
To learn about how to deal with shapes see Screen-method register_shape.
Example (for a Turtle instance named turtle):
>>> turtle.shape()
'arrow'
>>> turtle.shape("turtle")
>>> turtle.shape()
'turtle'
"""
if name is None:
return self.turtle.shapeIndex
if name not in self.screen.getshapes():
raise TurtleGraphicsError("There is no shape named %s" % name)
self.turtle._setshape(name)
self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
Return or set the pen's attributes x/y-stretchfactors and/or outline.
Set resizemode to "user".
If and only if resizemode is set to "user", the turtle will be displayed
stretched according to its stretchfactors:
stretch_wid is stretchfactor perpendicular to orientation
stretch_len is stretchfactor in direction of the turtle's orientation.
outline determines the width of the shape's outline.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("user")
>>> turtle.shapesize(5, 5, 12)
>>> turtle.shapesize(outline=8)
"""
if stretch_wid is stretch_len is outline is None:
stretch_wid, stretch_len = self._stretchfactor
return stretch_wid, stretch_len, self._outlinewidth
if stretch_wid == 0 or stretch_len == 0:
raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero")
if stretch_wid is not None:
if stretch_len is None:
stretchfactor = stretch_wid, stretch_wid
else:
stretchfactor = stretch_wid, stretch_len
elif stretch_len is not None:
stretchfactor = self._stretchfactor[0], stretch_len
else:
stretchfactor = self._stretchfactor
if outline is None:
outline = self._outlinewidth
self.pen(resizemode="user",
stretchfactor=stretchfactor, outline=outline)
def shearfactor(self, shear=None):
"""Set or return the current shearfactor.
Optional argument: shear -- number, tangent of the shear angle
Shear the turtleshape according to the given shearfactor shear,
which is the tangent of the shear angle. DO NOT change the
turtle's heading (direction of movement).
If shear is not given: return the current shearfactor, i. e. the
tangent of the shear angle, by which lines parallel to the
heading of the turtle are sheared.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.shearfactor(0.5)
>>> turtle.shearfactor()
0.5
"""
if shear is None:
return self._shearfactor
self.pen(resizemode="user", shearfactor=shear)
def settiltangle(self, angle):
"""Rotate the turtleshape to point in the specified direction
Argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.settiltangle(45)
>>> stamp()
>>> turtle.fd(50)
>>> turtle.settiltangle(-45)
>>> stamp()
>>> turtle.fd(50)
"""
tilt = -angle * self._degreesPerAU * self._angleOrient
tilt = (tilt * math.pi / 180.0) % (2*math.pi)
self.pen(resizemode="user", tilt=tilt)
def tiltangle(self, angle=None):
"""Set or return the current tilt-angle.
Optional argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
If angle is not given: return the current tilt-angle, i. e. the angle
between the orientation of the turtleshape and the heading of the
turtle (its direction of movement).
Deprecated since Python 3.1
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(45)
>>> turtle.tiltangle()
"""
if angle is None:
tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
return (tilt / self._degreesPerAU) % self._fullcircle
else:
self.settiltangle(angle)
def tilt(self, angle):
"""Rotate the turtleshape by angle.
Argument:
angle - a number
Rotate the turtleshape by angle from its current tilt-angle,
but do NOT change the turtle's heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(30)
>>> turtle.fd(50)
>>> turtle.tilt(30)
>>> turtle.fd(50)
"""
self.settiltangle(angle + self.tiltangle())
def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
"""Set or return the current transformation matrix of the turtle shape.
Optional arguments: t11, t12, t21, t22 -- numbers.
If none of the matrix elements are given, return the transformation
matrix.
Otherwise set the given elements and transform the turtleshape
according to the matrix consisting of first row t11, t12 and
second row t21, t22.
Modify stretchfactor, shearfactor and tiltangle according to the
given matrix.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapesize(4,2)
>>> turtle.shearfactor(-0.5)
>>> turtle.shapetransform()
(4.0, -1.0, -0.0, 2.0)
"""
if t11 is t12 is t21 is t22 is None:
return self._shapetrafo
m11, m12, m21, m22 = self._shapetrafo
if t11 is not None: m11 = t11
if t12 is not None: m12 = t12
if t21 is not None: m21 = t21
if t22 is not None: m22 = t22
if m11 * m22 - m12 * m21 == 0: # check the merged matrix; any t may be None
raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
self._shapetrafo = (m11, m12, m21, m22)
alfa = math.atan2(-m21, m11) % (2 * math.pi)
sa, ca = math.sin(alfa), math.cos(alfa)
a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
sa*m11 + ca*m21, sa*m12 + ca*m22)
self._stretchfactor = a11, a22
self._shearfactor = a12/a22
self._tilt = alfa
self.pen(resizemode="user")
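# A worked instance of the decomposition above, matching the docstring:
# for the matrix (4.0, -1.0, -0.0, 2.0),
#   alfa = atan2(0.0, 4.0) % (2*pi) = 0,  so sa = 0, ca = 1
#   stretchfactor = (4.0, 2.0),  shearfactor = -1.0/2.0 = -0.5,  tilt = 0
# i.e. shapesize(4, 2) followed by shearfactor(-0.5) reproduces exactly
# the transform the docstring example constructed.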
def _polytrafo(self, poly):
"""Computes transformed polygon shapes from a shape
according to current position and heading.
"""
screen = self.screen
p0, p1 = self._position
e0, e1 = self._orient
e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
e0, e1 = (1.0 / abs(e)) * e
return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
for (x, y) in poly]
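# Reading the transform above: (e0, e1) is the turtle's heading corrected
# for anisotropic world scaling and renormalized. Each shape point (x, y)
# is rotated into the heading frame, divided by the scale factors to get
# user units, and translated to the turtle position (p0, p1). E.g. with
# xscale == yscale == 1 and heading east, (x, y) maps to (p0+y, p1-x):
# shape x runs perpendicular to the heading, shape y along it.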
def get_shapepoly(self):
"""Return the current shape polygon as tuple of coordinate pairs.
No argument.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapetransform(4, -1, 0, 2)
>>> turtle.get_shapepoly()
((50, -20), (30, 20), (-50, 20), (-30, -20))
"""
shape = self.screen._shapes[self.turtle.shapeIndex]
if shape._type == "polygon":
return self._getshapepoly(shape._data, shape._type == "compound")
# else return None
def _getshapepoly(self, polygon, compound=False):
"""Calculate transformed shape polygon according to resizemode
and shapetransform.
"""
if self._resizemode == "user" or compound:
t11, t12, t21, t22 = self._shapetrafo
elif self._resizemode == "auto":
l = max(1, self._pensize/5.0)
t11, t12, t21, t22 = l, 0, 0, l
elif self._resizemode == "noresize":
return polygon
return tuple([(t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon])
def _drawturtle(self):
"""Manages the correct rendering of the turtle with respect to
its shape, resizemode, stretch and tilt etc."""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
titem = self.turtle._item
if self._shown and screen._updatecounter == 0 and screen._tracing > 0:
self._hidden_from_screen = False
tshape = shape._data
if ttype == "polygon":
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w = self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(titem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
screen._drawimage(titem, self._position, tshape)
elif ttype == "compound":
for item, (poly, fc, oc) in zip(titem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
else:
if self._hidden_from_screen:
return
if ttype == "polygon":
screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "")
elif ttype == "image":
screen._drawimage(titem, self._position,
screen._shapes["blank"]._data)
elif ttype == "compound":
for item in titem:
screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "")
self._hidden_from_screen = True
############################## stamp stuff ###############################
def stamp(self):
"""Stamp a copy of the turtleshape onto the canvas and return its id.
No argument.
Stamp a copy of the turtle shape onto the canvas at the current
turtle position. Return a stamp_id for that stamp, which can be
used to delete it by calling clearstamp(stamp_id).
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> turtle.stamp()
13
>>> turtle.fd(50)
"""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
tshape = shape._data
if ttype == "polygon":
stitem = screen._createpoly()
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w = self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(stitem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
stitem = screen._createimage("")
screen._drawimage(stitem, self._position, tshape)
elif ttype == "compound":
stitem = []
for element in tshape:
item = screen._createpoly()
stitem.append(item)
stitem = tuple(stitem)
for item, (poly, fc, oc) in zip(stitem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
self.stampItems.append(stitem)
self.undobuffer.push(("stamp", stitem))
return stitem
def _clearstamp(self, stampid):
"""does the work for clearstamp() and clearstamps()
"""
if stampid in self.stampItems:
if isinstance(stampid, tuple):
for subitem in stampid:
self.screen._delete(subitem)
else:
self.screen._delete(stampid)
self.stampItems.remove(stampid)
# Delete stampitem from undobuffer if necessary
# if clearstamp is called directly.
item = ("stamp", stampid)
buf = self.undobuffer
if item not in buf.buffer:
return
index = buf.buffer.index(item)
buf.buffer.remove(item)
if index <= buf.ptr:
buf.ptr = (buf.ptr - 1) % buf.bufsize
buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
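# Note on the ring-buffer surgery above: removing the "stamp" entry shifts
# later entries one slot back, so when the removed entry sat at or before
# the current pointer, ptr is stepped back and a [None] filler is inserted
# to keep the undobuffer's size and entry ordering consistent.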
def clearstamp(self, stampid):
"""Delete stamp with given stampid
Argument:
stampid - an integer, must be return value of previous stamp() call.
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> astamp = turtle.stamp()
>>> turtle.fd(50)
>>> turtle.clearstamp(astamp)
"""
self._clearstamp(stampid)
self._update()
def clearstamps(self, n=None):
"""Delete all or first/last n of turtle's stamps.
Optional argument:
n -- an integer
If n is None, delete all of pen's stamps,
else if n > 0 delete first n stamps
else if n < 0 delete last n stamps.
Example (for a Turtle instance named turtle):
>>> for i in range(8):
... turtle.stamp(); turtle.fd(30)
...
>>> turtle.clearstamps(2)
>>> turtle.clearstamps(-2)
>>> turtle.clearstamps()
"""
if n is None:
toDelete = self.stampItems[:]
elif n >= 0:
toDelete = self.stampItems[:n]
else:
toDelete = self.stampItems[n:]
for item in toDelete:
self._clearstamp(item)
self._update()
def _goto(self, end):
"""Move the pen to the point end, thereby drawing a line
if pen is down. All other methods for turtle movement depend
on this one.
"""
## Version with undo-stuff
go_modes = ( self._drawing,
self._pencolor,
self._pensize,
isinstance(self._fillpath, list))
screen = self.screen
undo_entry = ("go", self._position, end, go_modes,
(self.currentLineItem,
self.currentLine[:],
screen._pointlist(self.currentLineItem),
self.items[:])
)
if self.undobuffer:
self.undobuffer.push(undo_entry)
start = self._position
if self._speed and screen._tracing == 1:
diff = (end-start)
diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
delta = diff * (1.0/nhops)
for n in range(1, nhops):
if n == 1:
top = True
else:
top = False
self._position = start + delta * n
if self._drawing:
screen._drawline(self.drawingLineItem,
(start, self._position),
self._pencolor, self._pensize, top)
self._update()
if self._drawing:
screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
fill="", width=self._pensize)
# Turtle now at end,
if self._drawing: # now update currentLine
self.currentLine.append(end)
if isinstance(self._fillpath, list):
self._fillpath.append(end)
###### inheritance!!!
self._position = end
if self._creatingPoly:
self._poly.append(end)
if len(self.currentLine) > 42: # 42! answer to the ultimate question
# of life, the universe and everything
self._newLine()
self._update() #count=True)
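# Animation granularity in the loop above (illustrative numbers): for a
# 100-pixel move at speed 6,
#   nhops = 1 + int(100 / (3 * 1.1**6 * 6)) = 1 + int(100/31.9) = 4
# so the line is drawn in a few intermediate frames; higher speeds shrink
# nhops, and speed 0 (or tracer(0)) skips the incremental loop entirely.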
def _undogoto(self, entry):
"""Reverse a _goto. Used for undo()
"""
old, new, go_modes, coodata = entry
drawing, pc, ps, filling = go_modes
cLI, cL, pl, items = coodata
screen = self.screen
if abs(self._position - new) > 0.5:
print("undogoto: HELLO-SOMETHING-IS-WRONG-HERE!")
# restore former situation
self.currentLineItem = cLI
self.currentLine = cL
if pl == [(0, 0), (0, 0)]:
usepc = ""
else:
usepc = pc
screen._drawline(cLI, pl, fill=usepc, width=ps)
todelete = [i for i in self.items if (i not in items) and
(screen._type(i) == "line")]
for i in todelete:
screen._delete(i)
self.items.remove(i)
start = old
if self._speed and screen._tracing == 1:
diff = old - new
diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
delta = diff * (1.0/nhops)
for n in range(1, nhops):
if n == 1:
top = True
else:
top = False
self._position = new + delta * n
if drawing:
screen._drawline(self.drawingLineItem,
(start, self._position),
pc, ps, top)
self._update()
if drawing:
screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
fill="", width=ps)
# Turtle now at position old,
self._position = old
## if undo is done during creating a polygon, the last vertex
## will be deleted. if the polygon is entirely deleted,
## creatingPoly will be set to False.
## Polygons created before the last one will not be affected by undo()
if self._creatingPoly:
if len(self._poly) > 0:
self._poly.pop()
if self._poly == []:
self._creatingPoly = False
self._poly = None
if filling:
if self._fillpath == []:
self._fillpath = None
print("Improbable case in _undogoto!")
elif self._fillpath is not None:
self._fillpath.pop()
self._update() #count=True)
def _rotate(self, angle):
"""Turns pen clockwise by angle.
"""
if self.undobuffer:
self.undobuffer.push(("rot", angle, self._degreesPerAU))
angle *= self._degreesPerAU
neworient = self._orient.rotate(angle)
tracing = self.screen._tracing
if tracing == 1 and self._speed > 0:
anglevel = 3.0 * self._speed
steps = 1 + int(abs(angle)/anglevel)
delta = 1.0*angle/steps
for _ in range(steps):
self._orient = self._orient.rotate(delta)
self._update()
self._orient = neworient
self._update()
def _newLine(self, usePos=True):
"""Closes current line item and starts a new one.
Remark: if the current line becomes too long, animation
performance (via _drawline) slows down considerably.
"""
if len(self.currentLine) > 1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
self.currentLineItem = self.screen._createline()
self.items.append(self.currentLineItem)
else:
self.screen._drawline(self.currentLineItem, top=True)
self.currentLine = []
if usePos:
self.currentLine = [self._position]
def filling(self):
"""Return fillstate (True if filling, False else).
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.begin_fill()
>>> if turtle.filling():
... turtle.pensize(5)
... else:
... turtle.pensize(3)
"""
return isinstance(self._fillpath, list)
def begin_fill(self):
"""Called just before drawing a shape to be filled.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if not self.filling():
self._fillitem = self.screen._createpoly()
self.items.append(self._fillitem)
self._fillpath = [self._position]
self._newLine()
if self.undobuffer:
self.undobuffer.push(("beginfill", self._fillitem))
self._update()
def end_fill(self):
"""Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if self.filling():
if len(self._fillpath) > 2:
self.screen._drawpoly(self._fillitem, self._fillpath,
fill=self._fillcolor)
if self.undobuffer:
self.undobuffer.push(("dofill", self._fillitem))
self._fillitem = self._fillpath = None
self._update()
def dot(self, size=None, *color):
"""Draw a dot with diameter size, using color.
Optional arguments:
size -- an integer >= 1 (if given)
color -- a colorstring or a numeric color tuple
Draw a circular dot with diameter size, using color.
If size is not given, the maximum of pensize+4 and 2*pensize is used.
Example (for a Turtle instance named turtle):
>>> turtle.dot()
>>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
"""
if not color:
if isinstance(size, (str, tuple)):
color = self._colorstr(size)
size = self._pensize + max(self._pensize, 4)
else:
color = self._pencolor
if not size:
size = self._pensize + max(self._pensize, 4)
else:
if size is None:
size = self._pensize + max(self._pensize, 4)
color = self._colorstr(color)
if hasattr(self.screen, "_dot"):
item = self.screen._dot(self._position, size, color)
self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("dot", item))
else:
pen = self.pen()
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
try:
if self.resizemode() == 'auto':
self.ht()
self.pendown()
self.pensize(size)
self.pencolor(color)
self.forward(0)
finally:
self.pen(pen)
if self.undobuffer:
self.undobuffer.cumulate = False
def _write(self, txt, align, font):
"""Performs the writing for write()
"""
item, end = self.screen._write(self._position, txt, align, font,
self._pencolor)
self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("wri", item))
return end
def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
"""Write text at the current turtle position.
Arguments:
arg -- info, which is to be written to the TurtleScreen
move (optional) -- True/False
align (optional) -- one of the strings "left", "center" or "right"
font (optional) -- a triple (fontname, fontsize, fonttype)
Write text - the string representation of arg - at the current
turtle position according to align ("left", "center" or "right")
and with the given font.
If move is True, the pen is moved to the bottom-right corner
of the text. By default, move is False.
Example (for a Turtle instance named turtle):
>>> turtle.write('Home = ', True, align="center")
>>> turtle.write((0,0), True)
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
end = self._write(str(arg), align.lower(), font)
if move:
x, y = self.pos()
self.setpos(end, y)
if self.undobuffer:
self.undobuffer.cumulate = False
def begin_poly(self):
"""Start recording the vertices of a polygon.
No argument.
Start recording the vertices of a polygon. Current turtle position
is first point of polygon.
Example (for a Turtle instance named turtle):
>>> turtle.begin_poly()
"""
self._poly = [self._position]
self._creatingPoly = True
def end_poly(self):
"""Stop recording the vertices of a polygon.
No argument.
Stop recording the vertices of a polygon. Current turtle position is
last point of polygon. This will be connected with the first point.
Example (for a Turtle instance named turtle):
>>> turtle.end_poly()
"""
self._creatingPoly = False
def get_poly(self):
"""Return the lastly recorded polygon.
No argument.
Example (for a Turtle instance named turtle):
>>> p = turtle.get_poly()
>>> turtle.register_shape("myFavouriteShape", p)
"""
## returns None if no polygon has been recorded
if self._poly is not None:
return tuple(self._poly)
def getscreen(self):
"""Return the TurtleScreen object, the turtle is drawing on.
No argument.
Return the TurtleScreen object, the turtle is drawing on.
So TurtleScreen-methods can be called for that object.
Example (for a Turtle instance named turtle):
>>> ts = turtle.getscreen()
>>> ts
<turtle.TurtleScreen object at 0x0106B770>
>>> ts.bgcolor("pink")
"""
return self.screen
def getturtle(self):
"""Return the Turtleobject itself.
No argument.
Only reasonable use: as a function to return the 'anonymous turtle':
Example:
>>> pet = getturtle()
>>> pet.fd(50)
>>> pet
<turtle.Turtle object at 0x0187D810>
>>> turtles()
[<turtle.Turtle object at 0x0187D810>]
"""
return self
getpen = getturtle
################################################################
### screen oriented methods delegating to methods of TurtleScreen
################################################################
def _delay(self, delay=None):
"""Set delay value which determines speed of turtle animation.
"""
return self.screen.delay(delay)
def onclick(self, fun, btn=1, add=None):
"""Bind fun to mouse-click event on this turtle on canvas.
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
btn -- number of the mouse-button defaults to 1 (left mouse button).
add -- True or False. If True, new binding will be added, otherwise
it will replace a former binding.
Example for the anonymous turtle, i.e. the procedural way:
>>> def turn(x, y):
... left(360)
...
>>> onclick(turn) # Now clicking into the turtle will turn it.
>>> onclick(None) # event-binding will be removed
"""
self.screen._onclick(self.turtle._item, fun, btn, add)
self._update()
def onrelease(self, fun, btn=1, add=None):
"""Bind fun to mouse-button-release event on this turtle on canvas.
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
btn -- number of the mouse-button defaults to 1 (left mouse button).
Example (for a MyTurtle instance named joe):
>>> class MyTurtle(Turtle):
... def glow(self,x,y):
... self.fillcolor("red")
... def unglow(self,x,y):
... self.fillcolor("")
...
>>> joe = MyTurtle()
>>> joe.onclick(joe.glow)
>>> joe.onrelease(joe.unglow)
Clicking on joe turns fillcolor red, unclicking turns it to
transparent.
"""
self.screen._onrelease(self.turtle._item, fun, btn, add)
self._update()
def ondrag(self, fun, btn=1, add=None):
"""Bind fun to mouse-move event on this turtle on canvas.
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
btn -- number of the mouse-button defaults to 1 (left mouse button).
Every sequence of mouse-move-events on a turtle is preceded by a
mouse-click event on that turtle.
Example (for a Turtle instance named turtle):
>>> turtle.ondrag(turtle.goto)
Subsequently clicking and dragging a Turtle will move it
across the screen thereby producing handdrawings (if pen is
down).
"""
self.screen._ondrag(self.turtle._item, fun, btn, add)
def _undo(self, action, data):
"""Does the main part of the work for undo()
"""
if self.undobuffer is None:
return
if action == "rot":
angle, degPAU = data
self._rotate(-angle*degPAU/self._degreesPerAU)
dummy = self.undobuffer.pop()
elif action == "stamp":
stitem = data[0]
self.clearstamp(stitem)
elif action == "go":
self._undogoto(data)
elif action in ["wri", "dot"]:
item = data[0]
self.screen._delete(item)
self.items.remove(item)
elif action == "dofill":
item = data[0]
self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)),
fill="", outline="")
elif action == "beginfill":
item = data[0]
self._fillitem = self._fillpath = None
if item in self.items:
self.screen._delete(item)
self.items.remove(item)
elif action == "pen":
TPen.pen(self, data[0])
self.undobuffer.pop()
def undo(self):
"""undo (repeatedly) the last turtle action.
No argument.
undo (repeatedly) the last turtle action.
Number of available undo actions is determined by the size of
the undobuffer.
Example (for a Turtle instance named turtle):
>>> for i in range(4):
... turtle.fd(50); turtle.lt(80)
...
>>> for i in range(8):
... turtle.undo()
...
"""
if self.undobuffer is None:
return
item = self.undobuffer.pop()
action = item[0]
data = item[1:]
if action == "seq":
while data:
item = data.pop()
self._undo(item[0], item[1:])
else:
self._undo(action, data)
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
"""Return the singleton screen object.
If none exists at the moment, create a new one and return it,
else return the existing one."""
if Turtle._screen is None:
Turtle._screen = _Screen()
return Turtle._screen
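# Editorial sketch (hypothetical helper, not part of the original
# module): demonstrate the singleton behaviour documented above --
# every call to Screen() returns the same _Screen instance, so
# settings made through one reference are visible through all others.
def _screen_singleton_demo():
    """Sketch only; requires a Tk display to run."""
    s1 = Screen()
    s2 = Screen()
    assert s1 is s2          # both names refer to the single _Screen instance
    s1.bgcolor("pink")       # visible via s2 as well, it is the same object
    return s1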
class _Screen(TurtleScreen):
_root = None
_canvas = None
_title = _CFG["title"]
def __init__(self):
# XXX there is no need for this code to be conditional,
# as there will be only a single _Screen instance, anyway
# XXX actually, the turtle demo is injecting root window,
# so perhaps the conditional creation of a root should be
# preserved (perhaps by passing it as an optional parameter)
if _Screen._root is None:
_Screen._root = self._root = _Root()
self._root.title(_Screen._title)
self._root.ondestroy(self._destroy)
if _Screen._canvas is None:
width = _CFG["width"]
height = _CFG["height"]
canvwidth = _CFG["canvwidth"]
canvheight = _CFG["canvheight"]
leftright = _CFG["leftright"]
topbottom = _CFG["topbottom"]
self._root.setupcanvas(width, height, canvwidth, canvheight)
_Screen._canvas = self._root._getcanvas()
TurtleScreen.__init__(self, _Screen._canvas)
self.setup(width, height, leftright, topbottom)
def setup(self, width=_CFG["width"], height=_CFG["height"],
startx=_CFG["leftright"], starty=_CFG["topbottom"]):
""" Set the size and position of the main window.
Arguments:
width: as integer a size in pixels, as float a fraction of the screen.
Default is 50% of screen.
height: as integer the height in pixels, as float a fraction of the
screen. Default is 75% of screen.
startx: if positive, starting position in pixels from the left
edge of the screen, if negative from the right edge
Default, startx=None is to center window horizontally.
starty: if positive, starting position in pixels from the top
edge of the screen, if negative from the bottom edge
Default, starty=None is to center window vertically.
Examples (for a Screen instance named screen):
>>> screen.setup(width=200, height=200, startx=0, starty=0)
sets window to 200x200 pixels, in upper left of screen
>>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
sets window to 75% of screen by 50% of screen and centers
"""
if not hasattr(self._root, "set_geometry"):
return
sw = self._root.win_width()
sh = self._root.win_height()
if isinstance(width, float) and 0 <= width <= 1:
width = sw*width
if startx is None:
startx = (sw - width) / 2
if isinstance(height, float) and 0 <= height <= 1:
height = sh*height
if starty is None:
starty = (sh - height) / 2
self._root.set_geometry(width, height, startx, starty)
self.update()
def title(self, titlestring):
"""Set title of turtle-window
Argument:
titlestring -- a string, to appear in the titlebar of the
turtle graphics window.
This is a method of Screen-class. Not available for TurtleScreen-
objects.
Example (for a Screen instance named screen):
>>> screen.title("Welcome to the turtle-zoo!")
"""
if _Screen._root is not None:
_Screen._root.title(titlestring)
_Screen._title = titlestring
def _destroy(self):
root = self._root
if root is _Screen._root:
Turtle._pen = None
Turtle._screen = None
_Screen._root = None
_Screen._canvas = None
TurtleScreen._RUNNING = False
root.destroy()
def bye(self):
"""Shut the turtlegraphics window.
Example (for a TurtleScreen instance named screen):
>>> screen.bye()
"""
self._destroy()
def exitonclick(self):
"""Go into mainloop until the mouse is clicked.
No arguments.
Bind bye() method to mouseclick on TurtleScreen.
If "using_IDLE" - value in configuration dictionary is False
(default value), enter mainloop.
If IDLE with -n switch (no subprocess) is used, this value should be
set to True in turtle.cfg. In this case IDLE's mainloop
is active also for the client script.
This is a method of the Screen-class and not available for
TurtleScreen instances.
Example (for a Screen instance named screen):
>>> screen.exitonclick()
"""
def exitGracefully(x, y):
"""Screen.bye() with two dummy-parameters"""
self.bye()
self.onclick(exitGracefully)
if _CFG["using_IDLE"]:
return
try:
mainloop()
except AttributeError:
exit(0)
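# Editorial note (assumption about turtle.cfg syntax, as described in
# the exitonclick() docstring above): when running under IDLE started
# with the -n switch, a configuration line such as
#
#     using_IDLE = True
#
# makes exitonclick() skip entering mainloop(), since IDLE's own
# mainloop is already active for the client script.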
class Turtle(RawTurtle):
"""RawTurtle auto-creating (scrolled) canvas.
When a Turtle object is created or a function derived from some
Turtle method is called, a TurtleScreen object is automatically created.
"""
_pen = None
_screen = None
def __init__(self,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if Turtle._screen is None:
Turtle._screen = Screen()
RawTurtle.__init__(self, Turtle._screen,
shape=shape,
undobuffersize=undobuffersize,
visible=visible)
Pen = Turtle
def write_docstringdict(filename="turtle_docstringdict"):
"""Create and write docstring-dictionary to file.
Optional argument:
filename -- a string, used as filename
default value is turtle_docstringdict
Has to be called explicitly (not used by the turtle-graphics classes).
The docstring dictionary will be written to the Python script <filename>.py.
It is intended to serve as a template for translation of the docstrings
into different languages.
"""
docsdict = {}
for methodname in _tg_screen_functions:
key = "_Screen."+methodname
docsdict[key] = eval(key).__doc__
for methodname in _tg_turtle_functions:
key = "Turtle."+methodname
docsdict[key] = eval(key).__doc__
with open("%s.py" % filename,"w") as f:
keys = sorted([x for x in docsdict.keys()
if x.split('.')[1] not in _alias_list])
f.write('docsdict = {\n\n')
for key in keys[:-1]:
f.write('%s :\n' % repr(key))
f.write(' """%s\n""",\n\n' % docsdict[key])
key = keys[-1]
f.write('%s :\n' % repr(key))
f.write(' """%s\n"""\n\n' % docsdict[key])
f.write("}\n")
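# Editorial sketch (hypothetical file name, not part of the original
# module): generate the English template that read_docstrings() below
# expects for a given language.
def _docstringdict_demo():
    """Sketch only: writes turtle_docstringdict.py to the current
    directory; a translator would copy it to, e.g.,
    turtle_docstringdict_german.py and translate the values."""
    write_docstringdict("turtle_docstringdict")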
def read_docstrings(lang):
"""Read in docstrings from lang-specific docstring dictionary.
Transfer docstrings, translated to lang, from a dictionary-file
to the methods of classes Screen and Turtle and - in revised form -
to the corresponding functions.
"""
modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()}
module = __import__(modname)
docsdict = module.docsdict
for key in docsdict:
try:
# eval(key).im_func.__doc__ = docsdict[key]
eval(key).__doc__ = docsdict[key]
except Exception:
print("Bad docstring-entry: %s" % key)
_LANGUAGE = _CFG["language"]
try:
if _LANGUAGE != "english":
read_docstrings(_LANGUAGE)
except ImportError:
print("Cannot find docsdict for", _LANGUAGE)
except Exception:
print ("Unknown Error when trying to import %s-docstring-dictionary" %
_LANGUAGE)
def getmethparlist(ob):
"""Get strings describing the arguments for the given object
Returns a pair of strings representing function parameter lists
including parentheses. The first string is suitable for use in
function definition and the second is suitable for use in function
call. The "self" parameter is not included.
"""
defText = callText = ""
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
# Try and build one for Python defined functions
args, varargs, varkw = inspect.getargs(ob.__code__)
items2 = args[1:]
realArgs = args[1:]
defaults = ob.__defaults__ or []
defaults = ["=%r" % (value,) for value in defaults]
defaults = [""] * (len(realArgs)-len(defaults)) + defaults
items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]
if varargs is not None:
items1.append("*" + varargs)
items2.append("*" + varargs)
if varkw is not None:
items1.append("**" + varkw)
items2.append("**" + varkw)
defText = ", ".join(items1)
defText = "(%s)" % defText
callText = ", ".join(items2)
callText = "(%s)" % callText
return defText, callText
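# Editorial sketch (hypothetical helper, not part of the original
# module): what getmethparlist() produces for a sample method. For
# RawTurtle.dot, defined above as dot(self, size=None, *color), it
# returns the pair ('(size=None, *color)', '(size, *color)') -- the
# first string for a def statement, the second for the forwarding call.
def _getmethparlist_demo():
    return getmethparlist(RawTurtle.dot)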
def _turtle_docrevise(docstr):
"""To reduce docstrings from RawTurtle class for functions
"""
import re
if docstr is None:
return None
turtlename = _CFG["exampleturtle"]
newdocstr = docstr.replace("%s." % turtlename,"")
parexp = re.compile(r' \(.+ %s\):' % turtlename)
newdocstr = parexp.sub(":", newdocstr)
return newdocstr
def _screen_docrevise(docstr):
"""To reduce docstrings from TurtleScreen class for functions
"""
import re
if docstr is None:
return None
screenname = _CFG["examplescreen"]
newdocstr = docstr.replace("%s." % screenname,"")
parexp = re.compile(r' \(.+ %s\):' % screenname)
newdocstr = parexp.sub(":", newdocstr)
return newdocstr
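# Editorial sketch (hypothetical helper; assumes the default
# _CFG["exampleturtle"] == "turtle"): _turtle_docrevise() turns a line
# such as 'Example (for a Turtle instance named turtle):' into
# 'Example:' and a doctest line like '>>> turtle.forward(25)' into
# '>>> forward(25)', so method docstrings read naturally on the
# module-level functions generated below.
def _docrevise_demo():
    return _turtle_docrevise(RawTurtle.forward.__doc__)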
## The following mechanism makes all methods of RawTurtle and Turtle available
## as functions. So we can add, change, or delete methods in these
## classes without needing to change anything here.
__func_body = """\
def {name}{paramslist}:
if {obj} is None:
if not TurtleScreen._RUNNING:
TurtleScreen._RUNNING = True
raise Terminator
{obj} = {init}
try:
return {obj}.{name}{argslist}
except TK.TclError:
if not TurtleScreen._RUNNING:
TurtleScreen._RUNNING = True
raise Terminator
raise
"""
def _make_global_funcs(functions, cls, obj, init, docrevise):
for methodname in functions:
method = getattr(cls, methodname)
pl1, pl2 = getmethparlist(method)
if pl1 == "":
print(">>>>>>", pl1, pl2)
continue
defstr = __func_body.format(obj=obj, init=init, name=methodname,
paramslist=pl1, argslist=pl2)
exec(defstr, globals())
globals()[methodname].__doc__ = docrevise(method.__doc__)
_make_global_funcs(_tg_screen_functions, _Screen,
'Turtle._screen', 'Screen()', _screen_docrevise)
_make_global_funcs(_tg_turtle_functions, Turtle,
'Turtle._pen', 'Turtle()', _turtle_docrevise)
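# Editorial note: the expansion of __func_body for, e.g., methodname
# 'forward' is schematically (reconstructed by hand, not emitted
# verbatim by the code above):
#
#     def forward(distance):
#         if Turtle._pen is None:
#             if not TurtleScreen._RUNNING:
#                 TurtleScreen._RUNNING = True
#                 raise Terminator
#             Turtle._pen = Turtle()
#         try:
#             return Turtle._pen.forward(distance)
#         except TK.TclError:
#             if not TurtleScreen._RUNNING:
#                 TurtleScreen._RUNNING = True
#                 raise Terminator
#             raise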
done = mainloop
if __name__ == "__main__":
def switchpen():
if isdown():
pu()
else:
pd()
def demo1():
"""Demo of old turtle.py - module"""
reset()
tracer(True)
up()
backward(100)
down()
# draw 3 squares; the last filled
width(3)
for i in range(3):
if i == 2:
begin_fill()
for _ in range(4):
forward(20)
left(90)
if i == 2:
color("maroon")
end_fill()
up()
forward(30)
down()
width(1)
color("black")
# move out of the way
tracer(False)
up()
right(90)
forward(100)
right(90)
forward(100)
right(180)
down()
# some text
write("startstart", 1)
write("start", 1)
color("red")
# staircase
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
# filled staircase
tracer(True)
begin_fill()
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
end_fill()
# more text
def demo2():
"""Demo of some new features."""
speed(1)
st()
pensize(3)
setheading(towards(0, 0))
radius = distance(0, 0)/2.0
rt(90)
for _ in range(18):
switchpen()
circle(radius, 10)
write("wait a moment...")
while undobufferentries():
undo()
reset()
lt(90)
colormode(255)
laenge = 10
pencolor("green")
pensize(3)
lt(180)
for i in range(-2, 16):
if i > 0:
begin_fill()
fillcolor(255-15*i, 0, 15*i)
for _ in range(3):
fd(laenge)
lt(120)
end_fill()
laenge += 10
lt(15)
speed((speed()+1)%12)
#end_fill()
lt(120)
pu()
fd(70)
rt(30)
pd()
color("red","yellow")
speed(0)
begin_fill()
for _ in range(4):
circle(50, 90)
rt(90)
fd(30)
rt(90)
end_fill()
lt(90)
pu()
fd(30)
pd()
shape("turtle")
tri = getturtle()
tri.resizemode("auto")
turtle = Turtle()
turtle.resizemode("auto")
turtle.shape("turtle")
turtle.reset()
turtle.left(90)
turtle.speed(0)
turtle.up()
turtle.goto(280, 40)
turtle.lt(30)
turtle.down()
turtle.speed(6)
turtle.color("blue","orange")
turtle.pensize(2)
tri.speed(6)
setheading(towards(turtle))
count = 1
while tri.distance(turtle) > 4:
turtle.fd(3.5)
turtle.lt(0.6)
tri.setheading(tri.towards(turtle))
tri.fd(4)
if count % 20 == 0:
turtle.stamp()
tri.stamp()
switchpen()
count += 1
tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
tri.pencolor("black")
tri.pencolor("red")
def baba(xdummy, ydummy):
clearscreen()
bye()
time.sleep(2)
while undobufferentries():
tri.undo()
turtle.undo()
tri.fd(50)
tri.write(" Click me!", font = ("Courier", 12, "bold") )
tri.onclick(baba, 1)
demo1()
demo2()
exitonclick()
| 143,620 | 4,140 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/imp.py | """This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases you should prefer the functionality of the importlib
module over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import create_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
create_dynamic = None
from importlib._bootstrap import _ERR_MSG, _exec, _load, _builtin_from_name
from importlib._bootstrap_external import SourcelessFileLoader
from importlib import machinery
from importlib import util
import importlib
import os
import sys
import tokenize
import types
import warnings
warnings.warn("the imp module is deprecated in favour of importlib; "
"see the module's documentation for alternative uses",
DeprecationWarning, stacklevel=2)
# DEPRECATED
SEARCH_ERROR = 0
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
PY_RESOURCE = 4
PKG_DIRECTORY = 5
C_BUILTIN = 6
PY_FROZEN = 7
PY_CODERESOURCE = 8
IMP_HOOK = 9
def new_module(name):
"""**DEPRECATED**
Create a new module.
The module is not entered into sys.modules.
"""
return types.ModuleType(name)
def get_magic():
"""**DEPRECATED**
Return the magic number for .pyc files.
"""
return util.MAGIC_NUMBER
def get_tag():
"""Return the magic tag for .pyc files."""
return sys.implementation.cache_tag
def cache_from_source(path, debug_override=None):
"""**DEPRECATED**
Given the path to a .py file, return the path to its .pyc file.
The .py file does not need to exist; this simply returns the path to the
.pyc file calculated as if the .py file were imported.
If debug_override is not None, then it must be a boolean and is used in
place of sys.flags.optimize.
If sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return util.cache_from_source(path, debug_override)
def source_from_cache(path):
"""**DEPRECATED**
Given the path to a .pyc file, return the path to its .py file.
The .pyc file does not need to exist; this simply returns the path to
the .py file calculated to correspond to the .pyc file. If path does
not conform to PEP 3147 format, ValueError will be raised. If
sys.implementation.cache_tag is None then NotImplementedError is raised.
"""
return util.source_from_cache(path)
def get_suffixes():
"""**DEPRECATED**"""
extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
return extensions + source + bytecode
class NullImporter:
"""**DEPRECATED**
Null import object.
"""
def __init__(self, path):
if path == '':
raise ImportError('empty pathname', path='')
elif os.path.isdir(path):
raise ImportError('existing directory', path=path)
def find_module(self, fullname):
"""Always returns None."""
return None
class _HackedGetData:
"""Compatibility support for 'file' arguments of various load_*()
functions."""
def __init__(self, fullname, path, file=None):
super().__init__(fullname, path)
self.file = file
def get_data(self, path):
"""Gross hack to contort loader to deal w/ load_*()'s bad API."""
if self.file and path == self.path:
if not self.file.closed:
file = self.file
else:
self.file = file = open(self.path, 'r')
with file:
# Technically should be returning bytes, but
# SourceLoader.get_code() just passed what is returned to
# compile() which can handle str. And converting to bytes would
# require figuring out the encoding to decode to and
# tokenize.detect_encoding() only accepts bytes.
return file.read()
else:
return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader):
"""Compatibility support for implementing load_source()."""
def load_source(name, pathname, file=None):
loader = _LoadSourceCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = machinery.SourceFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
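# Editorial sketch (hypothetical helper, not part of the original
# module): the importlib idiom recommended in place of the deprecated
# load_source(). Unlike load_source(), this sketch does not register
# the module in sys.modules.
def _load_source_replacement_demo(name, pathname):
    spec = util.spec_from_file_location(name, pathname)
    module = util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module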
class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader):
"""Compatibility support for implementing load_compiled()."""
def load_compiled(name, pathname, file=None):
"""**DEPRECATED**"""
loader = _LoadCompiledCompatibility(name, pathname, file)
spec = util.spec_from_file_location(name, pathname, loader=loader)
if name in sys.modules:
module = _exec(spec, sys.modules[name])
else:
module = _load(spec)
# To allow reloading to potentially work, use a non-hacked loader which
# won't rely on a now-closed file object.
module.__loader__ = SourcelessFileLoader(name, pathname)
module.__spec__.loader = module.__loader__
return module
def load_package(name, path):
"""**DEPRECATED**"""
if os.path.isdir(path):
extensions = (machinery.SOURCE_SUFFIXES[:] +
machinery.BYTECODE_SUFFIXES[:])
for extension in extensions:
init_path = os.path.join(path, '__init__' + extension)
if os.path.exists(init_path):
path = init_path
break
else:
raise ValueError('{!r} is not a package'.format(path))
spec = util.spec_from_file_location(name, path,
submodule_search_locations=[])
if name in sys.modules:
return _exec(spec, sys.modules[name])
else:
return _load(spec)
def load_module(name, file, filename, details):
"""**DEPRECATED**
Load a module, given information returned by find_module().
The module name must include the full package name, if any.
"""
suffix, mode, type_ = details
if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
raise ValueError('invalid file open mode {!r}'.format(mode))
elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
msg = 'file object required for import (type code {})'.format(type_)
raise ValueError(msg)
elif type_ == PY_SOURCE:
return load_source(name, filename, file)
elif type_ == PY_COMPILED:
return load_compiled(name, filename, file)
elif type_ == C_EXTENSION and load_dynamic is not None:
if file is None:
with open(filename, 'rb') as opened_file:
return load_dynamic(name, filename, opened_file)
else:
return load_dynamic(name, filename, file)
elif type_ == PKG_DIRECTORY:
return load_package(name, filename)
elif type_ == C_BUILTIN:
return init_builtin(name)
elif type_ == PY_FROZEN:
return init_frozen(name)
else:
msg = "Don't know how to import {} (type code {})".format(name, type_)
raise ImportError(msg, name=name)
def find_module(name, path=None):
"""**DEPRECATED**
Search for a module.
If path is omitted or None, search for a built-in, frozen or special
module and continue search in sys.path. The module name cannot
contain '.'; to search for a submodule of a package, pass the
submodule name and the package's __path__.
"""
if not isinstance(name, str):
raise TypeError("'name' must be a str, not {}".format(type(name)))
elif not isinstance(path, (type(None), list)):
# Backwards-compatibility
raise RuntimeError("'path' must be None or a list, "
"not {}".format(type(path)))
if path is None:
if is_builtin(name):
return None, None, ('', '', C_BUILTIN)
elif is_frozen(name):
return None, None, ('', '', PY_FROZEN)
else:
path = sys.path
for entry in path:
package_directory = os.path.join(entry, name)
for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
package_file_name = '__init__' + suffix
file_path = os.path.join(package_directory, package_file_name)
if os.path.isfile(file_path):
return None, package_directory, ('', '', PKG_DIRECTORY)
for suffix, mode, type_ in get_suffixes():
file_name = name + suffix
file_path = os.path.join(entry, file_name)
if os.path.isfile(file_path):
break
else:
continue
break # Break out of outer loop when breaking out of inner loop.
else:
raise ImportError(_ERR_MSG.format(name), name=name)
encoding = None
if 'b' not in mode:
with open(file_path, 'rb') as file:
encoding = tokenize.detect_encoding(file.readline)[0]
file = open(file_path, mode, encoding=encoding)
return file, file_path, (suffix, mode, type_)
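# Editorial sketch (hypothetical helper, not part of the original
# module): the classic pairing of find_module() and load_module().
# For source and bytecode modules find_module() returns an open file
# object that the caller must close, as shown.
def _find_and_load_demo(name):
    file, pathname, description = find_module(name)
    try:
        return load_module(name, file, pathname, description)
    finally:
        if file is not None:
            file.close()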
def reload(module):
"""**DEPRECATED**
Reload the module and return it.
The module must have been successfully imported before.
"""
return importlib.reload(module)
def init_builtin(name):
"""**DEPRECATED**
Load and return a built-in module by name, or None if such a module
doesn't exist.
"""
try:
return _builtin_from_name(name)
except ImportError:
return None
if create_dynamic:
def load_dynamic(name, path, file=None):
"""**DEPRECATED**
Load an extension module.
"""
import importlib.machinery
loader = importlib.machinery.ExtensionFileLoader(name, path)
# Issue #24748: Skip the sys.modules check in _load_module_shim;
# always load new extension
spec = importlib.machinery.ModuleSpec(
name=name, loader=loader, origin=path)
return _load(spec)
else:
load_dynamic = None
| 10,669 | 347 | jart/cosmopolitan | false |
cosmopolitan/third_party/python/Lib/telnetlib.py | r"""TELNET client class.
Based on RFC 854: TELNET Protocol Specification, by J. Postel and
J. Reynolds
Example:
>>> from telnetlib import Telnet
>>> tn = Telnet('www.python.org', 79) # connect to finger port
>>> tn.write(b'guido\r\n')
>>> print(tn.read_all())
Login Name TTY Idle When Where
guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
>>>
Note that read_some() won't read until eof -- it just reads some data
-- but it guarantees to read at least one byte unless EOF is hit.
It is possible to pass a Telnet object to a selector in order to wait until
more data is available. Note that in this case, read_eager() may return b''
even if there was data on the socket, because the protocol negotiation may have
eaten the data. This is why EOFError is needed in some cases to distinguish
between "no data" and "connection closed" (since the socket also appears ready
for reading when it is closed).
To do:
- option negotiation
- timeout should be intrinsic to the connection object instead of an
option on one of the read calls only
"""
# Imported modules
import sys
import socket
import selectors
from time import monotonic as _time
__all__ = ["Telnet"]
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
IAC = bytes([255]) # "Interpret As Command"
DONT = bytes([254])
DO = bytes([253])
WONT = bytes([252])
WILL = bytes([251])
theNULL = bytes([0])
SE = bytes([240]) # Subnegotiation End
NOP = bytes([241]) # No Operation
DM = bytes([242]) # Data Mark
BRK = bytes([243]) # Break
IP = bytes([244]) # Interrupt process
AO = bytes([245]) # Abort output
AYT = bytes([246]) # Are You There
EC = bytes([247]) # Erase Character
EL = bytes([248]) # Erase Line
GA = bytes([249]) # Go Ahead
SB = bytes([250]) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = bytes([0]) # 8-bit data path
ECHO = bytes([1]) # echo
RCP = bytes([2]) # prepare to reconnect
SGA = bytes([3]) # suppress go ahead
NAMS = bytes([4]) # approximate message size
STATUS = bytes([5]) # give status
TM = bytes([6]) # timing mark
RCTE = bytes([7]) # remote controlled transmission and echo
NAOL = bytes([8]) # negotiate about output line width
NAOP = bytes([9]) # negotiate about output page size
NAOCRD = bytes([10]) # negotiate about CR disposition
NAOHTS = bytes([11]) # negotiate about horizontal tabstops
NAOHTD = bytes([12]) # negotiate about horizontal tab disposition
NAOFFD = bytes([13]) # negotiate about formfeed disposition
NAOVTS = bytes([14]) # negotiate about vertical tab stops
NAOVTD = bytes([15]) # negotiate about vertical tab disposition
NAOLFD = bytes([16]) # negotiate about output LF disposition
XASCII = bytes([17]) # extended ascii character set
LOGOUT = bytes([18]) # force logout
BM = bytes([19]) # byte macro
DET = bytes([20]) # data entry terminal
SUPDUP = bytes([21]) # supdup protocol
SUPDUPOUTPUT = bytes([22]) # supdup output
SNDLOC = bytes([23]) # send location
TTYPE = bytes([24]) # terminal type
EOR = bytes([25]) # end of record
TUID = bytes([26]) # TACACS user identification
OUTMRK = bytes([27]) # output marking
TTYLOC = bytes([28]) # terminal location number
VT3270REGIME = bytes([29]) # 3270 regime
X3PAD = bytes([30]) # X.3 PAD
NAWS = bytes([31]) # window size
TSPEED = bytes([32]) # terminal speed
LFLOW = bytes([33]) # remote flow control
LINEMODE = bytes([34]) # Linemode option
XDISPLOC = bytes([35]) # X Display Location
OLD_ENVIRON = bytes([36]) # Old - Environment variables
AUTHENTICATION = bytes([37]) # Authenticate
ENCRYPT = bytes([38]) # Encryption option
NEW_ENVIRON = bytes([39]) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = bytes([40]) # TN3270E
XAUTH = bytes([41]) # XAUTH
CHARSET = bytes([42]) # CHARSET
RSP = bytes([43]) # Telnet Remote Serial Port
COM_PORT_OPTION = bytes([44]) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = bytes([45]) # Telnet Suppress Local Echo
TLS = bytes([46]) # Telnet Start TLS
KERMIT = bytes([47]) # KERMIT
SEND_URL = bytes([48]) # SEND-URL
FORWARD_X = bytes([49]) # FORWARD_X
PRAGMA_LOGON = bytes([138]) # TELOPT PRAGMA LOGON
SSPI_LOGON = bytes([139]) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = bytes([140]) # TELOPT PRAGMA HEARTBEAT
EXOPL = bytes([255]) # Extended-Options-List
NOOPT = bytes([0])
# poll/select have the advantage of not requiring any extra file descriptor,
# unlike epoll/kqueue (also, they require a single syscall).
if hasattr(selectors, 'PollSelector'):
_TelnetSelector = selectors.PollSelector
else:
_TelnetSelector = selectors.SelectSelector
class Telnet:
"""Telnet interface class.
An instance of this class represents a connection to a telnet
server. The instance is initially not connected; the open()
method must be used to establish a connection. Alternatively, the
host name and optional port number can be passed to the
constructor, too.
Don't try to reopen an already connected instance.
This class has many read_*() methods. Note that some of them
raise EOFError when the end of the connection is read, because
they can return an empty string for other reasons. See the
individual doc strings.
read_until(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
read_all()
Read all data until EOF; may block.
read_some()
Read at least one byte or EOF; may block.
read_very_eager()
Read all data available already queued or on the socket,
without blocking.
read_eager()
Read either data already queued or some data available on the
socket, without blocking.
read_lazy()
Read all data in the raw queue (processing it first), without
doing any socket I/O.
read_very_lazy()
Reads all data in the cooked queue, without doing any socket
I/O.
read_sb_data()
Reads available data between SB ... SE sequence. Don't block.
set_option_negotiation_callback(callback)
Each time a telnet option is read on the input flow, this callback
(if set) is called with the following parameters :
callback(telnet socket, command, option)
option will be chr(0) when there is no option.
No other action is done afterwards by telnetlib.
"""
def __init__(self, host=None, port=0,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Constructor.
When called without arguments, create an unconnected instance.
With a hostname argument, it connects the instance; port number
and timeout are optional.
"""
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
self.timeout = timeout
self.sock = None
self.rawq = b''
self.irawq = 0
self.cookedq = b''
self.eof = 0
self.iacseq = b'' # Buffer for IAC sequence.
self.sb = 0 # flag for SB and SE sequence.
self.sbdataq = b''
self.option_callback = None
if host is not None:
self.open(host, port, timeout)
def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Connect to a host.
The optional second argument is the port number, which
defaults to the standard telnet port (23).
Don't try to reopen an already connected instance.
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
self.timeout = timeout
self.sock = socket.create_connection((host, port), timeout)
def __del__(self):
"""Destructor -- close the connection."""
self.close()
def msg(self, msg, *args):
"""Print a debug message, when the debug level is > 0.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
"""
if self.debuglevel > 0:
print('Telnet(%s,%s):' % (self.host, self.port), end=' ')
if args:
print(msg % args)
else:
print(msg)
def set_debuglevel(self, debuglevel):
"""Set the debug level.
The higher it is, the more debug output you get (on sys.stdout).
"""
self.debuglevel = debuglevel
def close(self):
"""Close the connection."""
sock = self.sock
self.sock = None
self.eof = True
self.iacseq = b''
self.sb = 0
if sock:
sock.close()
def get_socket(self):
"""Return the socket object used internally."""
return self.sock
def fileno(self):
"""Return the fileno() of the socket object used internally."""
return self.sock.fileno()
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
OSError if the connection is closed.
"""
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %r", buffer)
self.sock.sendall(buffer)
def read_until(self, match, timeout=None):
"""Read until a given string is encountered or until timeout.
When no match is found, return whatever is available instead,
possibly the empty string. Raise EOFError if the connection
is closed and no cooked data is available.
"""
n = len(match)
self.process_rawq()
i = self.cookedq.find(match)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
if timeout is not None:
deadline = _time() + timeout
with _TelnetSelector() as selector:
selector.register(self, selectors.EVENT_READ)
while not self.eof:
if selector.select(timeout):
i = max(0, len(self.cookedq)-n)
self.fill_rawq()
self.process_rawq()
i = self.cookedq.find(match, i)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
if timeout is not None:
timeout = deadline - _time()
if timeout < 0:
break
return self.read_very_lazy()
def read_all(self):
"""Read all data until EOF; block until connection closed."""
self.process_rawq()
while not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = b''
return buf
def read_some(self):
"""Read at least one byte of cooked data unless EOF is hit.
Return b'' if EOF is hit. Block if no data is immediately
available.
"""
self.process_rawq()
while not self.cookedq and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = b''
return buf
def read_very_eager(self):
"""Read everything that's possible without blocking in I/O (eager).
Raise EOFError if connection closed and no cooked data
available. Return b'' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_eager(self):
"""Read readily available data.
Raise EOFError if connection closed and no cooked data
available. Return b'' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.cookedq and not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_lazy(self):
"""Process and return data that's already in the queues (lazy).
Raise EOFError if connection closed and no data available.
Return b'' if no cooked data available otherwise. Don't block
unless in the midst of an IAC sequence.
"""
self.process_rawq()
return self.read_very_lazy()
def read_very_lazy(self):
"""Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return b'' if no cooked data available otherwise. Don't block.
"""
buf = self.cookedq
self.cookedq = b''
if not buf and self.eof and not self.rawq:
raise EOFError('telnet connection closed')
return buf
def read_sb_data(self):
"""Return any data available in the SB ... SE queue.
Return b'' if no SB ... SE available. Should only be called
after seeing a SB or SE command. When a new SB command is
found, old unread SB data will be discarded. Don't block.
"""
buf = self.sbdataq
self.sbdataq = b''
return buf
def set_option_negotiation_callback(self, callback):
"""Provide a callback function called after each receipt of a telnet option."""
self.option_callback = callback
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = [b'', b'']
try:
while self.rawq:
c = self.rawq_getchar()
if not self.iacseq:
if c == theNULL:
continue
if c == b"\021":
continue
if c != IAC:
buf[self.sb] = buf[self.sb] + c
continue
else:
self.iacseq += c
elif len(self.iacseq) == 1:
# 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
if c in (DO, DONT, WILL, WONT):
self.iacseq += c
continue
self.iacseq = b''
if c == IAC:
buf[self.sb] = buf[self.sb] + c
else:
if c == SB: # SB ... SE start.
self.sb = 1
self.sbdataq = b''
elif c == SE:
self.sb = 0
self.sbdataq = self.sbdataq + buf[1]
buf[1] = b''
if self.option_callback:
# Callback is supposed to look into
# the sbdataq
self.option_callback(self.sock, c, NOOPT)
else:
# We can't offer automatic processing of
# suboptions. Alas, we should not get any
# unless we did a WILL/DO before.
self.msg('IAC %d not recognized' % ord(c))
elif len(self.iacseq) == 2:
cmd = self.iacseq[1:2]
self.iacseq = b''
opt = c
if cmd in (DO, DONT):
self.msg('IAC %s %d',
cmd == DO and 'DO' or 'DONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + WONT + opt)
elif cmd in (WILL, WONT):
self.msg('IAC %s %d',
cmd == WILL and 'WILL' or 'WONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + DONT + opt)
except EOFError: # raised by self.rawq_getchar()
self.iacseq = b'' # Reset on EOF
self.sb = 0
self.cookedq = self.cookedq + buf[0]
self.sbdataq = self.sbdataq + buf[1]
def rawq_getchar(self):
"""Get next char from raw queue.
Block if no data is immediately available. Raise EOFError
when connection is closed.
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq:self.irawq+1]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = b''
self.irawq = 0
return c
def fill_rawq(self):
"""Fill raw queue from exactly one recv() system call.
Block if no data is immediately available. Set self.eof when
connection is closed.
"""
if self.irawq >= len(self.rawq):
self.rawq = b''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %r", buf)
self.eof = (not buf)
self.rawq = self.rawq + buf
def sock_avail(self):
"""Test whether data is available on the socket."""
with _TelnetSelector() as selector:
selector.register(self, selectors.EVENT_READ)
return bool(selector.select(0))
def interact(self):
"""Interaction function, emulates a very dumb telnet client."""
if sys.platform == "win32":
self.mt_interact()
return
with _TelnetSelector() as selector:
selector.register(self, selectors.EVENT_READ)
selector.register(sys.stdin, selectors.EVENT_READ)
while True:
for key, events in selector.select():
if key.fileobj is self:
try:
text = self.read_eager()
except EOFError:
print('*** Connection closed by remote host ***')
return
if text:
sys.stdout.write(text.decode('ascii'))
sys.stdout.flush()
elif key.fileobj is sys.stdin:
line = sys.stdin.readline().encode('ascii')
if not line:
return
self.write(line)
def mt_interact(self):
"""Multithreaded version of interact()."""
import _thread
_thread.start_new_thread(self.listener, ())
while 1:
line = sys.stdin.readline()
if not line:
break
self.write(line.encode('ascii'))
def listener(self):
"""Helper for mt_interact() -- this executes in the other thread."""
while 1:
try:
data = self.read_eager()
except EOFError:
print('*** Connection closed by remote host ***')
return
if data:
sys.stdout.write(data.decode('ascii'))
else:
sys.stdout.flush()
def expect(self, list, timeout=None):
"""Read until one from a list of a regular expressions matches.
The first argument is a list of regular expressions, either
compiled (re.RegexObject instances) or uncompiled (byte strings).
The optional second argument is a timeout, in seconds; default
is no timeout.
Return a tuple of three items: the index in the list of the
first regular expression that matches; the match object
returned; and the text read up till and including the match.
If EOF is read and no text was read, raise EOFError.
Otherwise, when nothing matches, return (-1, None, text) where
text is the text received so far (may be the empty string if a
timeout happened).
If a regular expression ends with a greedy match (e.g. '.*')
or if more than one expression can match the same input, the
results are non-deterministic, and may depend on the I/O timing.
"""
re = None
list = list[:]
indices = range(len(list))
for i in indices:
if not hasattr(list[i], "search"):
if not re: import re
list[i] = re.compile(list[i])
if timeout is not None:
deadline = _time() + timeout
with _TelnetSelector() as selector:
selector.register(self, selectors.EVENT_READ)
while not self.eof:
self.process_rawq()
for i in indices:
m = list[i].search(self.cookedq)
if m:
e = m.end()
text = self.cookedq[:e]
self.cookedq = self.cookedq[e:]
return (i, m, text)
if timeout is not None:
ready = selector.select(timeout)
timeout = deadline - _time()
if not ready:
if timeout < 0:
break
else:
continue
self.fill_rawq()
text = self.read_very_lazy()
if not text and self.eof:
raise EOFError
return (-1, None, text)
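# Editorial sketch (hypothetical prompts, not part of the original
# module): typical expect() usage -- note the patterns are byte
# strings, since the cooked queue holds bytes:
#
#     idx, match, text = tn.expect([b"login: ", b"Password: "], 10)
#     if idx == 0:
#         tn.write(b"guido\r\n")
#     elif idx == -1:        # timeout; nothing matched
#         print("no prompt seen, got %r" % text)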
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
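# Editorial sketch (hypothetical callback, not part of the original
# module): a negotiation callback matching the signature described in
# set_option_negotiation_callback(), refusing every option exactly as
# process_rawq() does by default when no callback is installed.
# Install it with tn.set_option_negotiation_callback(_refuse_all_options_demo).
def _refuse_all_options_demo(sock, cmd, opt):
    if cmd in (DO, DONT):
        sock.sendall(IAC + WONT + opt)
    elif cmd in (WILL, WONT):
        sock.sendall(IAC + DONT + opt)
    # SB/SE and other commands carry no option byte here (opt == NOOPT);
    # a real callback might inspect Telnet.read_sb_data() instead.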
def test():
"""Test program for telnetlib.
Usage: python telnetlib.py [-d] ... [host [port]]
Default host is localhost; default port is 23.
"""
debuglevel = 0
while sys.argv[1:] and sys.argv[1] == '-d':
debuglevel = debuglevel+1
del sys.argv[1]
host = 'localhost'
if sys.argv[1:]:
host = sys.argv[1]
port = 0
if sys.argv[2:]:
portstr = sys.argv[2]
try:
port = int(portstr)
except ValueError:
port = socket.getservbyname(portstr, 'tcp')
with Telnet() as tn:
tn.set_debuglevel(debuglevel)
tn.open(host, port, timeout=0.5)
tn.interact()
if __name__ == '__main__':
test()
| 23,136 | 676 | jart/cosmopolitan | false |